
Converting a byte[] array to a Bitmap with the Kinect 1.5 SDK in a WPF application

I am simulating mouse events in a WPF application with the Kinect SDK 1.5, using the Kinect sensor. I track the hand with the depth and skeleton streams, and the hand tracking itself works correctly. For gesture recognition, however, I need a grayscale image. I can isolate the hand from the depth frame, but that gives me a byte[] array. Is there a way to convert this byte array into a grayscale image? I am using EmguCV, the OpenCV wrapper, for the gesture recognition. My source code is below. When I do the conversion, expanding the static members of my Bitmap object (nnn) in the debugger shows the error "Cannot dereference the expression. The pointer is not valid." How can I do this correctly? Please help.

using System; 
using System.Collections.Generic; 
using System.Linq; 
using System.Text; 
using System.Windows; 
using System.Windows.Controls; 
using System.Windows.Data; 
using System.Windows.Documents; 
using System.Windows.Input; 
using System.Windows.Media; 
using System.Windows.Media.Imaging; 
using System.Windows.Navigation; 
using System.Windows.Shapes; 
using Microsoft.Kinect; 
using System.Windows.Forms; 
using Emgu.CV.Structure; 
using Emgu.CV; 
using System.IO; 
using System.Drawing; 
using System.ComponentModel; 
using System.Drawing.Imaging; 
using System.Runtime.InteropServices; 

namespace SkelitonApp 
{ 
/// <summary> 
/// Interaction logic for MainWindow.xaml 
/// </summary> 
public partial class MainWindow : Window 
{ 

    byte[] pixeData; 
    private WriteableBitmap colorBitmap; 
    KinectSensor kinectsensor = KinectSensor.KinectSensors.FirstOrDefault(s => s.Status == KinectStatus.Connected); 
    public MainWindow() 
    { 

     InitializeComponent(); 

    } 

    const int skeletonCount = 6; 
    Skeleton[] allSkeletons = new Skeleton[skeletonCount]; 

    private void Window_Loaded(object sender, RoutedEventArgs e) 
    { 

     kinectsensor.Start(); 
     kinectsensor.AllFramesReady+=new EventHandler<AllFramesReadyEventArgs>(kinectsensor_AllFramesReady); 
     kinectsensor.ColorStream.Enable(); 
     kinectsensor.DepthStream.Enable(); 
     kinectsensor.SkeletonStream.Enable(); 
    } 

    void kinectsensor_AllFramesReady(object sender, AllFramesReadyEventArgs e) 
    { 

     Skeleton first = GetFirstSkeleton(e); 
     if (first == null) 
     { 
      return; 
     } 
     GetCameraPoint(first, e); 
     //set scaled position 
     /*ScalePosition(headImage, first.Joints[JointType.Head]); 
     ScalePosition(leftEllipse1, first.Joints[JointType.HandLeft]); 
     ScalePosition(rightEllipse2, first.Joints[JointType.HandRight]);*/ 


     using (DepthImageFrame handDepthFrame = e.OpenDepthImageFrame()) 
     { 
      byte[] handBytes = null; 
      SkeletonFrame newskeletonFrame; 
      if (handDepthFrame == null) 
      { 
       return; 
      } 
      using (newskeletonFrame = e.OpenSkeletonFrame()) 
      { 
       if (newskeletonFrame == null) 
       { 
        return; 
       } 


      } 
      handBytes = GenerateColoredBytes(handDepthFrame, newskeletonFrame, first); 
      int stride = handDepthFrame.Width * 4; 
      image2.Source = 
       BitmapSource.Create(handDepthFrame.Width, handDepthFrame.Height, 96, 96, PixelFormats.Bgr32, null, handBytes, stride); 

      //Here is the place where I get the error (the nnn bitmap variable) 
      Bitmap nnn = BitmapSourceToBitmap2(BitmapSource.Create(handDepthFrame.Width, handDepthFrame.Height, 96, 96, PixelFormats.Bgr32, null, handBytes, stride)); 



      Console.WriteLine("aa"); 
     } 
    } 
    public static System.Drawing.Bitmap BitmapSourceToBitmap2(BitmapSource srs) 
    { 
     System.Drawing.Bitmap btm = null; 
     int width = srs.PixelWidth; 
     int height = srs.PixelHeight; 
     int stride = width * ((srs.Format.BitsPerPixel + 7)/8); 
     IntPtr ptr = Marshal.AllocHGlobal(height * stride); 
     srs.CopyPixels(new Int32Rect(0, 0, width, height), ptr, height * stride, stride); 
     btm = new System.Drawing.Bitmap(width, height, stride, System.Drawing.Imaging.PixelFormat.Format1bppIndexed, ptr); 
     return btm; 
    } 

    private byte[] GenerateColoredBytes(DepthImageFrame handDepthFrame, SkeletonFrame newskeletonFrame, Skeleton first) 
    { 


     short[] rawDepthdata=new short[handDepthFrame.PixelDataLength]; 
     handDepthFrame.CopyPixelDataTo(rawDepthdata); 
     Byte[] pixels=new byte[handDepthFrame.Height*handDepthFrame.Width*4]; 

     DepthImagePoint rightHandPoint = handDepthFrame.MapFromSkeletonPoint(first.Joints[JointType.HandRight].Position); 

     int DistanceToHand = rightHandPoint.Depth; 
     const int BlueIndex = 0; 
     const int GreenIndex =1; 
     const int RedIndex = 2; 

     int handDistanceMax = DistanceToHand + 10; 
     int handDistancemin = DistanceToHand - 60; 
     //int handAreaDiff = handDistanceMax - handDistancemin; 

     for (int depthIndex = 0, colorIndex = 0; depthIndex < rawDepthdata.Length && colorIndex < pixeData.Length; depthIndex++, colorIndex += 4) 
     { 
      int player = rawDepthdata[depthIndex] & DepthImageFrame.PlayerIndexBitmask; 
      int depth = rawDepthdata[depthIndex] >> DepthImageFrame.PlayerIndexBitmaskWidth; 

      /*if (depth < 900) 
      { 
       pixels[colorIndex + BlueIndex] = 255; 
       pixels[colorIndex + GreenIndex] = 0; 
       pixels[colorIndex + RedIndex] = 0; 

      }*/ 

      if (depth <handDistanceMax && depth>handDistancemin) 
      { 
       pixels[colorIndex + BlueIndex] = 255; 
       pixels[colorIndex + GreenIndex] = 0; 
       pixels[colorIndex + RedIndex] = 0; 

      } 

     } 


     return pixels; 
    } 




    private void ScalePosition(FrameworkElement element, Joint joint) 
    { 
     /*Joint scaledJoint = joint.ScaleTo(1280, 720); 

     Canvas.SetLeft(element, scaledJoint.Position.X); 
     Canvas.SetTop(element, scaledJoint.Position.Y); */ 
    } 

    private void GetCameraPoint(Skeleton first, AllFramesReadyEventArgs e) 
    { 
     using (DepthImageFrame depth = e.OpenDepthImageFrame()) 
     { 
      if (depth == null) 
      { 
       return; 
      } 

      //map a joint location to a point on the depth map 
      DepthImagePoint headDepthPoint = 
       depth.MapFromSkeletonPoint(first.Joints[JointType.Head].Position); 
      DepthImagePoint leftDepthPoint = 
       depth.MapFromSkeletonPoint(first.Joints[JointType.HandLeft].Position); 
      DepthImagePoint rightDepthPoint = 
       depth.MapFromSkeletonPoint(first.Joints[JointType.HandRight].Position); 

      //map a depth point to a point in the color image 
      ColorImagePoint headColorPoint = 
       depth.MapToColorImagePoint(headDepthPoint.X,headDepthPoint.Y, 
       ColorImageFormat.RgbResolution640x480Fps30); 

      ColorImagePoint leftColorPoint = 
       depth.MapToColorImagePoint(leftDepthPoint.X, leftDepthPoint.Y, 
       ColorImageFormat.RgbResolution640x480Fps30); 

      ColorImagePoint rightColorPoint = 
       depth.MapToColorImagePoint(rightDepthPoint.X, rightDepthPoint.Y, 
       ColorImageFormat.RgbResolution640x480Fps30); 

      //set location 

      //System.Windows.Forms.Cursor.Position = new System.Drawing.Point(rightColorPoint.X,rightColorPoint.Y); 

      double screenWidth = Screen.PrimaryScreen.WorkingArea.Width; 
      double screenHeight = Screen.PrimaryScreen.WorkingArea.Height; 
      double windowWidth = Convert.ToInt32(image1.Width); 
      double windowHeight = Convert.ToInt32(image1.Height); 
      double x1 = rightColorPoint.X; 
      double y1 = rightColorPoint.Y; 
      double posX = (x1*100/ windowWidth); 
      posX = posX/100 * screenWidth; 

      double posY = (y1 * 100/windowHeight); 
      posY = posY/100 * screenHeight; 

      // System.Windows.Forms.Cursor.Position = new System.Drawing.Point((int)posX, (int)posY); 

      /* 
      CameraPosition(headImage,headColorPoint); 
      CameraPosition(leftEllipse1, leftColorPoint); 
      CameraPosition(rightEllipse2, rightColorPoint); 
      */ 


     } 
    } 

    private void CameraPosition(FrameworkElement element, ColorImagePoint point) 
    { 

     Canvas.SetLeft(element,point.X-element.Width/2); 
     Canvas.SetTop(element, point.Y - element.Height/2); 

    } 

    private Skeleton GetFirstSkeleton(AllFramesReadyEventArgs e) 
    { 
     //////////////////////// 
     bool receiveData = false; 
     using (ColorImageFrame colorImageFrame = e.OpenColorImageFrame()) 
     { 
      if (colorImageFrame != null) 
      { 
       if (pixeData == null) 
       { 
        pixeData = new byte[colorImageFrame.PixelDataLength]; 
       } 
       colorImageFrame.CopyPixelDataTo(pixeData); 
       receiveData = true; 



       this.colorBitmap = new WriteableBitmap(this.kinectsensor.ColorStream.FrameWidth, this.kinectsensor.ColorStream.FrameHeight, 96.0, 96.0, PixelFormats.Bgr32, null); 


      } 
      else 
      { 
       // The app's processing of image data took too long and fell more than 2 frames behind, 
       // so the data is no longer available. 
      } 

     } 
     if (receiveData) 
     { 
      this.colorBitmap.WritePixels(
        new Int32Rect(0, 0, this.colorBitmap.PixelWidth, this.colorBitmap.PixelHeight), 
        this.pixeData, 
        this.colorBitmap.PixelWidth * sizeof(int), 
        0); 
      image1.Source = this.colorBitmap; 
     } 

     /////////////////////////// 



     using(SkeletonFrame skeletonFrameData=e.OpenSkeletonFrame()) 
     { 
      if (skeletonFrameData == null) 
      { 
       return null; 
      } 

      skeletonFrameData.CopySkeletonDataTo(allSkeletons); 

      //get the first tracked skeleton 
      Skeleton first=(from s in allSkeletons 
           where s.TrackingState==SkeletonTrackingState.Tracked 
           select s).FirstOrDefault(); 

      return first; 
     } 

    } 


} 

}

Answer


To answer the question "How do I convert a byte array to a bitmap?":

public static class BitmapExtensions 
{ 
    // Extension methods must be declared in a non-nested static class. 
    public static Bitmap ToBitmap(this byte[] pixels, int width, int height, PixelFormat format) 
    { 
        if (pixels == null) 
            return null; 

        var bitmap = new Bitmap(width, height, format); 

        var data = bitmap.LockBits(
            new System.Drawing.Rectangle(0, 0, bitmap.Width, bitmap.Height), 
            ImageLockMode.ReadWrite, 
            bitmap.PixelFormat); 

        Marshal.Copy(pixels, 0, data.Scan0, pixels.Length); 

        bitmap.UnlockBits(data); 

        return bitmap; 
    } 
} 

I am not sure whether this solves your problem. Please update the question to explain which method or line of code is giving you trouble.
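
For the grayscale image the question ultimately asks for, here is a minimal sketch of how the extension above could be combined with Emgu CV. It assumes the 640x480 Bgr32 byte[] produced by GenerateColoredBytes (WPF's PixelFormats.Bgr32 corresponds to GDI+'s Format32bppRgb); the helper name ToGrayImage is made up for illustration and is not tested against the Kinect SDK.

using System.Drawing; 
using System.Drawing.Imaging; 
using Emgu.CV; 
using Emgu.CV.Structure; 

public static class HandImageHelper 
{ 
    // Illustrative helper: wraps the Bgr32 pixel buffer in a Bitmap and 
    // lets Emgu CV convert it to a single-channel grayscale image. 
    public static Image<Gray, byte> ToGrayImage(byte[] handBytes, int width, int height) 
    { 
        // Format32bppRgb is the GDI+ counterpart of WPF's Bgr32: four bytes 
        // per pixel (blue, green, red, one unused byte). 
        using (Bitmap bmp = handBytes.ToBitmap(width, height, PixelFormat.Format32bppRgb)) 
        using (Image<Bgr, byte> color = new Image<Bgr, byte>(bmp)) 
        { 
            return color.Convert<Gray, byte>(); 
        } 
    } 
} 

In kinectsensor_AllFramesReady this would be called as ToGrayImage(handBytes, handDepthFrame.Width, handDepthFrame.Height). Note also that BitmapSourceToBitmap2 creates the Bitmap with PixelFormat.Format1bppIndexed while the stride you pass is for a 32-bit image; that mismatch is a likely cause of the invalid-pointer error you see.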