Windows 10 x64 @ ASUS X450J + Visual Studio 2017 + Emgu 3.2.0.2721 + Kinect 1.8 SDK
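The code-behind in step 1 binds the Kinect color stream to an Image element named image1, so MainWindow.xaml has to declare that control. A minimal sketch of such a XAML file (the x:Class value follows the wk1302 namespace used in the code; the window title and size are arbitrary assumptions):

<Window x:Class="wk1302.MainWindow"
        xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
        xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
        Title="MainWindow" Width="800" Height="600">
    <Grid>
        <!-- The Kinect color frames and the face-detection overlay are shown here -->
        <Image Name="image1" Stretch="Uniform" />
    </Grid>
</Window>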
1. MainWindow.xaml.cs
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Data;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Navigation;
using System.Windows.Shapes;

namespace wk1302
{
    // 1. nuget emgu 3.4.3
    // 2. Add using ref FaceMouse
    using Emgu.CV;
    using Emgu.CV.Structure;
    using Emgu.Util;
    using System.Threading;
    using System.Runtime.InteropServices;
    using System.Windows.Interop;
    //-- Kinect cam
    using Microsoft.Kinect;

    /// <summary>
    /// Interaction logic for MainWindow.xaml
    /// </summary>
    public partial class MainWindow : Window
    {
        private VideoCapture _capture;
        // https://stackoverflow.com/questions/46410342/c-sharp-emgu-could-not-be-found-capture-and-haarcascade
        //private HaarCascade _face;
        private CascadeClassifier _face;

        private KinectSensor sensor;

        /// <summary>
        /// Bitmap that will hold color information
        /// </summary>
        private WriteableBitmap colorBitmap;

        /// <summary>
        /// Intermediate storage for the color data received from the camera
        /// </summary>
        private byte[] colorPixels;

        public MainWindow()
        {
            InitializeComponent();

            // Load the Haar cascade for frontal faces (the XML file must be copied to the output directory)
            _face = new CascadeClassifier("haarcascade_frontalface_alt2.xml");

            // Take the first Kinect sensor (a connected-sensor check is sketched after the listing)
            sensor = KinectSensor.KinectSensors[0];
            this.sensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);

            // Allocate space to put the pixels we'll receive
            this.colorPixels = new byte[this.sensor.ColorStream.FramePixelDataLength];

            // This is the bitmap we'll display on-screen
            this.colorBitmap = new WriteableBitmap(this.sensor.ColorStream.FrameWidth, this.sensor.ColorStream.FrameHeight, 96.0, 96.0, PixelFormats.Bgr32, null);

            // Set the image we display to point to the bitmap where we'll put the image data
            this.image1.Source = this.colorBitmap;

            // Add an event handler to be called whenever there is new color frame data
            this.sensor.ColorFrameReady += this.SensorColorFrameReady;

            // Start the sensor!
            this.sensor.Start();

            // https://stackoverflow.com/questions/1111615/getting-inactivity-idle-time-in-a-wpf-application
            ComponentDispatcher.ThreadIdle += ComponentDispatcher_ThreadIdle;
        }

        private void SensorColorFrameReady(object sender, ColorImageFrameReadyEventArgs e)
        {
            using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
            {
                if (colorFrame != null)
                {
                    // Copy the pixel data from the image to a temporary array
                    colorFrame.CopyPixelDataTo(this.colorPixels);

                    // Write the pixel data into our bitmap
                    this.colorBitmap.WritePixels(
                        new Int32Rect(0, 0, this.colorBitmap.PixelWidth, this.colorBitmap.PixelHeight),
                        this.colorPixels,
                        this.colorBitmap.PixelWidth * sizeof(int),
                        0);
                }
            }
        }

        int count = 0;

        private void ComponentDispatcher_ThreadIdle(object sender, EventArgs e)
        {
            this.Title = (count++).ToString();

            // Dispose the temporary Mat and images each pass, otherwise unmanaged memory leaks on every idle tick
            using (Mat colorMat = BitmapSourceConvert.ToMat(colorBitmap))
            using (var imageFrame = colorMat.ToImage<Bgr, byte>())
            {
                if (imageFrame != null)
                {
                    using (var grayframe = imageFrame.Convert<Gray, byte>())
                    {
                        var vfaces = _face.DetectMultiScale(grayframe, 1.1, 10, System.Drawing.Size.Empty); // the actual face detection happens here
                        if (vfaces.Length > 0)
                        {
                            // Keep only the largest detected face
                            System.Drawing.Rectangle Maxface = vfaces[0];
                            int maxw = vfaces[0].Width;
                            int maxh = vfaces[0].Height;
                            for (int i = 1; i < vfaces.Length; i++)
                            {
                                if (vfaces[i].Width * vfaces[i].Height > maxw * maxh)
                                {
                                    Maxface = vfaces[i];
                                    maxw = vfaces[i].Width;
                                    maxh = vfaces[i].Height;
                                }
                            }

                            // The detected face is highlighted with a box drawn around it
                            imageFrame.Draw(Maxface, new Bgr(System.Drawing.Color.BurlyWood), 3);
                            //---
                            System.Drawing.Point biggestFaceCenter = new System.Drawing.Point(Maxface.X + Maxface.Width / 2, Maxface.Y + Maxface.Height / 2);
                            //Point imageAreaCenter = new Point(imageArea.X + imageArea.Width / 2, imageArea.Y + imageArea.Height / 2);

                            // Draw a green cross at the center of the biggest face
                            imageFrame.Draw(
                                new Cross2DF(biggestFaceCenter, Maxface.Width * 0.1f, Maxface.Height * 0.1f),
                                new Bgr(0, 255, 0), 1);
                        }
                    }
                }
                image1.Source = BitmapSourceConvert.ToBitmapSource(imageFrame);
            }
        }

        // Add ref: System.Drawing.dll
        public static class BitmapSourceConvert
        {
            [DllImport("gdi32")]
            private static extern int DeleteObject(IntPtr o);

            public static BitmapSource ToBitmapSource(IImage image)
            {
                using (System.Drawing.Bitmap source = image.Bitmap)
                {
                    IntPtr ptr = source.GetHbitmap();
                    BitmapSource bs = System.Windows.Interop.Imaging.CreateBitmapSourceFromHBitmap(
                        ptr,
                        IntPtr.Zero,
                        Int32Rect.Empty,
                        System.Windows.Media.Imaging.BitmapSizeOptions.FromEmptyOptions());
                    DeleteObject(ptr); // release the GDI bitmap handle created by GetHbitmap
                    return bs;
                }
            }

            //ref: https://stackoverflow.com/questions/16596915/emgu-with-c-sharp-wpf/16597958
            public static Mat ToMat(BitmapSource source)
            {
                if (source.Format == PixelFormats.Bgr32) // the Kinect color bitmap above is created as Bgr32
                {
                    Mat result = new Mat();
                    result.Create(source.PixelHeight, source.PixelWidth, Emgu.CV.CvEnum.DepthType.Cv8U, 4);
                    source.CopyPixels(Int32Rect.Empty, result.DataPointer, result.Step * result.Rows, result.Step);
                    return result;
                }
                else if (source.Format == PixelFormats.Bgr24)
                {
                    Mat result = new Mat();
                    result.Create(source.PixelHeight, source.PixelWidth, Emgu.CV.CvEnum.DepthType.Cv8U, 3);
                    source.CopyPixels(Int32Rect.Empty, result.DataPointer, result.Step * result.Rows, result.Step);
                    return result;
                }
                else
                {
                    throw new Exception(String.Format("Conversion from BitmapSource of format {0} is not supported.", source.Format));
                }
            }
        }
    }
}
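The constructor grabs KinectSensor.KinectSensors[0] directly, which throws when no Kinect is attached. A more defensive startup, sketched along the lines of the Kinect 1.8 SDK samples, picks the first sensor whose status is Connected:

// Use the first sensor that is actually connected; leave 'sensor' null otherwise
foreach (var potentialSensor in KinectSensor.KinectSensors)
{
    if (potentialSensor.Status == KinectStatus.Connected)
    {
        this.sensor = potentialSensor;
        break;
    }
}

if (this.sensor == null)
{
    MessageBox.Show("No Kinect sensor connected.");
    return;
}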
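The listing also never stops the sensor or detaches the idle handler, so the Kinect keeps streaming after the window closes. A minimal cleanup sketch, written as an OnClosed override that could be added to the MainWindow class above:

protected override void OnClosed(EventArgs e)
{
    // Stop polling for frames and shut the Kinect down when the window closes
    ComponentDispatcher.ThreadIdle -= ComponentDispatcher_ThreadIdle;
    if (this.sensor != null)
    {
        this.sensor.Stop();
    }
    base.OnClosed(e);
}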
2. Result: