using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using Emgu.CV;
using Emgu.CV.Structure;
using Emgu.Util;
using Emgu.CV.GPU;
using ZedGraph; // required: Form2 plots score histograms with ZedGraph

namespace cs1
{
    /// <summary>
    /// Webcam playground form. Grabs frames from the default camera with Emgu CV,
    /// runs whichever operations are selected in the UI (blur, color-space
    /// conversion/isolation, thresholding, contours, convex hulls, Hough
    /// lines/circles, ellipse fitting, skin detection) and shows the result in
    /// <c>CamImageBox</c> together with a live histogram and an FPS counter.
    /// Frame capture and frame processing both run off <see cref="Application.Idle"/>.
    /// </summary>
    public partial class Form1 : Form
    {
        // ----- global state -----
        private Capture capture;                 // takes images from camera as image frames
        private bool isCapturing = true;         // toggled by clicking the image box
        private Image<Gray, byte> grayFrame;     // scratch single-channel image
        private Image<Bgr, byte> ImageFrame;     // the frame currently being processed
        private Image<Bgr, byte> FrozenFrame;    // last frame grabbed from the camera
        private Image<Luv, Single> LUVframe;     // per-colorspace scratch frames; Single depth
        private Image<Lab, Single> LABframe;     // because several of these spaces produce
        private Image<Hsv, Single> HSVframe;     // negative / fractional channel values
        private Image<Hls, Single> HLSframe;
        private Image<Ycc, Single> YCCframe;
        private Image<Xyz, Single> XYZframe;
        public Image<Bgr, byte> Overlay;
        public Form2 form2;                      // NOTE(review): closing form2 freezes form1
        Timer fpsTimer = new Timer();            // System.Windows.Forms.Timer (UI thread)
        int FPSframecount;                       // frames processed since the last timer tick

        // Shared loop indices reused by the color-isolation code (4 nested loops).
        private int i, j, k, l;
        private int[] scorearray;                // peak scores from minmaxpeaks()
        private int[] sortedscorearray;
        private int topscorecount;               // number of color buckets (udBuckets)
        private int[] topscores;
        private int[] topscoreindexes;
        private int[] dividerindexes;            // bucket boundaries (midpoints between peaks)
        int rows, cols;

        /// <summary>Wires up the FPS counter, keyboard hooks and CUDA report.</summary>
        public Form1()
        {
            form2 = new Form2();
            form2.Show();
            InitializeComponent();

            // FPS counter: count frames for one second, then display the total.
            fpsTimer.Interval = 1000;
            fpsTimer.Tick += new EventHandler(timer_Tick);
            fpsTimer.Start();

            // Transparent label drawn over the image box:
            // http://stackoverflow.com/questions/9387267/transparent-background-label-over-picturebox
            labelFPS.BackColor = Color.Transparent;
            var pos = this.PointToScreen(labelFPS.Location);
            pos = CamImageBox.PointToClient(pos);
            labelFPS.Parent = CamImageBox;
            labelFPS.Location = pos;

            // Capture keyboard events from the form and every direct child control,
            // so Space works no matter which control has focus.
            this.KeyDown += OnKeyDown;
            foreach (Control control in this.Controls)
            {
                control.KeyDown += OnKeyDown;
            }

            // Report whether a CUDA-capable device was found.
            if (GpuInvoke.HasCuda)
            {
                GpuDeviceInfo gpuinfo = new GpuDeviceInfo();
                rtOutput.AppendText(" CUDA "
                    + gpuinfo.CudaComputeCapability.Major.ToString() + "."
                    + gpuinfo.CudaComputeCapability.Minor.ToString() + " found.\n");
            }
            else
            {
                rtOutput.AppendText(" CUDA Not found.\n");
            }
        }

        /// <summary>Space bar: stop live capture and advance one frame at a time.</summary>
        private void OnKeyDown(object sender, KeyEventArgs e)
        {
            if (e.KeyValue == (int)Keys.Space)
            {
                Application.Idle -= CaptureStream;
                FrozenFrame = capture.QueryFrame();
            }
        }

        /// <summary>Once a second: publish and reset the processed-frame count.</summary>
        void timer_Tick(object sender, EventArgs e)
        {
            labelFPS.Text = FPSframecount.ToString();
            FPSframecount = 0;
        }

        /// <summary>
        /// Idle handler that grabs the next camera frame and resizes it to the
        /// display box. Runs continuously while capturing is enabled.
        /// </summary>
        private void CaptureStream(object sender, EventArgs arg)
        {
            try
            {
                FrozenFrame = capture.QueryFrame();
                // http://stackoverflow.com/questions/11419873/c-emgu-webcam-choose-capture-size
                // (can also be configured in the form designer)
                FrozenFrame = FrozenFrame.Resize(CamImageBox.Width, CamImageBox.Height,
                                                 Emgu.CV.CvEnum.INTER.CV_INTER_AREA);
            }
            catch
            {
                // Deliberate best-effort: a failed grab just skips this frame.
            }
        }

        /// <summary>
        /// Idle handler that applies every UI-selected operation to the most
        /// recently captured frame and displays the result. Any exception is
        /// logged to <c>rtOutput</c> and the frame is skipped.
        /// </summary>
        private void ProcessFrame(object sender, EventArgs arg)
        {
            try
            {
                ImageFrame = FrozenFrame;

                // NOTE(review): blur is applied again in the checkbox region below,
                // so checking cbBlur blurs the frame twice (preserved as-is).
                if (cbBlur.Checked)
                {
                    ImageFrame = ImageFrame.SmoothBlur((int)BlurUpDown.Value, (int)BlurUpDown.Value);
                }

                #region color schemas
                // Pass split channels through the selected color schema just to see
                // what it looks like.
                if (checkBox1.Checked)
                {
                    ImageFrame = hidechannels(ImageFrame);
                }
                if (cbColorSchema.Text == "Bgr")
                {
                    ImageFrame = hidechannels(ImageFrame);
                    ImageFrame = IsolateColors(ImageFrame);
                }
                else if (cbColorSchema.Text == "Luv")
                {
                    LUVframe = ImageFrame.Convert<Luv, Single>();
                    LUVframe = IsolateColors(LUVframe);
                    LUVframe = hidechannels(LUVframe);
                    ImageFrame = LUVframe.Convert<Bgr, byte>();
                }
                else if (cbColorSchema.Text == "Hsv")
                {
                    HSVframe = ImageFrame.Convert<Hsv, Single>();
                    HSVframe = hidechannels(HSVframe);
                    HSVframe = IsolateColors(HSVframe);
                    ImageFrame = HSVframe.Convert<Bgr, byte>();
                }
                else if (cbColorSchema.Text == "Lab")
                {
                    LABframe = ImageFrame.Convert<Lab, Single>();
                    LABframe = hidechannels(LABframe);
                    LABframe = IsolateColors(LABframe);
                    ImageFrame = LABframe.Convert<Bgr, byte>();
                }
                else if (cbColorSchema.Text == "Hls")
                {
                    HLSframe = ImageFrame.Convert<Hls, Single>();
                    HLSframe = hidechannels(HLSframe);
                    HLSframe = IsolateColors(HLSframe);
                    ImageFrame = HLSframe.Convert<Bgr, byte>();
                }
                else if (cbColorSchema.Text == "Xyz")
                {
                    XYZframe = ImageFrame.Convert<Xyz, Single>();
                    XYZframe = hidechannels(XYZframe);
                    XYZframe = IsolateColors(XYZframe);
                    ImageFrame = XYZframe.Convert<Bgr, byte>();
                }
                else if (cbColorSchema.Text == "Ycc")
                {
                    YCCframe = ImageFrame.Convert<Ycc, Single>();
                    YCCframe = hidechannels(YCCframe);
                    YCCframe = IsolateColors(YCCframe);
                    ImageFrame = YCCframe.Convert<Bgr, byte>();
                }

                if (cbSkinDetect.Checked)
                {
                    // AdaptiveSkinDetector never produced usable output here; its mask is
                    // computed but the displayed result comes from a fixed BGR skin range.
                    // FIX(review): removed a dead CvInvoke.cvCreateImage call whose native
                    // IplImage handle was never released (memory leak), plus unused IntPtr
                    // experiments.
                    AdaptiveSkinDetector myskin = new AdaptiveSkinDetector(
                        (int)udSamplingDivider.Value, AdaptiveSkinDetector.MorphingMethod.NONE);
                    Image<Gray, byte> hueMask = new Image<Gray, byte>(ImageFrame.Size);
                    myskin.Process(ImageFrame, hueMask);

                    grayFrame = ImageFrame.InRange(new Bgr(0, 10, 60), new Bgr(20, 150, 255));
                    ImageFrame = grayFrame.Convert<Bgr, byte>();
                }

                if (clbColors.CheckedItems.Count > 0)
                {
                    // Show only pixels within +-range of the first checked named color.
                    int range = (int)udColorRange.Value;
                    Color acolor = Color.FromName(clbColors.CheckedItems[0].ToString());
                    grayFrame = ImageFrame.InRange(
                        new Bgr(acolor.B - range, acolor.G - range, acolor.R - range),
                        new Bgr(acolor.B + range, acolor.G + range, acolor.R + range));
                    CamImageBox.Image = grayFrame;
                    // NOTE(review): this immediately replaces grayFrame with the Overlay
                    // image; the mask assignment above is effectively dead (preserved as-is).
                    CamImageBox.Image = Overlay;
                    return;
                }
                #endregion

                #region checkbox processing
                if (cbBlur.Checked)
                {
                    ImageFrame = ImageFrame.SmoothBlur((int)BlurUpDown.Value, (int)BlurUpDown.Value);
                }
                if (cbgray.Checked)
                {
                    grayFrame = ImageFrame.Convert<Gray, byte>();
                    ImageFrame = grayFrame.Convert<Bgr, byte>();
                }
                if (cbSobel.Checked)
                {
                    // Sobel returns a float image; convert back to byte for display.
                    ImageFrame = ImageFrame.Sobel(1, 1, (int)SobelUpDown.Value).Convert<Bgr, byte>();
                }
                if (cbThresh.Checked)
                {
                    grayFrame = ImageFrame.Convert<Gray, byte>();
                    if (cbThreshAdaptive.Checked)
                    {
                        grayFrame = grayFrame.ThresholdAdaptive(new Gray(255),
                            Emgu.CV.CvEnum.ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_GAUSSIAN_C,
                            Emgu.CV.CvEnum.THRESH.CV_THRESH_BINARY,
                            (int)udBlockSize.Value, new Gray(0));
                    }
                    else
                    {
                        grayFrame = grayFrame.ThresholdBinary(
                            new Gray((double)ThreshUpDown.Value), new Gray(255));
                    }
                    ImageFrame = grayFrame.Convert<Bgr, byte>();
                }
                if (cbCanny.Checked)
                {
                    ImageFrame = ImageFrame.Canny((double)udCannyThreshold.Value,
                        (double)udCannyThresholdLinkage.Value).Convert<Bgr, byte>();
                }

                if (cbFindContours.Checked)
                {
                    // Fixed seed keeps contour colors stable from frame to frame,
                    // especially while paused.
                    Random rnd = new Random(1);
                    using (MemStorage stor = new MemStorage())
                    {
                        grayFrame = ImageFrame.Convert<Gray, byte>();
                        try // some CHAIN_APPROX/RETR combinations are unimplemented; ignore failures
                        {
                            for (Contour<Point> contours = grayFrame.FindContours(
                                     (Emgu.CV.CvEnum.CHAIN_APPROX_METHOD)udFindContoursType.Value,
                                     (Emgu.CV.CvEnum.RETR_TYPE)udFindContoursMethod.Value, stor);
                                 contours != null; contours = contours.HNext)
                            {
                                ImageFrame.Draw(contours,
                                    new Bgr(rnd.Next(100, 255), rnd.Next(100, 255), rnd.Next(100, 255)),
                                    (int)udLineThickness.Value);
                            }
                        }
                        catch { }
                    }
                }

                if (cbApproxPoly.Checked)
                {
                    Random rnd = new Random(1); // stable colors between frames
                    using (MemStorage stor = new MemStorage())
                    {
                        grayFrame = ImageFrame.Convert<Gray, byte>();
                        try
                        {
                            for (Contour<Point> contours = grayFrame.FindContours(
                                     (Emgu.CV.CvEnum.CHAIN_APPROX_METHOD)udFindContoursType.Value,
                                     (Emgu.CV.CvEnum.RETR_TYPE)udFindContoursMethod.Value);
                                 contours != null; contours = contours.HNext)
                            {
                                Contour<Point> currentContour = contours.ApproxPoly(
                                    (double)udApproxPolyAccuracy.Value,
                                    (int)udContourLevel.Value, stor);
                                ImageFrame.Draw(currentContour,
                                    new Bgr(rnd.Next(100, 255), rnd.Next(100, 255), rnd.Next(100, 255)),
                                    (int)udLineThickness.Value);
                            }
                        }
                        catch { }
                    }
                }

                if (cbPolyLine.Checked)
                {
                    Random rnd = new Random(1); // stable colors between frames
                    using (MemStorage stor = new MemStorage())
                    {
                        grayFrame = ImageFrame.Convert<Gray, byte>();
                        try
                        {
                            for (Contour<Point> contours = grayFrame.FindContours(
                                     (Emgu.CV.CvEnum.CHAIN_APPROX_METHOD)udFindContoursType.Value,
                                     (Emgu.CV.CvEnum.RETR_TYPE)udFindContoursMethod.Value);
                                 contours != null; contours = contours.HNext)
                            {
                                Point[] pts = contours.ToArray();
                                LineSegment2D[] edges = PointCollection.PolyLine(pts, true);
                                foreach (LineSegment2D edge in edges)
                                {
                                    ImageFrame.Draw(edge,
                                        new Bgr(rnd.Next(100, 255), rnd.Next(100, 255), rnd.Next(100, 255)),
                                        (int)udLineThickness.Value);
                                }
                            }
                        }
                        catch { }
                    }
                }

                if (cbConvexHull.Checked)
                {
                    Random rnd = new Random(1); // stable colors between frames
                    using (MemStorage storage = new MemStorage())
                    {
                        grayFrame = ImageFrame.Convert<Gray, byte>();
                        for (Contour<Point> contours = grayFrame.FindContours(
                                 (Emgu.CV.CvEnum.CHAIN_APPROX_METHOD)udFindContoursType.Value,
                                 (Emgu.CV.CvEnum.RETR_TYPE)udFindContoursMethod.Value);
                             contours != null; contours = contours.HNext)
                        {
                            // Skip hulls that are too small (could also use .Total).
                            if (contours.Perimeter >= (double)udPerimeter.Value)
                            {
                                Point[] pts1 = contours.ToArray();
                                PointF[] ptsF = point2pointf(pts1);
                                PointF[] hull = PointCollection.ConvexHull(ptsF, storage,
                                    Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE).ToArray();
                                ImageFrame.DrawPolyline(Array.ConvertAll(hull, Point.Round),
                                    cbClosedHull.Checked,
                                    new Bgr(rnd.Next(100, 255), rnd.Next(100, 255), rnd.Next(100, 255)),
                                    (int)udLineThickness.Value);
                            }
                        }
                    }
                }

                if (cbLines.Checked)
                {
                    LineSegment2D[][] lines = ImageFrame.HoughLines(
                        (double)udCannyThreshold.Value,
                        (double)udCannyThresholdLinkage.Value,
                        (double)udResolution.Value,                   // rho resolution (sub-pixel size)
                        (Math.PI / 180) * (double)udAngle.Value,      // angle increments allowed
                        (int)udAccumThreshold.Value,                  // a line needs this many votes
                        (double)udMinWidth.Value,                     // segment must be at least this long
                        (double)udGapBetween.Value);                  // merge segments closer than this
                    foreach (LineSegment2D[] channel in lines)
                    {
                        foreach (LineSegment2D segment in channel)
                        {
                            ImageFrame.Draw(segment, new Bgr(0, 200, 0), (int)udLineThickness.Value);
                        }
                    }
                }

                if (cbCircles.Checked)
                {
                    double cannyThresh = (double)udCannyThreshold.Value;
                    double accumThresh = (double)udAccumThreshold.Value;
                    CircleF[][] circles = ImageFrame.HoughCircles(
                        new Bgr(cannyThresh, cannyThresh, cannyThresh),
                        new Bgr(accumThresh, accumThresh, accumThresh),
                        (double)udResolution.Value,
                        (double)udGapBetween.Value,
                        (int)udMinWidth.Value, 0);
                    foreach (CircleF[] channel in circles)
                    {
                        foreach (CircleF circle in channel)
                        {
                            ImageFrame.Draw(circle, new Bgr(0, 255, 200), (int)udLineThickness.Value);
                        }
                    }
                }

                if (cbEllipse.Checked)
                {
                    // Least-squares ellipse fit per contour
                    // (http://www.emgu.com/wiki/index.php/Ellipse_Fitting_in_CSharp).
                    using (MemStorage storage = new MemStorage())
                    {
                        grayFrame = ImageFrame.Convert<Gray, byte>();
                        for (Contour<Point> contours = grayFrame.FindContours(
                                 (Emgu.CV.CvEnum.CHAIN_APPROX_METHOD)udFindContoursType.Value,
                                 (Emgu.CV.CvEnum.RETR_TYPE)udFindContoursMethod.Value);
                             contours != null; contours = contours.HNext)
                        {
                            if (contours.Total > udAccumThreshold.Value)
                            {
                                Point[] pts1 = contours.ToArray();
                                PointF[] ptsF = point2pointf(pts1);
                                Ellipse myEllipse = PointCollection.EllipseLeastSquareFitting(ptsF);
                                if (myEllipse.MCvBox2D.size.Height > (float)udMinWidth.Value
                                    && myEllipse.MCvBox2D.size.Width > (float)udMinWidth.Value)
                                {
                                    // The fitted ellipse comes back mirrored around x = y,
                                    // so reflect its angle about 45 degrees before drawing.
                                    int angle = Convert.ToInt32(myEllipse.MCvBox2D.angle);
                                    angle = (angle - (angle - 45) * 2) % 360;
                                    Ellipse rotatedEllipse = new Ellipse(
                                        myEllipse.MCvBox2D.center, myEllipse.MCvBox2D.size, angle);
                                    ImageFrame.Draw(rotatedEllipse, new Bgr(155, 0, 155),
                                        (int)udLineThickness.Value);
                                    // Original fit in a darker color for comparison.
                                    ImageFrame.Draw(myEllipse, new Bgr(55, 0, 55),
                                        (int)udLineThickness.Value);
                                }
                            }
                        }
                    }
                }
                #endregion

                if (ImageFrame != null)
                {
                    doHistogram(ImageFrame);
                    CamImageBox.Image = ImageFrame; // the big moment
                }
                FPSframecount++;
            }
            catch (Exception e)
            {
                rtOutput.AppendText(e.ToString() + "\n");
            }
        }

        #region hidechannel and other functions

        /// <summary>
        /// Scores each histogram index by how often it is the local maximum of a
        /// sliding segment, over several segment sizes; peaks accumulate the highest
        /// scores. Known limitation: flat, uninteresting regions still accumulate
        /// some score, because at small segment sizes *something* has to win each
        /// segment. Possible refinements: weight larger segments higher, restrict to
        /// large/small segments only, or skip relatively flat areas.
        /// </summary>
        /// <param name="histarray">Histogram bin counts.</param>
        /// <returns>Per-index "local maximum" hit counts (same length as input).</returns>
        public int[] minmaxpeaks(int[] histarray)
        {
            int arraysize = histarray.Length;
            int i;
            int[] scorearray = new int[arraysize]; // accumulates a +1 per segment won
            int indexstart, indexend, segmentsize;
            float localmax = 0;
            int maxindex = 0;
            // FIX: was arraysize / 10, which is 0 for arraysize < 10 and made the
            // outer loop below never terminate (k += 0). Clamp the step to >= 1.
            int segmentstepsize = Math.Max(1, arraysize / 10); // ~10 regions feels right
            for (int k = segmentstepsize; k < arraysize; k = k + segmentstepsize) // try several segment sizes
            {
                segmentsize = k;
                // Start below 0 and run past the end so every index is evaluated the
                // same number of times by every segment size.
                for (int j = 0 - segmentsize; j < arraysize + segmentsize; j = j + segmentsize)
                {
                    indexstart = j;
                    indexend = indexstart + segmentsize;
                    localmax = 0;
                    maxindex = 0;
                    for (i = indexstart; i <= indexend; i++) // scan the segment
                    {
                        if (i < 0 || i >= arraysize)
                        {
                            continue; // out-of-range part of an edge segment
                        }
                        if (histarray[i] > localmax) // largest seen so far in this segment
                        {
                            localmax = histarray[i];
                            maxindex = i;
                        }
                    }
                    scorearray[maxindex]++; // mark this segment's winner
                }
            }
            form2.Add_Histogram(scorearray, "Scores"); // ZedGraph plot on Form2
            return scorearray;
        }

        /// <summary>Linear scan for the first index holding <paramref name="max"/>; -1 if absent.</summary>
        private int locOfMax(float[] myarray, float max)
        {
            for (int i = 0; i <= myarray.GetUpperBound(0); i++)
            {
                if (myarray[i] == max)
                {
                    return i;
                }
            }
            return -1;
        }

        /// <summary>
        /// Zeroes out whichever channels are unchecked (cbChannel1..3) and returns
        /// the re-merged image. http://www.emgu.com/forum/viewtopic.php?f=7&amp;t=4321
        /// </summary>
        public Image<TColor, TDepth> hidechannels<TColor, TDepth>(Image<TColor, TDepth> frame)
            where TColor : struct, IColor
            where TDepth : new()
        {
            if (frame == null) return frame;
            Image<Gray, TDepth>[] splitimage = frame.Split();
            if (cbChannel1.Checked == false) { splitimage[0].SetZero(); }
            if (cbChannel2.Checked == false) { splitimage[1].SetZero(); }
            if (cbChannel3.Checked == false) { splitimage[2].SetZero(); }
            // Merge the channel array back into a single image.
            Image<TColor, TDepth> mergedimage = new Image<TColor, TDepth>(splitimage);
            return mergedimage;
        }

        /// <summary>Recomputes and redraws the on-form histogram for the given frame.</summary>
        private void doHistogram<TColor, TDepth>(Image<TColor, TDepth> frame)
            where TColor : struct, IColor
            where TDepth : new()
        {
            histogramBox1.ClearHistogram();
            histogramBox1.GenerateHistograms(frame, 64);
            histogramBox1.Refresh();
        }

        /// <summary>Releases the camera handle.</summary>
        private void ReleaseData()
        {
            if (capture != null)
                capture.Dispose();
        }

        private void Form1_Load(object sender, EventArgs e)
        {
            Application.Idle += ProcessFrame;
            Application.Idle += CaptureStream;

            #region if capture is not created, create it now
            if (capture == null)
            {
                try
                {
                    capture = new Capture();
                }
                catch (NullReferenceException excpt)
                {
                    MessageBox.Show(excpt.Message);
                }
            }
            // FIX: guard against a failed Capture construction before querying.
            if (capture != null)
            {
                ImageFrame = capture.QueryFrame();
            }
            Overlay = new Image<Bgr, byte>(new Size(CamImageBox.Width, CamImageBox.Height));
            #endregion
        }

        private void CamImageBox_Click(object sender, EventArgs e)
        {
            freezeUnfreeze();
        }

        /// <summary>Toggles live capture on/off (freeze picture on click).</summary>
        private void freezeUnfreeze()
        {
            isCapturing = !isCapturing;
            if (isCapturing)
                Application.Idle += CaptureStream;
            else
                Application.Idle -= CaptureStream;
        }

        /// <summary>
        /// Widens a Point[] to PointF[]; there is no built-in array conversion, so
        /// roll our own (cleaner than Array.ConvertAll with a cast lambda).
        /// </summary>
        private PointF[] point2pointf(Point[] pts)
        {
            PointF[] ptsF = new PointF[pts.Length];
            for (int i = 0; i < pts.Length; i++)
            {
                ptsF[i] = pts[i];
            }
            return ptsF;
        }

        /// <summary>
        /// Enables the adaptive-threshold option only while thresholding is on.
        /// FIX: previously toggled Enabled, which drifts out of sync with the
        /// checkbox; now derived directly from cbThresh.Checked.
        /// </summary>
        private void cbThresh_CheckedChanged(object sender, EventArgs e)
        {
            cbThreshAdaptive.Enabled = cbThresh.Checked;
        }

        #endregion

        private void checkedListBox1_SelectedIndexChanged(object sender, EventArgs e)
        {
        }

        /// <summary>Shows the coordinates and color of the pixel under the cursor.</summary>
        private void CamImageBox_MouseMove(object sender, MouseEventArgs e)
        {
            // FIX: also guard CamImageBox.Image — it is null before the first frame.
            if (CamImageBox != null && CamImageBox.Image != null && lPixelColor != null)
            {
                if (e.Y < CamImageBox.Image.Size.Height && e.Y >= 0
                    && e.X >= 0 && e.X < CamImageBox.Image.Size.Width)
                {
                    lPixelColor.Text = e.X.ToString() + "," + e.Y.ToString() + " "
                        + CamImageBox.Image.Bitmap.GetPixel(e.X, e.Y).ToString();
                }
            }
        }

        /// <summary>Keeps the output log scrolled to the newest line.</summary>
        private void rtOutput_TextChanged(object sender, EventArgs e)
        {
            rtOutput.SelectionStart = rtOutput.Text.Length;
            rtOutput.ScrollToCaret();
        }

        #region all the colorHiding stuff

        #region bgr
        /// <summary>
        /// BGR overload: byte channels are widened to Single so they can share
        /// IsolateColors2 with the float color spaces, then narrowed back.
        /// </summary>
        public Image<Bgr, byte> IsolateColors(Image<Bgr, byte> frame)
        {
            if (!cbColorIsolate.Checked) return frame;
            Image<Gray, byte>[] greyImages = frame.Split();
            Image<Gray, Single>[] singleImages = new Image<Gray, Single>[greyImages.Length];
            for (i = 0; i < greyImages.Length; i++)
            {
                singleImages[i] = greyImages[i].Convert<Gray, Single>();
            }
            singleImages = IsolateColors2(singleImages);
            for (i = 0; i < greyImages.Length; i++)
            {
                greyImages[i] = singleImages[i].Convert<Gray, byte>();
            }
            return new Image<Bgr, byte>(greyImages); // merge the 3 channels
        }
        #endregion

        /// <summary>Luv overload; the real work happens in IsolateColors2.</summary>
        public Image<Luv, Single> IsolateColors(Image<Luv, Single> frame)
        {
            if (!cbColorIsolate.Checked) return frame;
            Image<Gray, Single>[] greyImages = frame.Split();
            greyImages = IsolateColors2(greyImages);
            return new Image<Luv, Single>(greyImages); // merge the 3 channels
        }

        /// <summary>Hsv overload; pass the channel array into IsolateColors2.</summary>
        public Image<Hsv, Single> IsolateColors(Image<Hsv, Single> frame)
        {
            if (!cbColorIsolate.Checked) return frame;
            Image<Gray, Single>[] greyImages = frame.Split();
            greyImages = IsolateColors2(greyImages);
            return new Image<Hsv, Single>(greyImages); // merge the 3 channels
        }

        /// <summary>Lab overload.</summary>
        public Image<Lab, Single> IsolateColors(Image<Lab, Single> frame)
        {
            if (!cbColorIsolate.Checked) return frame;
            Image<Gray, Single>[] greyImages = frame.Split();
            greyImages = IsolateColors2(greyImages);
            return new Image<Lab, Single>(greyImages); // merge the 3 channels
        }

        /// <summary>Hls overload.</summary>
        public Image<Hls, Single> IsolateColors(Image<Hls, Single> frame)
        {
            if (!cbColorIsolate.Checked) return frame;
            Image<Gray, Single>[] greyImages = frame.Split();
            greyImages = IsolateColors2(greyImages);
            return new Image<Hls, Single>(greyImages); // merge the 3 channels
        }

        /// <summary>Xyz overload.</summary>
        public Image<Xyz, Single> IsolateColors(Image<Xyz, Single> frame)
        {
            if (!cbColorIsolate.Checked) return frame;
            Image<Gray, Single>[] greyImages = frame.Split();
            greyImages = IsolateColors2(greyImages);
            return new Image<Xyz, Single>(greyImages); // merge the 3 channels
        }

        /// <summary>Ycc overload.</summary>
        public Image<Ycc, Single> IsolateColors(Image<Ycc, Single> frame)
        {
            if (!cbColorIsolate.Checked) return frame;
            Image<Gray, Single>[] greyImages = frame.Split();
            greyImages = IsolateColors2(greyImages);
            return new Image<Ycc, Single>(greyImages); // merge the 3 channels
        }

        /// <summary>
        /// The color-space-independent part of color isolation. For each channel:
        /// build a histogram, find its strongest peaks with minmaxpeaks(), split the
        /// value range into buckets at the midpoints between adjacent peaks, and snap
        /// every pixel to its bucket's peak value. With udBuckets == 2 this is
        /// equivalent to a binary threshold.
        /// </summary>
        public Image<Gray, Single>[] IsolateColors2(Image<Gray, Single>[] greyImages)
        {
            int byCurrent;
            Single datum;
            int adjusteddatum;
            Image<Gray, Single> grayimage;

            if (!cbColorIsolate.Checked) return greyImages;

            topscorecount = (int)udBuckets.Value;        // 2 buckets == binary threshing
            topscores = new int[topscorecount];
            topscoreindexes = new int[topscorecount];
            dividerindexes = new int[topscorecount + 1]; // 4 buckets need 5 dividing lines
            form2.Clear_Histograms();

            for (l = 0; l < greyImages.Length; l++) // loop through each channel
            {
                grayimage = greyImages[l].Convert<Gray, Single>();

                // Channel value range drives the histogram size.
                double[] minvals;
                double[] maxvals;
                Point[] minlocs;
                Point[] maxlocs;
                grayimage.MinMax(out minvals, out maxvals, out minlocs, out maxlocs);

                int arraysize = ((int)maxvals[0] - (int)minvals[0]) + 1; // +1 covers rounding up
                int[] histarray = new int[arraysize];
                cols = grayimage.Cols;
                rows = grayimage.Rows;

                if (arraysize > topscorecount) // enough distinct values to isolate :)
                {
                    // Histogram of (rounded) pixel values, shifted by -min: some color
                    // spaces produce negative channel values and arrays cannot have
                    // negative indexes, so everything is offset here and un-offset when
                    // the indexes are used as pixel values.
                    for (j = 0; j < rows; j++)
                    {
                        for (k = 0; k < cols; k++)
                        {
                            datum = grayimage.Data[j, k, 0];
                            adjusteddatum = (int)datum - (int)minvals[0];
                            histarray[adjusteddatum]++;
                        }
                    }

                    scorearray = minmaxpeaks(histarray);
                    // Work on a copy: the original is zeroed below while matching scores.
                    sortedscorearray = new int[scorearray.Length];
                    Array.Copy(scorearray, sortedscorearray, scorearray.Length);
                    Array.Sort(sortedscorearray);
                    // Take the top topscorecount scores.
                    Array.Copy(sortedscorearray, sortedscorearray.Length - topscorecount,
                               topscores, 0, topscorecount);

                    // Map each top score back to its histogram index. A matched score is
                    // zeroed so duplicate scores don't resolve to the same index twice.
                    for (k = 0; k < topscores.Length; k++)
                    {
                        for (j = 0; j < scorearray.Length; j++)
                        {
                            if (scorearray[j] == topscores[k])
                            {
                                scorearray[j] = 0;
                                topscoreindexes[k] = j + (int)minvals[0]; // undo the -min shift
                                break;
                            }
                        }
                    }
                    // Midpoints only make sense between *adjacent* maxima; unsorted peaks
                    // produced very strange results.
                    Array.Sort(topscoreindexes);

                    // Bucket dividers: midpoints between consecutive peaks.
                    dividerindexes[0] = 0;
                    for (k = 1; k < topscoreindexes.Length; k++)
                    {
                        dividerindexes[k] = topscoreindexes[k]
                            - (topscoreindexes[k] - topscoreindexes[k - 1]) / 2;
                    }
                    dividerindexes[topscoreindexes.Length] = (int)maxvals[0];

                    cols = grayimage.Cols;
                    rows = grayimage.Rows;
                    for (j = 0; j < rows; j++)
                    {
                        for (k = 0; k < cols; k++)
                        {
                            // (int) truncation doubles as rounding here.
                            // http://www.emgu.com/wiki/index.php/Working_with_Images#Depth_and_Color_as_Generic_Parameter
                            byCurrent = (int)grayimage.Data[j, k, 0];
                            for (i = 1; i < dividerindexes.Length; i++) // find this pixel's bucket
                            {
                                if (byCurrent >= (dividerindexes[i - 1]) && byCurrent <= dividerindexes[i])
                                {
                                    // Snap the pixel to its bucket's peak value.
                                    grayimage.Data[j, k, 0] = (Single)topscoreindexes[i - 1];
                                    break;
                                }
                            }
                        }
                    }
                }
                // Store the recolored channel back into the array.
                greyImages[l] = grayimage;
            }
            return greyImages;
        }
        #endregion

        /// <summary>
        /// Logs the checked color names. NOTE(review): the InRange result is
        /// discarded, so this currently returns the frame unchanged (preserved
        /// as-is; the only call site is commented out).
        /// http://www.emgu.com/forum/viewtopic.php?f=7&amp;t=4321
        /// </summary>
        public Image<Bgr, byte> threshColors(Image<Bgr, byte> frame)
        {
            for (i = 0; i < clbColors.CheckedItems.Count; i++)
            {
                rtOutput.AppendText(clbColors.CheckedItems[i].ToString());
            }
            frame.InRange(new Bgr(Color.Brown.B - 20, Color.Brown.G - 20, Color.Brown.R - 20),
                          new Bgr(Color.Brown.B + 20, Color.Brown.G + 20, Color.Brown.R + 20));
            return frame;
        }
    }
}