C#:SURF特征匹配

2024-02-23  本文已影响0人  大龙10

一、SURF算法

二、核心函数

1、SURF.Create()

创建SURF检测器

SURF.Create(hessianThreshold, nOctaves, nOctaveLayers, extended, upright)

2、detector.DetectAndCompute()

检测关键点并计算特征描述向量

DetectAndCompute(Mat image, Mat mask, out KeyPoint[] keypoints, Mat descriptors)

3、Cv2.DrawMatches()

绘制特征匹配

void Cv2.DrawMatches(
Mat img1, KeyPoint[] keypoints1,
Mat img2, KeyPoint[] keypoints2,
DMatch[] matches1to2,
Mat outImg,
Scalar matchColor, Scalar singlePointColor,
MatOfByte mask,
DrawMatchesFlags flags = DrawMatchesFlags.Default );

4、KeyPoint 类:

三、匹配思路:

1、创建检测器
2、检测关键点并计算特征描述向量
3、创建特征点匹配器并进行匹配
4、绘制两个图像匹配出的关键点
5、显示匹配图像

四、程序

        // System.Drawing image shown in pictureBox1 (loaded elsewhere — presumably button1; not visible here).
        private Image image = null;
        // System.Drawing image shown in pictureBox2; loaded in button2_Click.
        private Image image2 = null;
        // Scratch output Mat. NOTE(review): not referenced in the visible methods — confirm it is used elsewhere.
        private Mat dst = new Mat();
        // OpenCV source image #1 (query image). Assumed loaded by another handler — TODO confirm, otherwise button3_Click dereferences null.
        private Mat srcImage1;
        // OpenCV source image #2 (train/scene image); loaded in button2_Click via Cv2.ImRead.
        private Mat srcImage2;
        // Path of the most recently chosen file; "" until the user picks one.
        string filePath = "";

       /// <summary>
       /// Lets the user pick the second image from disk, loads it both as a
       /// System.Drawing.Image (for display) and as an OpenCV Mat (for matching),
       /// and shows it in pictureBox2.
       /// </summary>
        private void button2_Click(object sender, EventArgs e)
        {
            // OpenFileDialog is IDisposable — dispose it deterministically.
            using (OpenFileDialog openFileDialog = new OpenFileDialog())
            {
                openFileDialog.Title = "选择操作的图片";
                openFileDialog.Filter = "图片 *.jpg|*.jpg|*.bmp|*.bmp|图像*.png|*.png";
                if (openFileDialog.ShowDialog() == DialogResult.OK)
                {
                    filePath = openFileDialog.FileName;
                    // Load twice: once for WinForms display, once for OpenCV processing.
                    // NOTE(review): Image.FromFile keeps the file locked while image2 lives.
                    image2 = Image.FromFile(filePath);
                    srcImage2 = Cv2.ImRead(filePath);
                }
            }

            // Only update the preview once a file has actually been chosen.
            if (filePath != "")
            {
                pictureBox2.Image = image2;
            }
        }

        /// <summary>
        /// Runs SURF feature matching between srcImage1 (query) and srcImage2 (scene):
        /// detects keypoints, brute-force matches descriptors, filters matches by an
        /// absolute distance threshold, draws the surviving matches, computes a RANSAC
        /// homography, and frames the matched region with a rectangle.
        /// Assumes srcImage1 and srcImage2 were loaded by the other button handlers.
        /// </summary>
        private void button3_Click(object sender, EventArgs e)
        {
            // Hessian threshold for the SURF interest-point detector.
            int minHessian = 400;
            // Create the detector: (hessianThreshold, nOctaves, nOctaveLayers, extended, upright).
            var MySurf = OpenCvSharp.XFeatures2D.SURF.Create(minHessian, 4, 3, true, true);

            Mat descriptors1 = new Mat();
            Mat descriptors2 = new Mat();

            // Detect keypoints and compute their descriptors in a single pass.
            // (The split Detect + Compute form works too, but this avoids re-walking the image.)
            OpenCvSharp.KeyPoint[] keyPoint1, keyPoint2;
            MySurf.DetectAndCompute(srcImage1, null, out keyPoint1, descriptors1);
            MySurf.DetectAndCompute(srcImage2, null, out keyPoint2, descriptors2);

            // Brute-force matcher with L2 norm (SURF descriptors are float vectors).
            BFMatcher matcher = new BFMatcher(NormTypes.L2, crossCheck: false);
            DMatch[] matches = matcher.Match(descriptors1, descriptors2);

            // Absolute descriptor-distance cutoff. NOTE(review): despite the name this is
            // NOT Lowe's ratio test — it keeps matches whose raw distance is below 0.2.
            double ratio_thresh = 0.2;

            List<DMatch> good_matches = new List<DMatch>();
            for (int i = 0; i < matches.Length; i++)
            {
                if (matches[i].Distance < ratio_thresh)
                {
                    good_matches.Add(matches[i]);
                }
            }

            if (good_matches.Count <= 4)
            {
                // Warn that a reliable perspective transform is unlikely; execution still
                // continues, and the hard < 4 guards below stop before FindHomography.
                MessageBox.Show("合格匹配点数量不足" + good_matches.Count);
            }

            // Render the filtered matches side by side.
            Mat imgMatches = new Mat();
            Cv2.DrawMatches(srcImage1, keyPoint1, srcImage2, keyPoint2, good_matches, imgMatches, null, null, null, DrawMatchesFlags.NotDrawSinglePoints);

            // ------- Anchor the object ------------
            // Collect the matched coordinates: obj = points in the query image,
            // scene = corresponding points in the scene image.
            Point2d[] obj = new Point2d[good_matches.Count()], scene = new Point2d[good_matches.Count()];

            for (int i = 0; i < good_matches.Count(); i++)
            {
                // QueryIdx indexes into keyPoint1 (query image) ...
                obj[i] = new Point2d((int)keyPoint1[good_matches[i].QueryIdx].Pt.X, (int)keyPoint1[good_matches[i].QueryIdx].Pt.Y);
                // ... TrainIdx indexes into keyPoint2 (scene image).
                scene[i] = new Point2d((int)keyPoint2[good_matches[i].TrainIdx].Pt.X, (int)keyPoint2[good_matches[i].TrainIdx].Pt.Y);
            }

            // FindHomography needs at least 4 point pairs; bail out early otherwise.
            // BUGFIX: the original showed the message but fell through to FindHomography,
            // which throws with fewer than 4 correspondences.
            if (obj.Length < 4)
            {
                MessageBox.Show("obj优秀匹配点不足,数量为" + obj.Length);
                return;
            }

            if (scene.Length < 4)
            {
                MessageBox.Show("匹配点不足,数量为" + scene.Length);
                return;
            }

            // Estimate the perspective transform with RANSAC (reprojection threshold 3 px).
            // NOTE(review): H is currently unused — the bounding box below is computed from
            // the raw query-image keypoints, not by projecting through H.
            Mat H = Cv2.FindHomography(obj, scene, HomographyMethods.Ransac, 3, null);

            // Track the axis-aligned bounding box of the matched query-image keypoints.
            OpenCvSharp.Point2f topLeft = new OpenCvSharp.Point2f(float.MaxValue, float.MaxValue);
            OpenCvSharp.Point2f bottomRight = new OpenCvSharp.Point2f(float.MinValue, float.MinValue);

            foreach (DMatch match in good_matches)
            {
                OpenCvSharp.Point2f srcPt = keyPoint1[match.QueryIdx].Pt;
                OpenCvSharp.Point2f dstPt = keyPoint2[match.TrainIdx].Pt;

                // Expand the box to include this query-image point.
                topLeft.X = Math.Min(topLeft.X, srcPt.X);
                topLeft.Y = Math.Min(topLeft.Y, srcPt.Y);
                bottomRight.X = Math.Max(bottomRight.X, srcPt.X);
                bottomRight.Y = Math.Max(bottomRight.Y, srcPt.Y);
            }

            // Truncate to integer pixel coordinates for drawing.
            OpenCvSharp.Point topLeftPoint = new OpenCvSharp.Point((int)topLeft.X, (int)topLeft.Y);
            OpenCvSharp.Point bottomRightPoint = new OpenCvSharp.Point((int)bottomRight.X, (int)bottomRight.Y);
            // Frame the matched region in green (2 px) on the composite image.
            Cv2.Rectangle(imgMatches, topLeftPoint, bottomRightPoint, new Scalar(0, 255, 0), 2);
            // Show the final annotated match image in an OpenCV window.
            Cv2.ImShow("匹配图", imgMatches);
        }

五、结果

SURF特征匹配结果

六、资料

亦陈不染的博客:
https://blog.csdn.net/m0_55074196/article/details/134181416
上一篇 下一篇

猜你喜欢

热点阅读