I am writing functionality for combining images that have overlapping areas. I used the groundwork from here.

The code itself:

static Mat image1;
static Mat image2;
static Mat img1;
static Mat img2;
static FeatureDetector fd;
static DescriptorExtractor fe;
static DescriptorMatcher fm;

public void init() {
    fd = FeatureDetector.create(FeatureDetector.BRISK);
    fe = DescriptorExtractor.create(DescriptorExtractor.BRISK);
    fm = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE);
    System.out.println("---->> 1");

    // load the two source images
    try {
        img1 = Utils.loadResource(context, R.drawable.image1, Imgcodecs.CV_LOAD_IMAGE_COLOR);
        img2 = Utils.loadResource(context, R.drawable.image2, Imgcodecs.CV_LOAD_IMAGE_COLOR);
        System.out.println("---->> 2");
    } catch (IOException e) {
        System.out.println("-------======= Error loading images =======-------");
        e.printStackTrace();
    }

    image1 = new Mat();
    image2 = new Mat();
    Imgproc.cvtColor(img1, image1, Imgproc.COLOR_BGR2GRAY);
    Imgproc.cvtColor(img2, image2, Imgproc.COLOR_BGR2GRAY);

    // structures for the keypoints from the 2 images
    MatOfKeyPoint keypoints1 = new MatOfKeyPoint();
    MatOfKeyPoint keypoints2 = new MatOfKeyPoint();
    System.out.println("---->> 3");

    // structures for the computed descriptors
    Mat descriptors1 = new Mat();
    Mat descriptors2 = new Mat();

    // structure for the matches
    MatOfDMatch matches = new MatOfDMatch();

    // getting the keypoints
    fd.detect(image1, keypoints1);
    fd.detect(image2, keypoints2);

    // getting the descriptors from the keypoints
    fe.compute(image1, keypoints1, descriptors1);
    fe.compute(image2, keypoints2, descriptors2);

    // getting the matches between the 2 sets of descriptors
    fm.match(descriptors2, descriptors1, matches);

    // turn the matches into a list
    List<DMatch> matchesList = matches.toList();

    Double maxDist = 0.0;   // keep track of max distance among the matches
    Double minDist = 100.0; // keep track of min distance among the matches

    // calculate max & min distances between keypoints
    for (int i = 0; i < keypoints1.rows(); i++) {
        Double dist = (double) matchesList.get(i).distance;
        if (dist < minDist) minDist = dist;
        if (dist > maxDist) maxDist = dist;
    }
    System.out.println("max dist: " + maxDist);
    System.out.println("min dist: " + minDist);

    // structure for the good matches
    LinkedList<DMatch> goodMatches = new LinkedList<DMatch>();

    // use only the good matches (i.e. whose distance is less than 3*minDist)
    for (int i = 0; i < descriptors1.rows(); i++) {
        KeyPoint kp[] = keypoints1.toArray();
        KeyPoint kp2[] = keypoints2.toArray();
        Point point1 = kp[matchesList.get(i).queryIdx].pt;
        Point point2 = kp2[matchesList.get(i).trainIdx].pt;
        if (matchesList.get(i).distance < 3 * minDist) {
            goodMatches.addLast(matchesList.get(i));
        }
    }

    // structures to hold the coordinates of the good matches
    LinkedList<Point> objList = new LinkedList<Point>();   // image1
    LinkedList<Point> sceneList = new LinkedList<Point>(); // image2
    List<KeyPoint> keypoints_objectList = keypoints1.toList();
    List<KeyPoint> keypoints_sceneList = keypoints2.toList();

    // putting the points of the good matches into the structures above
    for (int i = 0; i < goodMatches.size(); i++) {
        objList.addLast(keypoints_objectList.get(goodMatches.get(i).queryIdx).pt);
        sceneList.addLast(keypoints_sceneList.get(goodMatches.get(i).trainIdx).pt);
    }
    System.out.println("\nNum. of good matches: " + goodMatches.size());

    MatOfDMatch gm = new MatOfDMatch();
    gm.fromList(goodMatches);

    // converting the points into the appropriate data structure
    MatOfPoint2f obj = new MatOfPoint2f();
    obj.fromList(objList);
    MatOfPoint2f scene = new MatOfPoint2f();
    scene.fromList(sceneList);

    // finding the homography matrix
    Mat H = Calib3d.findHomography(obj, scene, Calib3d.RANSAC, 1);

    //LinkedList<Point> cornerList = new LinkedList<Point>();
    Mat obj_corners = new Mat(4, 1, CvType.CV_32FC2);
    Mat scene_corners = new Mat(4, 1, CvType.CV_32FC2);
    obj_corners.put(0, 0, new double[]{0, 0});
    obj_corners.put(0, 0, new double[]{image1.cols(), 0});
    obj_corners.put(0, 0, new double[]{image1.cols(), image1.rows()});
    obj_corners.put(0, 0, new double[]{0, image1.rows()});
    Core.perspectiveTransform(obj_corners, scene_corners, H);

    // structure to hold the warped result
    Mat result = new Mat();

    // size of the new image, i.e. image1 + image2
    Size s = new Size(image1.cols() + image2.cols(), image1.rows());

    // using the homography matrix to warp image1, then copying image2 next to it
    Imgproc.warpPerspective(image1, result, H, s);
    int i = image1.cols();
    Mat m = new Mat(result, new Rect(i, 0, image2.cols(), image2.rows()));
    image2.copyTo(m);

    // visualisation of the good matches
    Mat img_mat = new Mat();
    Features2d.drawMatches(image1, keypoints1, image2, keypoints2, gm, img_mat,
            new Scalar(254, 0, 0), new Scalar(254, 0, 0), new MatOfByte(), 2);

    Bitmap bmp1 = Bitmap.createBitmap(result.cols(), result.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(result, bmp1);
    Bitmap bmp2 = Bitmap.createBitmap(img_mat.cols(), img_mat.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(img_mat, bmp2);

    // save the matches visualisation to the Downloads folder
    String download_path = Environment.getExternalStoragePublicDirectory(
            Environment.DIRECTORY_DOWNLOADS).getPath();
    File file = new File(download_path, "imageStitched.jpg");
    file.getParentFile().mkdirs();
    System.out.println("-------======= " + download_path + " =======-------");
    try {
        FileOutputStream fos = null;
        try {
            fos = new FileOutputStream(file);
            bmp2.compress(Bitmap.CompressFormat.JPEG, 100, fos);
        } finally {
            if (fos != null) fos.close();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
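For reference, the OpenCV feature-homography examples write each of the four source corners into its own row of obj_corners before calling perspectiveTransform. Below is only a minimal sketch of that layout, reusing the variable names from my code above (image1, H), not the code I actually ran:

// Sketch only: corner layout as in the OpenCV feature-homography examples,
// one corner per row of the 4x1 CV_32FC2 matrix.
Mat obj_corners = new Mat(4, 1, CvType.CV_32FC2);
Mat scene_corners = new Mat(4, 1, CvType.CV_32FC2);
obj_corners.put(0, 0, new double[]{0, 0});                         // top-left
obj_corners.put(1, 0, new double[]{image1.cols(), 0});             // top-right
obj_corners.put(2, 0, new double[]{image1.cols(), image1.rows()}); // bottom-right
obj_corners.put(3, 0, new double[]{0, image1.rows()});             // bottom-left
Core.perspectiveTransform(obj_corners, scene_corners, H);          // projected corners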

As a result, I get this (see the two attached images).

Although it should look like this (see the attached expected-result image).

Please help me find the error.

P.S. Don't pay attention to the monochrome output; that is not a mistake.

UPD: I found the main error. It was in the line fm.match(descriptors2, descriptors1, matches): the first and second parameters needed to be swapped.
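In other words, the corrected call (same variables as in the code above) is:

// Query descriptors come from image1, train descriptors from image2, so that
// queryIdx refers to keypoints1 and trainIdx to keypoints2 when the points
// of the good matches are extracted later.
fm.match(descriptors1, descriptors2, matches);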

Now the keypoints are matched correctly (see the attached image).

But there is still a problem with merging the images (see the attached image).
