I'm working on an Android application and I need to estimate the camera rotation in 3D online, using camera images and the OpenCV library. I would like to calculate the Euler angles.

I have read this and this page, and I can estimate the homography matrix as shown here.

My first question is: do I really need to know the camera intrinsic matrix from camera calibration, or is the homography matrix (camera extrinsics) enough to estimate the Euler angles (pitch, roll, yaw)?

If the homography matrix is enough, how exactly can I do it?

Sorry, I am a real beginner with OpenCV and cannot decompose the homography Mat into a rotation matrix and a translation matrix as described here. How can I compute the Euler angles on Android?

Below you can see my code, which uses solvePnPRansac() and decomposeProjectionMatrix() to calculate the Euler angles.

But it just returns a zero vector, double[] eulerArray = {0, 0, 0}! Can somebody help me? What is wrong there? Thank you very much for any response!

public double[] findEulerAngles(MatOfKeyPoint keypoints1, MatOfKeyPoint keypoints2, MatOfDMatch matches){

    KeyPoint[] k1 = keypoints1.toArray();
    KeyPoint[] k2 = keypoints2.toArray();


    List<DMatch> matchesList = matches.toList();
    List<KeyPoint> referenceKeypointsList = keypoints2.toList();
    List<KeyPoint> sceneKeypointsList = keypoints1.toList();
    // Calculate the max and min distances between keypoints.
    double maxDist = 0.0;
    double minDist = Double.MAX_VALUE;
    for(DMatch match : matchesList) {
        double dist = match.distance;
        if (dist < minDist) {
            minDist = dist;
        }
        if (dist > maxDist) {
            maxDist = dist;
        }
    }

    // Identify "good" keypoints based on match distance.
    List<Point3> goodReferencePointsList = new ArrayList<Point3>();
    ArrayList<Point> goodScenePointsList = new ArrayList<Point>();
    double maxGoodMatchDist = 1.75 * minDist;
    for(DMatch match : matchesList) {
        if (match.distance < maxGoodMatchDist) {
            Point kk2 = k2[match.queryIdx].pt;
            Point kk1 = k1[match.trainIdx].pt;

            Point3 point3 = new Point3(kk1.x, kk1.y, 0.0);
            goodReferencePointsList.add(point3);
            goodScenePointsList.add(kk2);
        }
    }


    if (goodReferencePointsList.size() < 4 || goodScenePointsList.size() < 4) {
        // There are too few good points to find the pose.
        return new double[] {0, 0, 0};
    }

    MatOfPoint3f goodReferencePoints = new MatOfPoint3f();
    goodReferencePoints.fromList(goodReferencePointsList);
    MatOfPoint2f goodScenePoints = new MatOfPoint2f();
    goodScenePoints.fromList(goodScenePointsList);

    MatOfDouble mRMat = new MatOfDouble(3, 3, CvType.CV_32F);
    MatOfDouble mTVec = new MatOfDouble(3, 1, CvType.CV_32F);

    //TODO: solve camera intrinsic matrix
    Mat intrinsics = Mat.eye(3, 3, CvType.CV_32F); // dummy camera matrix
    intrinsics.put(0, 0, 400);
    intrinsics.put(1, 1, 400);
    intrinsics.put(0, 2, 640 / 2);
    intrinsics.put(1, 2, 480 / 2);
    Calib3d.solvePnPRansac(goodReferencePoints, goodScenePoints, intrinsics, new MatOfDouble(), mRMat, mTVec);
    MatOfDouble rotCameraMatrix1 = new MatOfDouble(3, 1, CvType.CV_32F);
    double[] rVecArray = mRMat.toArray();
    // Calib3d.Rodrigues(mRMat, rotCameraMatrix1);
    double[] tVecArray = mTVec.toArray();

    MatOfDouble projMatrix = new MatOfDouble(3, 4, CvType.CV_32F); //projMatrix 3x4 input projection matrix P.
    projMatrix.put(0, 0, rVecArray[0]);
    projMatrix.put(0, 1, rVecArray[1]);
    projMatrix.put(0, 2, rVecArray[2]);
    projMatrix.put(0, 3, 0);
    projMatrix.put(1, 0, rVecArray[3]);
    projMatrix.put(1, 1, rVecArray[4]);
    projMatrix.put(1, 2, rVecArray[5]);
    projMatrix.put(1, 3, 0);
    projMatrix.put(2, 0, rVecArray[6]);
    projMatrix.put(2, 1, rVecArray[7]);
    projMatrix.put(2, 2, rVecArray[8]);
    projMatrix.put(2, 3, 0);

    MatOfDouble cameraMatrix = new MatOfDouble(3, 3, CvType.CV_32F); //cameraMatrix Output 3x3 camera matrix K.
    MatOfDouble rotMatrix = new MatOfDouble(3, 3, CvType.CV_32F); //rotMatrix Output 3x3 external rotation matrix R.
    MatOfDouble transVect = new MatOfDouble(4, 1, CvType.CV_32F); //transVect Output 4x1 translation vector T.
    MatOfDouble rotMatrixX = new MatOfDouble(3, 3, CvType.CV_32F); //rotMatrixX a rotMatrixX
    MatOfDouble rotMatrixY = new MatOfDouble(3, 3, CvType.CV_32F); //rotMatrixY a rotMatrixY
    MatOfDouble rotMatrixZ = new MatOfDouble(3, 3, CvType.CV_32F); //rotMatrixZ a rotMatrixZ
    MatOfDouble eulerAngles = new MatOfDouble(3, 1, CvType.CV_32F); //eulerAngles Optional three-element vector containing three Euler angles of rotation in degrees.

    Calib3d.decomposeProjectionMatrix( projMatrix,
            cameraMatrix,
            rotMatrix,
            transVect,
            rotMatrixX,
            rotMatrixY,
            rotMatrixZ,
            eulerAngles);

    double[] eulerArray = eulerAngles.toArray();

    return eulerArray;
}
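
One thing I am unsure about: as far as I understand, solvePnPRansac() fills mRMat with a 3x1 Rodrigues rotation vector, not a 3x3 rotation matrix (hence the commented-out Rodrigues() call). A minimal sketch of the conversion I think is needed before filling projMatrix (my guess, not verified):

Mat rotMat = new Mat();
Calib3d.Rodrigues(mRMat, rotMat);   // 3x1 rotation vector -> 3x3 rotation matrix
double[] rArray = new double[9];
rotMat.get(0, 0, rArray);           // read the 3x3 entries row by row
// rArray[0..8] would then be the values written into projMatrix above.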
Homography did not work well for me. What are you developing a tracking for? I found a library from Moverio. Please let me know how it helps you. – Jakob Alexander Eichler
It's really similar to your cockpit app! I want to reduce the sensor noise and estimate better Euler angles using feature-based image processing, to get better accuracy. I have read that the angles can be estimated from the homography matrix, and I have just done that. What I don't know is how to calculate the change of angles between two camera images using this homography. Where did you find the Moverio library? Can you use it with OpenCV, or is it a fork? I saw you are from Germany too! Maybe we can use Skype or TeamViewer when you have time ;-) – mox

2 Answers


Homography relates images of the same planar surface, so it works only if there is a dominant plane in the image and you can find enough feature points lying on that plane in both images and successfully match them. The minimum number of matches is four, and the math works under the assumption that the matches are 100% correct. With the help of robust estimation like RANSAC, you can get a result even if some elements in your set of feature point matches are obvious mismatches or do not lie on the plane.
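
As an illustration, the RANSAC variant is a one-liner in the Java bindings; srcPoints/dstPoints below stand for the matched pixel coordinates (MatOfPoint2f, at least 4 pairs), and the 3-pixel reprojection threshold is just a common default, not a recommendation:

Mat inlierMask = new Mat(); // filled by RANSAC: one row per match, 1 = inlier
Mat H = Calib3d.findHomography(srcPoints, dstPoints, Calib3d.RANSAC, 3.0, inlierMask);
if (H.empty()) {
    // estimation failed, e.g. a degenerate point configuration
}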

For the more general case of a set of matched features without the planarity assumption, you will need to find an essential matrix. The exact definition of the matrix can be found here. In short, it works more or less like a homography: it relates corresponding points in two images. The minimum number of matches required to compute the essential matrix is five. To get a result from such a minimal sample, you need to make sure that the established matches are 100% correct. Again, robust estimation can help if there are outliers in your correspondence set, and with automatic feature detection and matching there usually are.

OpenCV 3.0 has a function for essential matrix computation (findEssentialMat), conveniently integrated with RANSAC robust estimation. The essential matrix can be decomposed into the rotation matrix and translation vector as shown in the Wikipedia article I linked before. OpenCV 3.0 has a readily available function for this, too (recoverPose).
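
Put together, a minimal sketch with the OpenCV 3.0 Java bindings might look like this. Here points1/points2 stand for the matched pixel coordinates (MatOfPoint2f), and the focal length and principal point are rough guesses; note that, unlike the homography, the essential matrix does require at least approximate intrinsics:

double focal = 400.0;                        // assumed focal length in pixels
Point pp = new Point(320.0, 240.0);          // assumed principal point
Mat mask = new Mat();                        // inlier mask filled by RANSAC

// Five-point essential matrix estimation wrapped in RANSAC.
Mat E = Calib3d.findEssentialMat(points1, points2, focal, pp,
        Calib3d.RANSAC, 0.999, 1.0, mask);

// Decompose E and pick the physically valid (R, t) via the cheirality check.
Mat R = new Mat();  // 3x3 rotation between the two views
Mat t = new Mat();  // translation direction (unit length; the scale is unobservable)
int inliers = Calib3d.recoverPose(E, points1, points2, R, t, focal, pp, mask);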


The following code now works for me, and I have decomposed the Euler angles from the homography matrix! I get values for pitch, roll and yaw, but I don't know whether they are correct. Does anybody have an idea how I can test them?

private static MatOfDMatch filterMatchesByHomography(MatOfKeyPoint keypoints1, MatOfKeyPoint keypoints2, MatOfDMatch matches){

    List<Point> lp1 = new ArrayList<Point>(500);
    List<Point> lp2 = new ArrayList<Point>(500);

    KeyPoint[] k1 = keypoints1.toArray();
    KeyPoint[] k2 = keypoints2.toArray();

    List<DMatch> matchesList = matches.toList();

    if (matchesList.size() < 4){
        MatOfDMatch mat = new MatOfDMatch();
        return mat;
    }

    // Add matches keypoints to new list to apply homography
    for(DMatch match : matchesList){
        Point kk1 = k1[match.queryIdx].pt;
        Point kk2 = k2[match.trainIdx].pt;
        lp1.add(kk1);
        lp2.add(kk2);
    }

    MatOfPoint2f srcPoints = new MatOfPoint2f(lp1.toArray(new Point[0]));
    MatOfPoint2f dstPoints  = new MatOfPoint2f(lp2.toArray(new Point[0]));

    //---------------------------------------

    Mat mask = new Mat();
    Mat homography = Calib3d.findHomography(srcPoints, dstPoints, Calib3d.RANSAC, 0.2, mask); // Finds a perspective transformation between two planes. ---Calib3d.LMEDS

    Mat pose = cameraPoseFromHomography(homography);

    // Decompose the rotation part of the pose into Euler angles
    pitch = Math.atan2(pose.get(2, 1)[0], pose.get(2, 2)[0]); // arctan2(r32, r33)
    roll = Math.atan2(-1 * pose.get(2, 0)[0], Math.sqrt(Math.pow(pose.get(2, 1)[0], 2) + Math.pow(pose.get(2, 2)[0], 2))); // arctan2(-r31, sqrt(r32^2 + r33^2))
    yaw = Math.atan2(pose.get(1, 0)[0], pose.get(0, 0)[0]); // arctan2(r21, r11)

    List<DMatch> matches_homo = new ArrayList<DMatch>();
    int size = (int) mask.size().height;
    for(int i = 0; i < size; i++){          
        if ( mask.get(i, 0)[0] == 1){
            DMatch d = matchesList.get(i);
            matches_homo.add(d);
        }
    }

    MatOfDMatch mat = new MatOfDMatch();
    mat.fromList(matches_homo);
    return mat;
}

This is my camera pose from the homography matrix (see this page too):

private static Mat cameraPoseFromHomography(Mat h) {
    //Log.d("DEBUG", "cameraPoseFromHomography: homography " + matToString(h));

    Mat pose = Mat.eye(3, 4, CvType.CV_32FC1);  // 3x4 matrix, the camera pose
    float norm1 = (float) Core.norm(h.col(0));
    float norm2 = (float) Core.norm(h.col(1));
    float tnorm = (norm1 + norm2) / 2.0f;       // Normalization value

    Mat normalizedTemp = new Mat();
    Core.normalize(h.col(0), normalizedTemp);
    normalizedTemp.convertTo(normalizedTemp, CvType.CV_32FC1);
    normalizedTemp.copyTo(pose.col(0)); // Normalize the rotation, and copies the column to pose

    Core.normalize(h.col(1), normalizedTemp);
    normalizedTemp.convertTo(normalizedTemp, CvType.CV_32FC1);    
    normalizedTemp.copyTo(pose.col(1));// Normalize the rotation and copies the column to pose

    Mat p3 = pose.col(0).cross(pose.col(1)); // Third rotation column is the cross product of the first two
    p3.copyTo(pose.col(2));

    double[] buffer = new double[3];
    h.col(2).get(0, 0, buffer);
    pose.put(0, 3, buffer[0] / tnorm);  //vector t [R|t] is the last column of pose
    pose.put(1, 3, buffer[1] / tnorm);
    pose.put(2, 3, buffer[2] / tnorm);

    return pose;
}
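
As for testing the angles: one cheap sanity check (my own suggestion, not from any reference) is that the 3x3 rotation part of the returned pose must be orthonormal, i.e. R * R^T should be close to the identity. If it is not, the two normalized homography columns were not orthogonal and the decomposed angles are unreliable:

Mat R = pose.submat(0, 3, 0, 3);            // 3x3 rotation block of [R|t]
Mat rrt = new Mat();
Core.gemm(R, R, 1.0, Mat.zeros(3, 3, R.type()), 0.0, rrt, Core.GEMM_2_T); // R * R^T
double err = Core.norm(rrt, Mat.eye(3, 3, R.type()), Core.NORM_L2);
// err near zero -> the columns form a valid rotation; large err -> angles are suspect.

Another simple test is physical: rotate the camera around a single axis by a known amount (e.g. 90 degrees against a table edge) and check that exactly one of the three angles changes by roughly that amount.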