Script: homography estimation.py

The script is located in the Scripts directory of the cudastitcher project.

<syntaxhighlight lang='python'>
""" Tool for estimating the homography matrix """

import sys
import argparse
import numpy as np
import cv2

#----------------------------------------------------------------------------

def drawMatches(imageA, imageB, kpsA, kpsB, matches, status):
    # initialize the output visualization image
    (hA, wA) = imageA.shape[:2]
    (hB, wB) = imageB.shape[:2]
    vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
    vis[0:hA, 0:wA] = imageA
    vis[0:hB, wA:] = imageB

    # loop over the matches
    for ((trainIdx, queryIdx), s) in zip(matches, status):
        # only process the match if the keypoint was successfully
        # matched
        if s == 1:
            # draw the match
            ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
            ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1]))
            cv2.line(vis, ptA, ptB, (0, 255, 0), 1)

    # return the visualization
    return vis

def detectAndDescribe(gray, mask):
    # detect and extract features from the image
    detector = cv2.xfeatures2d.SIFT_create()
    (kps, features) = detector.detectAndCompute(gray, mask)
    kps = np.float32([kp.pt for kp in kps])

    # return a tuple of keypoints and features
    return (kps, features)

def matchKeypoints(kpsA, kpsB, featuresA, featuresB, ratio, reprojThresh):
    # compute the raw matches and initialize the list of actual
    # matches
    #matcher = cv2.DescriptorMatcher_create("BruteForce-Hamming")
    matcher = cv2.DescriptorMatcher_create("BruteForce")
    rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
    matches = []

    # loop over the raw matches
    for m in rawMatches:
        # ensure the distance is within a certain ratio of each
        # other (i.e. Lowe's ratio test)
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            matches.append((m[0].trainIdx, m[0].queryIdx))

    # computing a homography requires at least 4 matches
    print("matches: " + str(len(matches)))
    if len(matches) >= 4:
        # construct the two sets of points
        ptsA = np.float32([kpsA[i] for (_, i) in matches])
        ptsB = np.float32([kpsB[i] for (i, _) in matches])

        # compute the homography between the two sets of points
        (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, reprojThresh)

        # return the matches along with the homography matrix
        # and status of each matched point
        return (matches, H, status)

    # otherwise, no homography could be computed
    return None

def imageEnhancement(img, sigma):
    # Declare the variables we are going to use
    kernel_size = 5

    # Remove noise
    img = cv2.GaussianBlur(img, (kernel_size, kernel_size), sigma)

    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    return gray

def undistort(img):
    # Define camera matrix K
    K = np.array([[2.8472876737532920e+03, 0., 9.7983673800322515e+02],
                  [0., 2.8608529052506838e+03, 5.0423299551699932e+02],
                  [0., 0., 1.]])

    # Define distortion coefficients d
    d = np.array([-6.7260720359999060e-01, 2.5160831522455513e+00, 5.4007310542765141e-02, -1.1365265232659062e-02, -1.2760075297700798e+01])
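    # Note: K and d above are calibration values for one particular camera;
    # for any other camera they should be replaced with the intrinsics and
    # distortion coefficients obtained from your own calibration
    # (for example with cv2.calibrateCamera()).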
 
 
 
    # Acquire the image size
    h, w = img.shape[:2]

    # Generate new camera matrix from parameters
    newcameramatrix, roi = cv2.getOptimalNewCameraMatrix(K, d, (w, h), 0)

    # Generate look-up tables for remapping the camera image
    mapx, mapy = cv2.initUndistortRectifyMap(K, d, None, newcameramatrix, (w, h), 5)

    # Remap the original image to a new image
    newimg = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)
    return newimg

#----------------------------------------------------------------------------

def left_fixed(targetImage, originalImage, overlap, sigma):
    # Variables
    ratio = 0.9
    reprojThresh = 1.0

    # Load images
    target = cv2.imread(targetImage)
    original = cv2.imread(originalImage)

    # Remove lens distortion
    target = undistort(target)
    original = undistort(original)

    # Enhance images
    enhancedTarget = imageEnhancement(target, sigma)
    enhancedOriginal = imageEnhancement(original, sigma)

    # Masks restricting feature detection to the overlap region
    targetMask = np.zeros(enhancedTarget.shape, dtype=np.uint8)
    targetMask[0:, (-1*overlap):] = 1
    originalMask = np.zeros(enhancedOriginal.shape, dtype=np.uint8)
    originalMask[0:, 0:overlap] = 1

    # Extract keypoints
    (kpsA, featuresA) = detectAndDescribe(enhancedOriginal, originalMask)
    (kpsB, featuresB) = detectAndDescribe(enhancedTarget, targetMask)

    # Match features
    M = matchKeypoints(kpsA, kpsB, featuresA, featuresB, ratio, reprojThresh)
    (matches, H, status) = M

    print("""RC_HOMOGRAPHY=\\"h00\\":{},\\"h01\\":{}, \\"h02\\":{}, \\"h10\\":{}, \\"h11\\":{}, \\"h12\\":{}, \\"h20\\":{}, \\"h21\\":{}, \\"h22\\":1""".format(
          H[0][0],
          H[0][1],
          H[0][2],
          H[1][0],
          H[1][1],
          H[1][2],
          H[2][0],
          H[2][1]))

    # Draw matches
    vis = drawMatches(original, target, kpsA, kpsB, matches, status)
    cv2.imwrite('vis.jpg', vis)
    vis = cv2.resize(vis, (1920, 540), interpolation=cv2.INTER_CUBIC)

    # Apply homography
    result = cv2.warpPerspective(original, H, (target.shape[1] + original.shape[1], original.shape[0]))
    result[0:target.shape[0], 0:target.shape[1]] = target
    cv2.imwrite('result.jpg', result)
    result = cv2.resize(result, (1920, 540), interpolation=cv2.INTER_CUBIC)

    # Display images
    cv2.imshow("Target", enhancedTarget)
    cv2.imwrite('target.jpg', enhancedTarget)
    cv2.imshow("Original", enhancedOriginal)
    cv2.imwrite('original.jpg', enhancedOriginal)
    cv2.imshow("VIS", vis)
    cv2.imshow("Result", result)
    cv2.waitKey(0)

def right_fixed(targetImage, originalImage, overlap, sigma):
    # Variables
    ratio = 0.9
    reprojThresh = 1.0

    # Load images
    target = cv2.imread(targetImage)
    original = cv2.imread(originalImage)

    # Remove lens distortion
    target = undistort(target)
    original = undistort(original)

    # Enhance images
    enhancedTarget = imageEnhancement(target, sigma)
    enhancedOriginal = imageEnhancement(original, sigma)

    # Masks restricting feature detection to the overlap region
    targetMask = np.zeros(enhancedTarget.shape, dtype=np.uint8)
    targetMask[0:, 0:overlap] = 1
    originalMask = np.zeros(enhancedOriginal.shape, dtype=np.uint8)
    originalMask[0:, (-1*overlap):] = 1

    # Extract keypoints
    (kpsA, featuresA) = detectAndDescribe(enhancedOriginal, originalMask)
    (kpsB, featuresB) = detectAndDescribe(enhancedTarget, targetMask)

    # Match features
    M = matchKeypoints(kpsA, kpsB, featuresA, featuresB, ratio, reprojThresh)
    (matches, H, status) = M

    print("""LC_HOMOGRAPHY=\\"h00\\":{},\\"h01\\":{}, \\"h02\\":{}, \\"h10\\":{}, \\"h11\\":{}, \\"h12\\":{}, \\"h20\\":{}, \\"h21\\":{}, \\"h22\\":1""".format(
          H[0][0],
          H[0][1],
          H[0][2],
          H[1][0],
          H[1][1],
          H[1][2],
          H[2][0],
          H[2][1]))

    # Shift the warped image horizontally by the width of the target image
    H[0, 2] = H[0, 2] + target.shape[1]

    # Draw matches
    vis = drawMatches(original, target, kpsA, kpsB, matches, status)
    cv2.imwrite('vis.jpg', vis)
    vis = cv2.resize(vis, (1920, 540), interpolation=cv2.INTER_CUBIC)

    # Apply homography
    result = cv2.warpPerspective(original, H, (target.shape[1] + original.shape[1], original.shape[0]))
    result[0:target.shape[0], target.shape[1]:] = target
    cv2.imwrite('result.jpg', result)
    result = cv2.resize(result, (1920, 540), interpolation=cv2.INTER_CUBIC)

    # Display images
    cv2.imshow("Target", enhancedTarget)
    cv2.imwrite('target.jpg', enhancedTarget)
    cv2.imshow("Original", enhancedOriginal)
    cv2.imwrite('original.jpg', enhancedOriginal)
    cv2.imshow("VIS", vis)
    cv2.imshow("Result", result)
    cv2.waitKey(0)

#----------------------------------------------------------------------------

def cmdline(argv):
    prog = argv[0]
    parser = argparse.ArgumentParser(
        prog        = prog,
        description = 'Tool for estimating the homography matrix between two images.',
        epilog      = 'Type "%s <command> -h" for more information.' % prog)

    subparsers = parser.add_subparsers(dest='command')
    subparsers.required = True

    def add_command(cmd, desc, example=None):
        epilog = 'Example: %s %s' % (prog, example) if example is not None else None
        return subparsers.add_parser(cmd, description=desc, help=desc, epilog=epilog)

    p = add_command('left_fixed', 'Estimation of the homography between two images (left image fixed)')

    p.add_argument('--targetImage',   help='Path of the target image', default='')
    p.add_argument('--originalImage', help='Path of the original image', default='')
    p.add_argument('--overlap',       help='Overlap size in pixels', type=int, default=350)
    p.add_argument('--sigma',         help='Gaussian filter sigma', type=float, default=1.5)

    p = add_command('right_fixed', 'Estimation of the homography between two images (right image fixed)')

    p.add_argument('--targetImage',   help='Path of the target image', default='')
    p.add_argument('--originalImage', help='Path of the original image', default='')
    p.add_argument('--overlap',       help='Overlap size in pixels', type=int, default=350)
    p.add_argument('--sigma',         help='Gaussian filter sigma', type=float, default=1.5)

    args = parser.parse_args(argv[1:] if len(argv) > 1 else ['-h'])
    func = globals()[args.command]
    del args.command
    func(**vars(args))

#----------------------------------------------------------------------------

if __name__ == "__main__":
    cmdline(sys.argv)

#----------------------------------------------------------------------------
</syntaxhighlight>
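
As a reference for how the tool is driven, the sketch below forwards an argv-style list to cmdline(), which is equivalent to running the script from a shell with the same arguments. The module name homography_estimation and the image files left.jpg and right.jpg are placeholders rather than part of the script above; adjust them to your own setup.

<syntaxhighlight lang='python'>
# Minimal usage sketch; the module and image names are placeholders.
# Equivalent shell invocation:
#   python3 homography_estimation.py left_fixed --targetImage left.jpg \
#           --originalImage right.jpg --overlap 350 --sigma 1.5
from homography_estimation import cmdline  # assumes the script was saved as homography_estimation.py

cmdline([
    "homography_estimation.py",      # argv[0], only used in the generated help text
    "left_fixed",                    # keep the left capture fixed as the reference
    "--targetImage", "left.jpg",     # image that stays fixed
    "--originalImage", "right.jpg",  # image that gets warped
    "--overlap", "350",              # width in pixels of the overlapping region
    "--sigma", "1.5",                # Gaussian blur sigma applied before SIFT detection
])
</syntaxhighlight>

For the left_fixed command this prints the RC_HOMOGRAPHY line with the eight estimated entries (h22 is fixed to 1) and writes vis.jpg, result.jpg, target.jpg and original.jpg to the working directory, in addition to opening the preview windows.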
 