#!/usr/bin/env python
# coding: utf-8

# ![](pics/header-2.png)
#
# # Calibration with Markers
#
# An ArUco marker is a synthetic square marker composed of a wide black border
# and an inner binary matrix which determines its identifier (id). The black
# border facilitates its fast detection in the image and the binary
# codification allows its identification and the application of error
# detection and correction techniques. The marker size determines the size of
# the internal matrix. For instance, a marker of size 4x4 is composed of 16 bits.
#
# The ArUco module can be used to calibrate a camera. Camera calibration
# consists of obtaining the camera intrinsic parameters and distortion
# coefficients. Camera calibration is usually performed using the OpenCV
# `calibrateCamera()` function. This function requires some correspondences
# between environment points and their projections in the camera image from
# different viewpoints. In general, these correspondences are obtained from
# the corners of chessboard patterns. Calibrating using ArUco is much more
# versatile than using traditional chessboard patterns, since it allows
# occlusions or partial views.
#
# ![](pics/charucodefinition.png)
#
# ChArUco boards combine ArUco markers with a standard checkerboard. ChArUco
# board corners provide much more accuracy in comparison to the ArUco marker
# corners. Calibration using a standard board should only be employed in those
# scenarios where ChArUco boards cannot be employed because of some kind of
# restriction.
# ## References
#
# - [OpenCV 4.0: ArUco Tutorials](https://docs.opencv.org/4.0.0/d9/d6d/tutorial_table_of_content_aruco.html)
# - [Automatic generation and detection of highly reliable fiducial markers under occlusion](docs/paper-2014.pdf)
# - [Notes using aruco markers](https://mecaruco2.readthedocs.io/en/latest/notebooks_rst/Aruco/sandbox/ludovic/aruco_calibration_rotation.html)
#
# ## Contents
#
# - [Stereo Calibration](#stereo-calibration)
# - [ChArUco Target Board](#charuco-target-board)
# - [Get Calibration Images](#get-calibration-images)
# - [Camera Intrinsics](#camera-intrinsics)
# - [Camera Extrinsics](#camera-extrinsics)
# - [Yaml](#yaml)
# - Appendix A: Calibration
# - Appendix B: Blurry

# # Stereo Calibration
#
# 1. Let's calibrate each camera individually and find its intrinsic parameters
# 2. Use this data to then calibrate the two cameras together and find their extrinsic parameters

# In[1]:

# Automatically reload edited helper modules while iterating in the notebook.
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')

# In[56]:

import numpy as np
import cv2
import cv2.aruco as aruco
from matplotlib import pyplot as plt

from opencv_camera import StereoCamera
from opencv_camera import coverage
from opencv_camera import visualizeDistortion
from opencv_camera import drawEpipolarLines
from opencv_camera import mosaic

from colorama import Fore

from helpers import blurry
from helpers import Calibrate
from helpers import get_images

# Compact numpy output: 3 decimal places, no scientific notation.
np.set_printoptions(precision=3, suppress=True)

# # ChArUco Target Board
#
# ## Dictionary
#
# The markers are generated according to a *dictionary* which defines the
# number of horizontal and vertical bits for each marker and the total number
# of valid values that are contained. For example, `aruco.DICT_4X4_50`
# defines a 4x4 2D marker with 50 unique marker ids in it. Once the
# dictionary is created, individual markers can be drawn.
# ```python
# dictionary = aruco.Dictionary_get(aruco.DICT_4X4_50)
# dictionary.drawMarker(id, sidePixels[, _img[, borderBits]]) -> _img
# ```
#
# ## Boards
#
# The board structure has the following useful members/methods:
#
# - `chessboardCorners`
# - `create`
# - `dictionary`
# - `draw`
# - `getChessboardSize`
# - `getMarkerLength`
# - `getSquareLength`
# - `ids`
# - `nearestMarkerCorners`
# - `nearestMarkerIdx`
# - `objPoints`
#
# Found markers can be drawn on an image using `aruco.drawDetectedMarkers(img, corners, ids)`.

# In[4]:

# Build a 5x7 ChArUco target using the 50-marker 4x4 dictionary.
dictionary = aruco.Dictionary_get(aruco.DICT_4X4_50)

squares_x = 5       # squares horizontally
squares_y = 7       # squares vertically
square_len = 0.014  # side of the solid black squares
marker_len = 0.0105 # side of the markers, must be smaller than the squares

board = aruco.CharucoBoard_create(
    squares_x, squares_y,
    square_len, marker_len,
    dictionary)

# Render the target, display it, and save a copy for printing.
tgt = board.draw((1000, 1000))
plt.imshow(tgt, cmap="gray")
plt.axis("off")
plt.title("Target 5x7, DICT_4X4_50");
cv2.imwrite("board.png", tgt)

# ## Get Calibration Images

# In[57]:

# the left/right images are stored together as one image.
# We need to read in each image and cut it in half to separate out
# the left/right
imgsL, imgsR = get_images("aruco-imgs-2/*.png")

# In[63]:

plt.figure(figsize=(10,10))
mos = mosaic(imgsL)
plt.imshow(mos, cmap="gray")
plt.title(f"{len(imgsL)} Left Images @ {imgsL[0].shape}")
plt.axis("off");

# In[8]:

# Ideally we don't want any blurry images getting into the dataset.
# NOTE(review): `blurry()` appears to return (is_blurry, sharpness_value)
# against the given threshold -- confirm against helpers.blurry.
thres = 200  # hoisted out of the loop: it is loop-invariant
for i, (l, r) in enumerate(zip(imgsL, imgsR)):
    blurl, vall = blurry(l, thres)
    cl = Fore.RED if blurl else ""   # was `blurl is True`; plain truthiness is the Python idiom
    blurr, valr = blurry(r, thres)
    cr = Fore.RED if blurr else ""
    if blurl or blurr:
        print(f"{cl}{i:3}: left: {blurl},{vall}{Fore.RESET}", end=" ")
        print(f"{cr}right: {blurr},{valr}{Fore.RESET}", end=" ")
        print(f"threshold ratio: {(vall - thres)/thres:.2f} / {(valr - thres)/thres:.2f}")

# In[9]:

# I think background is giving blurry warnings, not calibration target
num = 7
plt.figure(figsize=(20,5))
plt.imshow(np.hstack((imgsL[num], imgsR[num])), cmap="gray")
plt.axis("off")
plt.title(num);

# # Camera Intrinsics

# In[11]:

# left camera parameters
cal = Calibrate(dictionary, board)
rms1, M1, d1, r1, t1, objpoints, imgpoints_l = cal.calibrate(imgsL)
# print(len(objpoints), objpoints[0].shape, len(imgpoints_l), imgpoints_l[0].shape)

# In[12]:

print('RMS:', rms1, 'px')
print('Camera Matrix:', M1)
print('Dist Coeffs:', d1)

i = 5  # select image id
plt.figure(figsize=(20,10))
img_undist = cv2.undistort(imgsL[i], M1, d1, None)
plt.subplot(1,2,1)
plt.imshow(imgsL[i], cmap='gray')
plt.title("Raw image @ {}".format(imgsL[i].shape))
plt.axis("off")
plt.subplot(1,2,2)
plt.imshow(img_undist, cmap='gray')
plt.title("Corrected image")
plt.axis("off");

# In[13]:

# right camera parameters
rms2, M2, d2, r2, t2, objpoints, imgpoints_r = cal.calibrate(imgsR)

# In[14]:

print('RMS:', rms2, 'px')
print('Camera Matrix:', M2)
print('Dist Coeffs:', d2)

i = 5  # select image id
plt.figure(figsize=(20,10))
img_undist = cv2.undistort(imgsR[i], M2, d2, None)
plt.subplot(1,2,1)
plt.imshow(imgsR[i], cmap='gray')
plt.title("Raw image @ {}".format(imgsR[i].shape))
plt.axis("off")
plt.subplot(1,2,2)
plt.imshow(img_undist, cmap='gray')
plt.title("Corrected image")
plt.axis("off");

# In[17]:

# draw image points mapped across camera focal plane
plt.figure(figsize=(20,10))
plt.subplot(121); plt.imshow(coverage(imgsL[0].shape[:2], np.vstack(imgpoints_l))); plt.axis("off");
plt.subplot(122); plt.imshow(coverage(imgsR[0].shape[:2], np.vstack(imgpoints_r))); plt.axis("off");

# In[18]:

h, w = imgsL[0].shape[:2]
visualizeDistortion(M1, d1, h, w)
h, w = imgsR[0].shape[:2]
visualizeDistortion(M2, d2, h, w);

# In[64]:

# eimg = drawEpipolarLines(imgpoints_l[0],imgpoints_r[0],imgsL[0],imgsR[0])

# # Camera Extrinsics

# In[20]:

# some markers are obscured, we need to weed out the
# points that are not in BOTH the left and right images
objpts = []
rpts = []
lpts = []
for o, l, r in zip(objpoints, imgpoints_l, imgpoints_r):
    # must have all the same number of points for calibration
    if o.shape[:2] == l.shape[:2] == r.shape[:2]:
        objpts.append(o)
        rpts.append(r)
        lpts.append(l)
    else:
        print("bad points:", o.shape, l.shape, r.shape)

# In[48]:

print('obj pts', len(objpts))
print('imgpoints left', len(lpts))
print('imgpoints right', len(rpts))

flags = 0
# flags |= cv2.CALIB_FIX_INTRINSIC
# flags |= cv2.CALIB_FIX_PRINCIPAL_POINT
flags |= cv2.CALIB_USE_INTRINSIC_GUESS
# flags |= cv2.CALIB_FIX_FOCAL_LENGTH
# FIX: CALIB_ZERO_DISPARITY is a stereoRectify() flag, not a stereoCalibrate()
# flag -- stereoCalibrate() ignores its bit, so setting it here had no effect
# and was misleading. Apply it to stereoRectify() instead if desired.
# flags |= cv2.CALIB_ZERO_DISPARITY
# flags |= cv2.CALIB_FIX_ASPECT_RATIO
# flags |= cv2.CALIB_ZERO_TANGENT_DIST
# flags |= cv2.CALIB_RATIONAL_MODEL
# flags |= cv2.CALIB_SAME_FOCAL_LENGTH
# flags |= cv2.CALIB_FIX_K3
# flags |= cv2.CALIB_FIX_K4
# flags |= cv2.CALIB_FIX_K5

# stop after 100 iterations or when the update falls below 1e-5
stereocalib_criteria = (
    cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS,
    100,
    1e-5)

h, w = imgsL[0].shape[:2]
ret, M1, d1, M2, d2, R, T, E, F = cv2.stereoCalibrate(
    objpts, lpts, rpts,
    M1, d1,
    M2, d2,
    (w, h),
    criteria=stereocalib_criteria,
    flags=flags)

print('-'*50)
# print('Image: {}x{}'.format(*imgs_l[0].shape[:2]))
# print('{}: {}'.format(marker_type, marker_size))
print('Intrinsic Camera Parameters')
print('-'*50)
print(' [Camera 1]')
# print(' cameraMatrix_1', M1)
print(' f(x,y): {:.1f} {:.1f} px'.format(M1[0,0], M1[1,1]))
# FIX: "principlePoint" -> "principalPoint" (correct optics term)
print(' principalPoint(x,y): {:.1f} {:.1f} px'.format(M1[0,2], M1[1,2]))
print(' distCoeffs', d1[0])
print(' [Camera 2]')
# print(' cameraMatrix_2', M2)
print(' f(x,y): {:.1f} {:.1f} px'.format(M2[0,0], M2[1,1]))
print(' principalPoint(x,y): {:.1f} {:.1f} px'.format(M2[0,2], M2[1,2]))
print(' distCoeffs', d2[0])
print('-'*50)
print('Extrinsic Camera Parameters')
print('-'*50)
print(' Rotation between left/right camera\n', R)
print(' Translation between left/right camera[meter]', T.T)
print(' Essential matrix\n', E)
print(' Fundamental matrix\n', F)

# In[46]:

# just playing around with calculating the fundamental matrix
from opencv_camera import findFundamentalMat
ff = findFundamentalMat(M1, M2, R, T, True)
print(ff)
print(ff/ff[2,2])  # normalized so ff[2,2] == 1, to compare with F above

# In[22]:

sc = StereoCamera(M1, d1, M2, d2, R, T.T, F, E)
print(sc)

# # Yaml

# In[23]:

sc.to_yaml("camera.yml")

# In[24]:

get_ipython().system('cat camera.yml')

# In[25]:

# round-trip the calibration through yaml to verify serialization
mm = StereoCamera.from_yaml("camera.yml")
print(mm)

# In[26]:

print(mm.p1())
print(mm.p2())

# In[ ]:

# In[ ]:

# In[ ]:

# # Appendix A: Camera Intrinsic Calibration Code

# In[29]:

from jtb import getCodeUrl, getCodeFile, getCodeImport

# In[30]:

getCodeImport(Calibrate)

# # Appendix B: Blurry

# In[31]:

getCodeImport(blurry)

# In[ ]: