0.雙目立體視覺的基本建立步驟
- a)雙目標定(samples/cpp/stereo_calib.cpp),由一套操作完成。
- b)圖像根據標定結果進行極線矯正(stereoRectify 函數)
- c)在每條極線上尋找對應點(視差)(也有很多種選擇,StereoMatcher)
- d)根據視差轉換為點云(cv2.reprojectImageTo3D)
- e)點云存儲(samples/python/stereo_match.py 中的 write_ply 函數)和顯示
1. 雙目棋盤格標定詳解
1.1 c++例子中標定的函數:
StereoCalib(imagelist, boardSize, squareSize, false, true, showRectified);
- 需要一系列的圖像
- 標定板內角點的個數(比如9*6;指內角點數,不是格子數)
- 標定板格子的尺寸(比如20mm)
- displayCorners 是否顯示角點
- useCalibrated 是否使用標定結果
- showRectified 是否展示矯正結果
1.2 標定的流程
- 找到亞像素的角點,imagePoints[0]和imagePoints[1],分別對應左右兩圖;
findChessboardCorners
cornerSubPix
- 構建標定板的點坐標,objectPoints
objectPoints[i].push_back(Point3f(k*squareSize, j*squareSize, 0));
- 分別得到兩個相機的初始CameraMatrix(initCameraMatrix2D)
Mat cameraMatrix[2], distCoeffs[2];
cameraMatrix[0] = initCameraMatrix2D(objectPoints,imagePoints[0],imageSize,0);
cameraMatrix[1] = initCameraMatrix2D(objectPoints,imagePoints[1],imageSize,0);
- 雙目視覺進行標定(stereoCalibrate)
Mat R, T, E, F;
double rms = stereoCalibrate(objectPoints, imagePoints[0], imagePoints[1],
cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
imageSize, R, T, E, F,
CALIB_FIX_ASPECT_RATIO +
CALIB_ZERO_TANGENT_DIST +
CALIB_USE_INTRINSIC_GUESS +
CALIB_SAME_FOCAL_LENGTH +
CALIB_RATIONAL_MODEL +
CALIB_FIX_K3 + CALIB_FIX_K4 + CALIB_FIX_K5,
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 100, 1e-5) );
- 標定精度的衡量,這部分注釋就夠了
// CALIBRATION QUALITY CHECK
// because the output fundamental matrix implicitly
// includes all the output information,
// we can check the quality of calibration using the
// epipolar geometry constraint: m2^t*F*m1=0
- 保存標定結果
略
- 矯正一張圖像看看,是否完成了極線矯正
Mat R1, R2, P1, P2, Q; // 說明
Rect validRoi[2];
stereoRectify(cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
imageSize, R, T, R1, R2, P1, P2, Q,
CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);
//Precompute maps for cv::remap(),構建映射圖
initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1, imageSize, CV_16SC2, rmap[0][0], rmap[0][1]);
initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2, imageSize, CV_16SC2, rmap[1][0], rmap[1][1]);
// 讀圖,矯正
Mat img = imread(goodImageList[i*2+k], 0); // 為何要用黑白圖呢?
Mat rimg, cimg;
remap(img, rimg, rmap[k][0], rmap[k][1], INTER_LINEAR);
cvtColor(rimg, cimg, COLOR_GRAY2BGR);
1.3 python的實現代碼
# 0. Basic configuration
show_corners = False
image_number = 13  # number of views per camera (13 image names are appended per side below)
board_size = (9, 6)  # inner-corner count per row/column (the C++ sample's boardSize)
square_Size = 20  # chessboard square edge length; presumably millimetres — TODO confirm
image_lists = []  # loaded grayscale images: left set first, then right set
image_points = []  # per-image detected corners, each reshaped to (N, 2)
# 1. Read each image and find the chessboard corners
# NOTE(review): the indentation of the loop/if bodies below appears to have been
# lost when this snippet was pasted into the notes — restore it before running.
image_dir = "/home/wukong/opencv-4.1.0/samples/data"
image_names = []
[image_names.append(image_dir + "/left%02d.jpg" % i) for i in
[1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14]]  # index 10 is missing from the sample data set
[image_names.append(image_dir + "/right%02d.jpg" % i) for i in [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14]]
print(len(image_names))
for image_name in image_names:
print(image_name)
image = cv2.imread(image_name, 0)  # flag 0: load as grayscale
found, corners = cv2.findChessboardCorners(image, board_size)  # coarse corner detection
if not found:
print("ERROR(no corners):" + image_name)
return None  # NOTE(review): 'return' implies this fragment lives inside a function whose def is not shown
# Optionally visualize and save the detected corners
if show_corners:
vis = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
cv2.drawChessboardCorners(vis, board_size, corners, found)
cv2.imwrite(image_name.split(os.sep)[-1], vis)
cv2.namedWindow("xxx", cv2.WINDOW_NORMAL)
cv2.imshow("xxx", vis)
cv2.waitKey()
term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.01)
cv2.cornerSubPix(image, corners, (11, 11), (-1, -1), term)  # refine corners to sub-pixel accuracy (in place)
image_points.append(corners.reshape(-1, 2))
image_lists.append(image)
# 2. Build the calibration board's object points (planar grid, Z = 0)
object_points = np.zeros((np.prod(board_size), 3), np.float32)
object_points[:, :2] = np.indices(board_size).T.reshape(-1, 2)
object_points *= square_Size  # scale grid indices to physical units
object_points = [object_points] * image_number  # the same board is seen in every view
# object_points = np.repeat(object_points[np.newaxis, :], 13, axis=0)
# print(object_points.shape)
# 3. Initial CameraMatrix for each camera separately
h, w = image_lists[0].shape
camera_matrix = list()
camera_matrix.append(cv2.initCameraMatrix2D(object_points, image_points[:image_number], (w, h), 0))  # left camera
camera_matrix.append(cv2.initCameraMatrix2D(object_points, image_points[image_number:], (w, h), 0))  # right camera
# 4. Stereo calibration: refines both intrinsics and estimates R, T, E, F
term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 100, 1e-5)
retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = \
cv2.stereoCalibrate(object_points, image_points[:image_number], image_points[image_number:], camera_matrix[0],
None, camera_matrix[1], None, (w, h),
flags=cv2.CALIB_FIX_ASPECT_RATIO | cv2.CALIB_ZERO_TANGENT_DIST | cv2.CALIB_USE_INTRINSIC_GUESS |
cv2.CALIB_SAME_FOCAL_LENGTH | cv2.CALIB_RATIONAL_MODEL | cv2.CALIB_FIX_K3 | cv2.CALIB_FIX_K4 | cv2.CALIB_FIX_K5,
criteria=term)
# 5. Measure calibration accuracy, TODO
# 6. Save the calibration result, TODO
# 7. Rectify one image pair to check the epipolar alignment
R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = \
cv2.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, (w, h), R, T)
map1_1, map1_2 = cv2.initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1, (w, h), cv2.CV_16SC2)
map2_1, map2_2 = cv2.initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2, P2, (w, h), cv2.CV_16SC2)
start_time = time.time()
result1 = cv2.remap(image_lists[0], map1_1, map1_2, cv2.INTER_LINEAR)  # first left image
result2 = cv2.remap(image_lists[image_number], map2_1, map2_2, cv2.INTER_LINEAR)  # first right image
print("變形處理時間%f(s)" % (time.time() - start_time))
result = np.concatenate((result1, result2), axis=1)  # left and right side by side
result[::20, :] = 0  # black line every 20 rows: after rectification, matching features should sit on the same row
cv2.imwrite("rec.png", result)
極線矯正結果
整個結果看著還行哈。
2.圖像根據標定結果進行極線矯正(stereoRectify 函數)
根據標定結果,放置新的相機
- 確認新的虛擬相機位置,滿足極線平行關系
- 構造映射map
- 執行map 的變換 remap
R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = \
cv2.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, (w, h), R, T)
map1_1, map1_2 = cv2.initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1, (w, h), cv2.CV_16SC2)
map2_1, map2_2 = cv2.initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2, P2, (w, h), cv2.CV_16SC2)
result1 = cv2.remap(image_lists[0], map1_1, map1_2, cv2.INTER_LINEAR)
result2 = cv2.remap(image_lists[image_number], map2_1, map2_2, cv2.INTER_LINEAR)
3.在每條極線上尋找對應點(視差)
方法有很多
立體匹配的方法
StereoBM, block matching 算法,像素級別的位移,速度快
StereoSGBM,semi-global block matching算法,亞像素的精度,速度慢很多了,實時應用是不考慮的
StereoBeliefPropagation,據說是把這個問題當做了Markov隨機場處理的,所以可以用信念傳播的機制求解,這個目前尚未精通。也是要處理和學習的點。#TODO
4.根據視差轉換為點云(cv2.reprojectImageTo3D)
只需要一步操作就完成了,很簡單
points = cv2.reprojectImageTo3D(disparity, Q)
5. 點云存儲和顯示
略,這些個在opencv/example/python中,應該都可以查看到。