# stitch_img_key_point.py
# Feature-point (keypoint) based image stitching.
  1. import sys
  2. import logging
  3. from pathlib import Path
  4. import cv2
  5. import numpy as np
  6. import os
  7. import time
  8. from typing import Optional, Tuple, List
  9. from dataclasses import dataclass
  10. from fry_project_classes.blend_type_mixin import BlendTypeMixin
# Optional signal/notification hub injected by the host application.
# Every status print in this module is emitted only when this is not None.
# NOTE(review): assigned externally — confirm the injection mechanism.
data_center_algo_inner_signals_obj = None
@dataclass
class FeatureMatchResult:
    """Container for the outcome of one feature detection + matching run."""
    keypoints1: List[cv2.KeyPoint]  # keypoints detected in the first image
    keypoints2: List[cv2.KeyPoint]  # keypoints detected in the second image
    matches: List[cv2.DMatch]  # accepted matches (queryIdx -> kp1, trainIdx -> kp2)
    transform_matrix: Optional[np.ndarray]  # 2x3 partial-affine matrix mapping img1 points onto img2, or None
    match_score: float  # inlier ratio of the estimated transform (sum(mask)/len(mask))
    offset_x: int  # rounded x translation component of transform_matrix
    offset_y: int  # rounded y translation component of transform_matrix
  22. class ImageStitcherKeyPoint(BlendTypeMixin):
  23. """基于特征点的图像拼接器"""
  24. def __init__(self, estimate_overlap_pixels=800,
  25. center_ratio=0.8,
  26. stitch_type="vertical",
  27. blend_type='half_importance',
  28. debug=False,
  29. debug_dir='debug_output',
  30. min_matches=10,
  31. feature_detector='akaze',
  32. blend_ratio: float = 0.3,
  33. combine_detectors=False):
  34. """
  35. 初始化拼图器
  36. 参数:
  37. estimate_overlap_pixels: 预估重叠区域像素数
  38. center_ratio: 中心区域比例
  39. stitch_type: 拼接方式 ('vertical' 或 'horizontal')
  40. blend_type: 融合方式 ('half_importance', 'right_first', 'half_importance_add_weight')
  41. debug: 是否开启调试模式
  42. debug_dir: 调试图片保存目录
  43. min_matches: 最小匹配点数
  44. feature_detector: 特征检测器类型 ('akaze', 'sift', 'orb', 'brisk', 'combine')
  45. combine_detectors: 是否组合使用多个检测器
  46. """
  47. if data_center_algo_inner_signals_obj is not None:
  48. print("警告", f"拼图方法:关键点")
  49. self.estimate_overlap_pixels = estimate_overlap_pixels
  50. self.estimate_non_overlap_pixels = None
  51. self.center_ratio = center_ratio
  52. self.blend_type = blend_type
  53. self.stitch_type = stitch_type
  54. self.debug = debug
  55. self.init_debug = debug_dir
  56. self.min_matches = min_matches
  57. self.blend_ratio = blend_ratio
  58. if self.debug:
  59. self.debug_dir = f"{self.init_debug}_{self.stitch_type}_{self.blend_type}"
  60. os.makedirs(self.debug_dir, exist_ok=True)
  61. # 创建特征检测器和描述符计算器
  62. self.detector = cv2.AKAZE_create()
  63. # 创建特征匹配器
  64. self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
  65. # 匹配得分和变换矩阵
  66. self.best_score = -1
  67. self.match_score = -1
  68. self.transform_matrix = None
  69. # 预估的重叠区域大小
  70. self.real_overlap_width = None
  71. # 初始化特征检测器
  72. self.feature_detector = feature_detector.lower()
  73. self.combine_detectors = combine_detectors
  74. self.detectors = self._init_feature_detectors()
  75. def _init_feature_detectors(self):
  76. """初始化特征检测器"""
  77. detectors = {}
  78. try:
  79. # AKAZE检测器
  80. detectors['akaze'] = cv2.AKAZE_create()
  81. # SIFT检测器 (需要opencv-contrib-python)
  82. detectors['sift'] = cv2.SIFT_create()
  83. # ORB检测器
  84. detectors['orb'] = cv2.ORB_create(nfeatures=2000,
  85. scaleFactor=1.2,
  86. nlevels=8)
  87. # BRISK检测器
  88. detectors['brisk'] = cv2.BRISK_create()
  89. except Exception as e:
  90. error_info = f"Warning: Some detectors could not be initialized: {str(e)}"
  91. if data_center_algo_inner_signals_obj is not None:
  92. print("警告", error_info)
  93. if not detectors:
  94. raise ValueError("No feature detectors could be initialized")
  95. return detectors
  96. def _get_detector_and_matcher(self, detector_name):
  97. """获取特征检测器和对应的特征匹配器"""
  98. if detector_name not in self.detectors:
  99. raise ValueError(f"Unsupported detector: {detector_name}")
  100. detector = self.detectors[detector_name]
  101. # 根据检测器类型选择合适的特征匹配器
  102. if detector_name in ['sift', 'surf']:
  103. # L2范数更适合SIFT和SURF
  104. matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
  105. else:
  106. # 汉明距离更适合二进制描述符(AKAZE, ORB, BRISK)
  107. matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
  108. return detector, matcher
  109. def detect_and_match_features(self, img1: np.ndarray, img2: np.ndarray) -> FeatureMatchResult:
  110. """使用选定的特征检测器检测并匹配特征点"""
  111. try:
  112. if self.feature_detector == 'combine':
  113. return self._detect_and_match_combined(img1, img2)
  114. else:
  115. return self._detect_and_match_single(img1, img2, self.feature_detector)
  116. except Exception as e:
  117. if data_center_algo_inner_signals_obj is not None:
  118. print("警告",
  119. f"Feature detection and matching failed: {str(e)}")
  120. raise
  121. def _detect_and_match_single(self, img1: np.ndarray, img2: np.ndarray,
  122. detector_name: str) -> FeatureMatchResult:
  123. """使用单个检测器进行特征检测和匹配"""
  124. detector, matcher = self._get_detector_and_matcher(detector_name)
  125. # 检测特征点和计算描述符
  126. keypoints1, descriptors1 = detector.detectAndCompute(img1, None)
  127. keypoints2, descriptors2 = detector.detectAndCompute(img2, None)
  128. if self.debug:
  129. # 绘制特征点
  130. img1_kp = cv2.drawKeypoints(img1, keypoints1, None, (255, 0, 0))
  131. img2_kp = cv2.drawKeypoints(img2, keypoints2, None, (255, 0, 0))
  132. self.save_debug_image(img1_kp, f'keypoints_img1_{detector_name}')
  133. self.save_debug_image(img2_kp, f'keypoints_img2_{detector_name}')
  134. feature_match_result = self._match_features(keypoints1, keypoints2, descriptors1, descriptors2,
  135. matcher, detector_name, img1=img1, img2=img2)
  136. return feature_match_result
    def _detect_and_match_combined(self, img1: np.ndarray, img2: np.ndarray) -> FeatureMatchResult:
        """Run every registered detector and keep the best-scoring result.

        For each detector: detect keypoints, match, keep the 50 closest
        matches, estimate a partial-affine transform, and record the inlier
        ratio as the score.  Detectors that raise are skipped.

        Raises:
            ValueError: when no detector produced a usable result.
        """
        all_results = []
        # Detect and match independently with each registered detector
        for detector_name, detector in self.detectors.items():
            try:
                # Matcher suited to this detector's descriptor type
                matcher = self._get_detector_and_matcher(detector_name)[1]
                kp1, desc1 = detector.detectAndCompute(img1, None)
                kp2, desc2 = detector.detectAndCompute(img2, None)
                if desc1 is not None and desc2 is not None:
                    # Match and rank by descriptor distance
                    matches = matcher.match(desc1, desc2)
                    matches = sorted(matches, key=lambda x: x.distance)
                    # Keep at most the 50 closest matches
                    good_matches = matches[:min(50, len(matches))]
                    if len(good_matches) >= self.min_matches:
                        if self.debug:
                            # Draw this detector's keypoints and matches
                            img1_kp = cv2.drawKeypoints(img1, kp1, None, (255, 0, 0))
                            img2_kp = cv2.drawKeypoints(img2, kp2, None, (255, 0, 0))
                            self.save_debug_image(img1_kp, f'match_120_keypoints_img1_{detector_name}')
                            self.save_debug_image(img2_kp, f'match_140_keypoints_img2_{detector_name}')
                            match_img = cv2.drawMatches(img1, kp1, img2, kp2, good_matches, None,
                                                        flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
                            self.save_debug_image(match_img, f'match_160_feature_matches_{detector_name}')
                        # Estimate the partial-affine transform from the matches
                        src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
                        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
                        transform_matrix, mask = cv2.estimateAffinePartial2D(src_pts, dst_pts)
                        # Score = inlier ratio.  NOTE(review): when estimation
                        # fails, mask is None and this raises, so the detector
                        # is skipped via the except below — confirm this is the
                        # intended failure path.
                        match_score = np.sum(mask) / len(mask)
                        # Record this detector's result
                        all_results.append({
                            'keypoints1': kp1,
                            'keypoints2': kp2,
                            'matches': good_matches,
                            'transform_matrix': transform_matrix,
                            'match_score': match_score,
                            'detector_name': detector_name
                        })
                        if self.debug:
                            with open(os.path.join(self.debug_dir, f'match_220_match_info_{detector_name}.txt'),
                                      'w') as f:
                                f.write(f"Number of keypoints in img1: {len(kp1)}\n")
                                f.write(f"Number of keypoints in img2: {len(kp2)}\n")
                                f.write(f"Number of matches: {len(matches)}\n")
                                f.write(f"Number of good matches: {len(good_matches)}\n")
                                f.write(f"Match score: {match_score}\n")
                                if transform_matrix is not None:
                                    f.write(f"Transform matrix:\n{transform_matrix}\n")
            except Exception as e:
                if data_center_algo_inner_signals_obj is not None:
                    print("警告",
                          f"Warning: Detection failed for {detector_name}: {str(e)}")
                continue
        if not all_results:
            raise ValueError("No successful feature detection and matching results")
        # Keep the highest-scoring detector's result
        best_result = max(all_results, key=lambda x: x['match_score'])
        # Translation components of the best transform
        offset_x = int(round(best_result['transform_matrix'][0, 2]))
        offset_y = int(round(best_result['transform_matrix'][1, 2]))
        # Cache score/transform on the instance for stitch_main
        self.match_score = best_result['match_score']
        self.transform_matrix = best_result['transform_matrix']
        if self.debug:
            with open(os.path.join(self.debug_dir, 'match_240_best_detector_info.txt'), 'w') as f:
                f.write(f"Best detector: {best_result['detector_name']}\n")
                f.write(f"Best match score: {best_result['match_score']}\n")
        return FeatureMatchResult(
            keypoints1=best_result['keypoints1'],
            keypoints2=best_result['keypoints2'],
            matches=best_result['matches'],
            transform_matrix=best_result['transform_matrix'],
            match_score=best_result['match_score'],
            offset_x=offset_x,
            offset_y=offset_y
        )
  216. def find_homography(self, kp1, kp2, good_matches):
  217. """计算单应性矩阵"""
  218. if len(good_matches) < 4:
  219. return None, None
  220. src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
  221. dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
  222. H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
  223. if self.debug:
  224. # 保存匹配信息
  225. with open(os.path.join(self.debug_dir, 'match_320_homography_info.txt'), 'w') as f:
  226. f.write(f"Homography matrix:\n{H}\n")
  227. f.write(f"Number of inliers: {np.sum(mask)}\n")
  228. return H, mask
    def _match_features(self, keypoints1, keypoints2, descriptors1, descriptors2,
                        matcher, detector_name, img1=None, img2=None, is_overlap=True) -> FeatureMatchResult:
        """Match two descriptor sets and estimate a partial-affine transform.

        Steps: brute-force match, keep matches whose distance is well below
        the mean (mean - 0.7*std), RANSAC-estimate a partial-affine
        transform, re-fit on the inliers only, then derive integer x/y
        offsets from the final translation column.

        Returns:
            FeatureMatchResult with the inlier matches, the refit transform,
            the inlier-ratio score, and the rounded offsets.

        Raises:
            ValueError: when either descriptor set is None.
        """
        if descriptors1 is None or descriptors2 is None:
            raise ValueError(f"No descriptors found using {detector_name}")
        # 1. Brute-force matching
        matches = matcher.match(descriptors1, descriptors2)
        # 2. Distance statistics over all matches
        distances = np.array([m.distance for m in matches])
        mean_dist = np.mean(distances)
        std_dist = np.std(distances)
        # 3. Keep clearly-better-than-average matches
        threshold = mean_dist - 0.7 * std_dist
        good_matches = [m for m in matches if m.distance < threshold]
        if len(good_matches) < self.min_matches:
            # Fallback: first 50 matches.  NOTE(review): unlike the combined
            # path, `matches` is NOT sorted by distance here, so these are
            # not necessarily the 50 best matches — confirm intent.
            good_matches = matches[:min(50, len(matches))]
        if self.debug and img1 is not None and img2 is not None:
            # Visualize the accepted matches over the overlap strips
            overlap_match_img = cv2.drawMatches(img1, keypoints1, img2, keypoints2,
                                                good_matches, None,
                                                flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
            self.save_debug_image(overlap_match_img, f'match_420_overlap_matches_{detector_name}')
        # First transform estimate
        src_pts = np.float32([keypoints1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        # RANSAC
        transform_matrix, mask = cv2.estimateAffinePartial2D(
            src_pts, dst_pts,
            method=cv2.RANSAC,
            ransacReprojThreshold=3.0
        )
        # Keep only the RANSAC inliers.  NOTE(review): mask is None when
        # estimation fails, which raises here; callers appear to rely on the
        # surrounding try/except in detect_and_match_features — confirm.
        inliers = mask.ravel() == 1
        good_matches = [good_matches[i] for i in range(len(good_matches)) if inliers[i]]
        # Re-fit the transform on the inliers only
        src_pts = np.float32([keypoints1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        transform_matrix, mask = cv2.estimateAffinePartial2D(src_pts, dst_pts)
        # Score = inlier ratio of the refit; offsets = translation column
        match_score = np.sum(mask) / len(mask)
        offset_x = int(round(transform_matrix[0, 2]))
        offset_y = int(round(transform_matrix[1, 2]))
        # (Historical note: a 16/11 px shift between the two reference images
        # was well explained by these offsets.)
        if self.debug:
            with open(os.path.join(self.debug_dir, f'match_520_match_info_{detector_name}.txt'), 'w') as f:
                f.write(f"Transform matrix:\n{transform_matrix}\n")
                f.write(f"Offsets: ({offset_x}, {offset_y})\n")
                f.write(f"Match score: {match_score}\n")
                f.write(f"Number of initial matches: {len(matches)}\n")
                f.write(f"Number of good matches: {len(good_matches)}\n")
        return FeatureMatchResult(
            keypoints1=keypoints1,
            keypoints2=keypoints2,
            matches=good_matches,
            transform_matrix=transform_matrix,
            match_score=match_score,
            offset_x=offset_x,
            offset_y=offset_y
        )
  288. def save_debug_image(self, img, name, normalize=False):
  289. """保存调试图片"""
  290. try:
  291. if self.debug:
  292. save_path = os.path.join(self.debug_dir, f"{name}.jpg")
  293. if normalize:
  294. img_normalized = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX)
  295. cv2.imwrite(save_path, img_normalized)
  296. else:
  297. cv2.imwrite(save_path, img)
  298. if data_center_algo_inner_signals_obj is not None:
  299. print("信息",
  300. f"Debug: Saved {save_path}")
  301. return True, f"save_debug_image 成功: {save_path}"
  302. else:
  303. return False, "debug mode is not enabled"
  304. except Exception as e:
  305. msg = f"save_debug_image出现bug: {str(e)}"
  306. if data_center_algo_inner_signals_obj is not None:
  307. print("警告",
  308. msg)
  309. return False, msg
  310. def pad_image(self, img: np.ndarray, target_width: int = None, target_height: int = None) -> np.ndarray:
  311. """将图片填充到目标尺寸"""
  312. if self.debug:
  313. self.save_debug_image(img, 'pad_120_before_padding')
  314. current_height, current_width = img.shape[:2]
  315. target_width = target_width if target_width is not None else current_width
  316. target_height = target_height if target_height is not None else current_height
  317. if current_width == target_width and current_height == target_height:
  318. return img
  319. # 创建黑色背景
  320. padded_img = np.zeros((target_height, target_width, 3), dtype=np.uint8)
  321. # 将原图放在中心位置
  322. y_offset = (target_height - current_height) // 2
  323. x_offset = (target_width - current_width) // 2
  324. padded_img[y_offset:y_offset + current_height,
  325. x_offset:x_offset + current_width] = img
  326. if self.debug:
  327. self.save_debug_image(padded_img, 'pad_320_after_padding')
  328. return padded_img
  329. def split_image(self, img, is_left_top=True):
  330. """分割图片为重叠区域和非重叠区域"""
  331. height, width = img.shape[:2]
  332. overlap_width = min(self.estimate_overlap_pixels, width // 2)
  333. non_overlap_width = width - overlap_width
  334. if is_left_top:
  335. non_overlap_region = img[:, :non_overlap_width]
  336. overlap_region = img[:, non_overlap_width:]
  337. if self.debug:
  338. self.save_debug_image(non_overlap_region, 'sp_120_left_top_non_overlap')
  339. self.save_debug_image(overlap_region, 'sp_140_left_top_overlap')
  340. else:
  341. overlap_region = img[:, :overlap_width]
  342. non_overlap_region = img[:, overlap_width:]
  343. if self.debug:
  344. self.save_debug_image(overlap_region, 'sp_220_right_bottom_overlap')
  345. self.save_debug_image(non_overlap_region, 'sp_240_right_bottom_non_overlap')
  346. return overlap_region, non_overlap_region
    def stitch_horizontal(self, left_img: np.ndarray, right_img: np.ndarray) -> np.ndarray:
        """Stitch two images horizontally (left image + right image).

        Splits off the estimated overlap strips, matches features inside
        them, derives the measured overlap width and vertical offset from
        the matched translation, then blends with the configured blend_type
        (blend_* methods presumably come from BlendTypeMixin — confirm).

        Returns:
            The blended stitched image, or None for an unknown blend_type.
        """
        height, width = left_img.shape[:2]
        left_height, left_width = left_img.shape[:2]  # NOTE(review): unused duplicates of height/width
        overlap_width = min(self.estimate_overlap_pixels, width // 2)
        non_overlap_width = width - overlap_width  # NOTE(review): unused in this method
        # 1. Split both images into overlap / non-overlap strips
        left_overlap, left_non_overlap = self.split_image(left_img, is_left_top=True)
        right_overlap, right_non_overlap = self.split_image(right_img, is_left_top=False)
        if self.debug:
            # Save the overlap strips for inspection
            self.save_debug_image(left_overlap, 'h_120_left_overlap_region')
            self.save_debug_image(right_overlap, 'h_140_right_overlap_region')
        # 2. Feature detection and matching (overlap strips only)
        match_result = self.detect_and_match_features(left_overlap, right_overlap)
        offset_x = match_result.offset_x  # e.g. -16 on the reference images
        offset_y = match_result.offset_y  # e.g. -11 on the reference images
        match_score = match_result.match_score
        # The matched translation maps the right strip's (0,0) into the left
        # strip (e.g. to (16, 11)), so the measured overlap equals the
        # estimated strip width plus offset_x.
        self.real_overlap_width = overlap_width + offset_x  # e.g. 445; template matching gave 445 too
        real_overlap_width = self.real_overlap_width
        # Vertical offset of the right image relative to the left image
        y_offset_right2left = offset_y * (-1)
        # Final stitched-canvas size
        stitch_img_width = left_img.shape[1] + right_img.shape[1] - self.real_overlap_width
        stitch_img_height = max(left_img.shape[0], right_img.shape[0])
        if self.debug:
            with open(os.path.join(self.debug_dir, 'h_320_alignment_info.txt'), 'w') as f:
                f.write(f"match_result.offset_x: {match_result.offset_x}\n")
                f.write(f"match_result.offset_y: {match_result.offset_y}\n")
                f.write(f"match_score: {match_score}\n")
                f.write(f"real_overlap_width: {self.real_overlap_width}\n")
                f.write(f"y_offset_right2left: {y_offset_right2left}\n")
                f.write(f"stitch_img_width: {stitch_img_width}\n")
                f.write(f"stitch_img_height: {stitch_img_height}\n")
        # Dispatch to the configured blend strategy
        if self.blend_type == 'half_importance':
            blend_stitch_img = self.blend_half_importance(left_img, right_img, stitch_img_width, stitch_img_height,
                                                          y_offset_right2left, real_overlap_width)
        elif self.blend_type == 'right_first':
            # Right-image-first blending
            blend_stitch_img = self.blend_right_first(left_img, right_img, stitch_img_width, stitch_img_height,
                                                      y_offset_right2left)
        elif self.blend_type == 'left_first':
            blend_stitch_img = self.blend_left_first(left_img, right_img,
                                                     stitch_img_width,
                                                     stitch_img_height,
                                                     y_offset_right2left,
                                                     real_overlap_width)
        elif self.blend_type == 'half_importance_add_weight':
            blend_stitch_img = self.blend_half_importance_add_weight(left_img, right_img,
                                                                     stitch_img_width,
                                                                     stitch_img_height,
                                                                     y_offset_right2left,
                                                                     real_overlap_width,
                                                                     blend_ratio=self.blend_ratio)
        else:
            # Unknown blend type: no blending is performed.
            # NOTE(review): returns None here, which callers then pass to
            # cv2.imwrite — confirm whether this should raise instead.
            blend_stitch_img = None
        if self.debug:
            self.save_debug_image(blend_stitch_img, 'h_520_horizontal_stitch_img')
        return blend_stitch_img
  410. def stitch_vertical(self, top_img: np.ndarray, bottom_img: np.ndarray) -> np.ndarray:
  411. """垂直拼接两张图片"""
  412. try:
  413. # 将图片旋转后调用水平拼接
  414. top_rotated = cv2.rotate(top_img, cv2.ROTATE_90_COUNTERCLOCKWISE)
  415. bottom_rotated = cv2.rotate(bottom_img, cv2.ROTATE_90_COUNTERCLOCKWISE)
  416. if self.debug:
  417. self.save_debug_image(top_rotated, 'v_120_top_rotated')
  418. self.save_debug_image(bottom_rotated, 'v_140_bottom_rotated')
  419. result_rotated = self.stitch_horizontal(top_rotated, bottom_rotated)
  420. # 将结果旋转回来
  421. result = cv2.rotate(result_rotated, cv2.ROTATE_90_CLOCKWISE)
  422. if self.debug:
  423. self.save_debug_image(result, 'v_520_final_result_vertical')
  424. return result
  425. except Exception as e:
  426. error_info = f"Vertical stitching failed: {str(e)}"
  427. if data_center_algo_inner_signals_obj is not None:
  428. print("警告", error_info)
  429. raise
  430. def stitch_main(self, img1: np.ndarray, img2: np.ndarray) -> Tuple[np.ndarray, float]:
  431. """主拼接方法"""
  432. try:
  433. # 根据拼接类型选择不同的拼接方式
  434. if self.stitch_type == 'horizontal':
  435. # 确保两张图片高度相同
  436. max_height = max(img1.shape[0], img2.shape[0])
  437. img1 = self.pad_image(img1, target_height=max_height)
  438. img2 = self.pad_image(img2, target_height=max_height)
  439. result = self.stitch_horizontal(img1, img2)
  440. else: # vertical
  441. # 确保两张图片宽度相同
  442. max_width = max(img1.shape[1], img2.shape[1])
  443. img1 = self.pad_image(img1, target_width=max_width)
  444. img2 = self.pad_image(img2, target_width=max_width)
  445. result = self.stitch_vertical(img1, img2)
  446. self.best_score = self.match_score
  447. return result
  448. except Exception as e:
  449. error_info = f"Image stitching failed: {str(e)}"
  450. if data_center_algo_inner_signals_obj is not None:
  451. print("警告", error_info)
  452. raise
  453. # 测试代码
  454. if __name__ == '__main__':
  455. pass
  456. # 设置调试目录和重叠区域估计
  457. timestamp = time.strftime("%Y%m%d_%H%M%S")
  458. root_path = r"\_250115_Stitch_Image_TemplateMatch\test_images"
  459. root_path_obj = Path(root_path).absolute()
  460. stitch_type = "horizontal"
  461. debug_dir_str = str(root_path_obj / f'debug_{timestamp}_{stitch_type}')
  462. debug_dir_obj = Path(debug_dir_str).absolute()
  463. estimate_overlap_ratio = 0.45
  464. estimate_overlap_pixels = int(round(1024 * estimate_overlap_ratio))
  465. # 创建特征点拼接器实例
  466. stitcher = ImageStitcherKeyPoint(
  467. estimate_overlap_pixels=estimate_overlap_pixels,
  468. center_ratio=0.8,
  469. # stitch_type="horizontal",
  470. stitch_type="vertical",
  471. blend_type='half_importance_add_weight',
  472. debug=True,
  473. debug_dir=debug_dir_str,
  474. feature_detector='combine', # 可选: 'akaze', 'sift', 'orb', 'brisk', 'combine'
  475. blend_ratio=0.5,
  476. combine_detectors=False
  477. )
  478. # 读取测试图片
  479. img_left_name = "20250123_162407_0001.jpg"
  480. img_right_name = "20250123_162409_0002.jpg"
  481. img_bottom_name = "20250123_162422_0007.jpg"
  482. img_left_path = str(root_path_obj / img_left_name)
  483. img_right_path = str(root_path_obj / img_right_name)
  484. img_bottom_path = str(root_path_obj / img_bottom_name)
  485. img_left = cv2.imread(img_left_path)
  486. img_right = cv2.imread(img_right_path)
  487. img_bottom = cv2.imread(img_bottom_path)
  488. if img_left is None or img_right is None:
  489. print("Error: Could not read one or both images")
  490. sys.exit(1)
  491. # 记录开始时间
  492. start_time = time.time()
  493. try:
  494. # 执行拼接
  495. result_img = stitcher.stitch_main(img_left, img_bottom)
  496. # 保存结果
  497. save_final_image_path = str(debug_dir_obj / 'result_img.jpg')
  498. cv2.imwrite(save_final_image_path, result_img)
  499. # 计算并打印处理时间
  500. end_time = time.time()
  501. print(f"拼接完成!")
  502. print(f"处理时间: {end_time - start_time:.2f} 秒")
  503. print(f"匹配得分: {stitcher.best_score:.4f}")
  504. print(f"结果已保存为: {save_final_image_path}")
  505. except Exception as e:
  506. print(f"拼接过程中出错: {str(e)}")
  507. sys.exit(1)