AnlaAnla vor 9 Monaten
Commit
964887be5b

+ 137 - 0
Test/API测试.py

@@ -0,0 +1,137 @@
+import requests
+import os
+import io
+import zipfile
+from PIL import Image, ImageDraw
+
+# --- 配置 ---
+# 请确保你的 FastAPI 服务器正在运行,并修改此处的地址和端口
+BASE_URL = "http://127.0.0.1:7745/api"
+SINGLE_STITCH_URL = f"{BASE_URL}/stitch/"
+BATCH_STITCH_URL = f"{BASE_URL}/stitch/batch/"
+
+
+# --- 测试函数 1:单个拼图接口 ---
+
def single_puzzle_api(zipfile_path):
    """Test the /stitch/ endpoint (single puzzle) with the template-match method.

    Args:
        zipfile_path: Path to a ZIP archive containing the tile images.

    Side effects:
        On success, writes the stitched image to 'stitched_single_result.jpg'
        in the current working directory; otherwise prints the error.
    """
    print("--- 1. 开始测试: 单个拼图接口 (/stitch) ---")

    # Form fields understood by the server's template_match stitcher.
    form_data = {
        'method': 'template_match',
        'num_cols': 4,
        'num_rows': 6,
        'overlap_h': 405,
        'overlap_v': 440,
        'tm_blend_type': 'half_importance_add_weight',
        'tm_light_compensation': True,
    }

    try:
        # Stream the archive from disk while the request is in flight.
        with open(zipfile_path, "rb") as f:
            files = {
                'zip_file': (os.path.basename(zipfile_path), f, 'application/zip')
            }
            print("向服务器发送请求...")
            # Fix: the original comment promised a long timeout but never set
            # one; without it a hung server would block this test forever.
            response = requests.post(SINGLE_STITCH_URL, data=form_data, files=files, timeout=600)

        print(f"服务器响应状态码: {response.status_code}")

        if response.status_code == 200:
            # Fix: headers.get() may return None when the header is absent;
            # default to '' so the substring check cannot raise TypeError.
            content_type = response.headers.get('content-type', '')
            print(f"响应内容类型: {content_type}")

            if 'image/jpeg' in content_type:
                # Save the returned image bytes to a local file.
                output_filename = "stitched_single_result.jpg"
                with open(output_filename, "wb") as f:
                    f.write(response.content)
                print(f"✅ 成功! 拼接后的大图已保存为: {output_filename}")
            else:
                print(f"❌ 失败! 期望得到 'image/jpeg',但收到了 '{content_type}'")
        else:
            # Non-200: surface the server's error payload.
            print(f"❌ 请求失败! 错误信息: {response.text}")

    except requests.exceptions.RequestException as e:
        print(f"❌ 请求异常! 无法连接到服务器: {e}")

    print("-" * 40 + "\n")
+
+
+# --- 测试函数 2:批量拼图接口 ---
+
def batch_puzzle_api(zipfile_path):
    """Test the /stitch/batch endpoint (batch stitching).

    Sends the archive with template-match parameters and, on success, saves
    the returned ZIP of stitched results and unzips it for inspection.

    Note: the original docstring claimed the key-point method was used here,
    but the form data actually selects 'template_match' — fixed to match.
    """
    print("--- 2. 开始测试: 批量拼图接口 (/stitch/batch) ---")

    # Form fields for the template-match stitcher.
    form_data = {
        'method': 'template_match',
        'num_cols': 4,
        'num_rows': 6,
        'overlap_h': 405,
        'overlap_v': 440,
        'tm_blend_type': 'half_importance_add_weight',
        'tm_light_compensation': True,
    }

    try:
        with open(zipfile_path, "rb") as f:
            files = {
                'zip_file': (os.path.basename(zipfile_path), f, 'application/zip')
            }
            print("向服务器发送请求...")
            # Fix: batch stitching is the slowest operation; set an explicit
            # (generous) timeout so a hung server cannot block forever.
            response = requests.post(BATCH_STITCH_URL, data=form_data, files=files, timeout=1800)

        print(f"服务器响应状态码: {response.status_code}")

        if response.status_code == 200:
            # Fix: default to '' so the membership test cannot raise
            # TypeError when the content-type header is missing.
            content_type = response.headers.get('content-type', '')
            print(f"响应内容类型: {content_type}")

            if 'application/zip' in content_type:
                # Save the returned ZIP archive locally.
                output_filename = "stitched_batch_result.zip"
                with open(output_filename, "wb") as f:
                    f.write(response.content)
                print(f"✅ 成功! 包含拼接结果的ZIP包已保存为: {output_filename}")

                # (Optional) unzip and list the results for a quick sanity check.
                try:
                    extract_dir = "batch_results_unzipped"
                    os.makedirs(extract_dir, exist_ok=True)
                    with zipfile.ZipFile(output_filename, 'r') as zf:
                        zf.extractall(extract_dir)
                    print(f"  - 结果已自动解压到 '{extract_dir}' 文件夹,包含文件: {os.listdir(extract_dir)}")
                except Exception as e:
                    # Best-effort step: a corrupt archive should not fail the test.
                    print(f"  - 解压返回的ZIP文件时出错: {e}")
            else:
                print(f"❌ 失败! 期望得到 'application/zip',但收到了 '{content_type}'")
        else:
            print(f"❌ 请求失败! 错误信息: {response.text}")

    except requests.exceptions.RequestException as e:
        print(f"❌ 请求异常! 无法连接到服务器: {e}")

    print("-" * 40 + "\n")
+
+
+if __name__ == "__main__":
+    # 依次运行两个测试函数
+    single_puzzle_api(r"C:\Code\ML\Project\StitchImageServer\temp\_250801_1043_0001.zip")
+    batch_puzzle_api(r"C:\Code\ML\Project\StitchImageServer\temp\Input.zip")

+ 78 - 0
Test/batch_resize.py

@@ -0,0 +1,78 @@
+import cv2
+import glob
+import os
+import time
+from pathlib import Path
+from concurrent.futures import ProcessPoolExecutor, as_completed
+from tqdm import tqdm
+
+
def resize_image(img_path: str, target_size: tuple):
    """Resize one image file in place to *target_size* (width, height).

    Returns:
        The file path as a string on success, or None when the image cannot
        be read or any other error occurs (errors are printed, not raised).
    """
    try:
        source = cv2.imread(img_path)

        # Unreadable / missing files are skipped with a warning.
        if source is None:
            print(f"警告: 无法读取图片 {img_path},已跳过。")
            return None

        # Scale and overwrite the original file.
        scaled = cv2.resize(source, target_size)
        cv2.imwrite(str(img_path), scaled)
        return str(img_path)

    except Exception as e:
        # Swallow per-file failures so one bad image doesn't stop the batch.
        print(f"处理图片 {img_path} 时发生错误: {e}")
        return None
+
+
def main():
    """Batch-resize every matched JPEG in place using a process pool."""
    INPUT_PATTERN = r'C:\Code\ML\Project\StitchImageServer\temp\Input\*\*.jpg'

    # Target size as (width, height).
    TARGET_SIZE = (1024, 1024)

    # Worker-process count; None would mean "one per available CPU core".
    MAX_WORKERS = 4

    # Collect every image matching the pattern.
    img_paths = glob.glob(INPUT_PATTERN)
    if not img_paths:
        print(f"在模式 '{INPUT_PATTERN}' 下未找到任何图片。")
        return

    print(f"找到 {len(img_paths)} 张图片待处理。")

    # --- Parallel processing with a process pool ---
    start_time = time.time()
    processed_count = 0

    with ProcessPoolExecutor(max_workers=MAX_WORKERS) as executor:
        futures = {executor.submit(resize_image, path, TARGET_SIZE): path for path in img_paths}

        # tqdm drives the progress bar as tasks finish in completion order.
        for finished in tqdm(as_completed(futures), total=len(img_paths), desc="批量缩放图片"):
            if finished.result():
                # A truthy result (the output path) means the resize succeeded.
                processed_count += 1

    end_time = time.time()

    # --- Summary ---
    print("\n--- 任务完成 ---")
    print(f"成功处理了 {processed_count} / {len(img_paths)} 张图片。")
    print(f"总耗时: {end_time - start_time:.2f} 秒。")
+
+
# This guard is essential for multiprocessing: without it, worker processes
# spawned on Windows/macOS would re-execute the module top level and recurse.
if __name__ == '__main__':
    main()

+ 160 - 0
Test/key_point_test.py

@@ -0,0 +1,160 @@
+import cv2
+import os
+import time
+from pathlib import Path
+import re
+from tqdm import tqdm
+
+# 导入您提供的拼接器类
+from fry_project_classes.stitch_img_key_point import ImageStitcherKeyPoint
+
+
def natural_sort_key(s):
    """Return a sort key giving natural (numeric-aware) ordering,
    so '2.jpg' sorts before '10.jpg'.
    """
    def _piece(token):
        # Digit runs compare numerically; everything else case-insensitively.
        return int(token) if token.isdigit() else token.lower()

    return [_piece(token) for token in re.split(r'(\d+)', str(s))]
+
+
def stitch_img(IMAGE_DIR, OUTPUT_DIR, NUM_COLS: int, NUM_ROWS: int,
               ESTIMATE_OVERLAP_HORIZONTAL_PIXELS: int, ESTIMATE_OVERLAP_VERTICAL_PIXELS: int,
               BLEND_TYPE: str, FeatureDetector: str,
               DEBUG_MODE: bool):
    """Stitch a NUM_ROWS x NUM_COLS tile grid into one panorama (key-point method).

    Stage 1 stitches each row of tiles horizontally; stage 2 stitches the row
    strips vertically. Row strips and the final panorama are saved to OUTPUT_DIR.

    Args:
        IMAGE_DIR: pathlib.Path of the folder holding the *.jpg tiles.
        OUTPUT_DIR: pathlib.Path for intermediate and final images (created if absent).
        NUM_COLS / NUM_ROWS: grid dimensions; expects exactly NUM_COLS*NUM_ROWS tiles.
        ESTIMATE_OVERLAP_HORIZONTAL_PIXELS / ESTIMATE_OVERLAP_VERTICAL_PIXELS:
            estimated overlap between neighbouring tiles, in pixels.
        BLEND_TYPE: blend mode name passed to ImageStitcherKeyPoint.
        FeatureDetector: detector name (e.g. 'sift', 'orb', 'brisk', 'combine').
        DEBUG_MODE: when True the stitcher dumps intermediate debug images.
    """
    OUTPUT_DIR.mkdir(exist_ok=True)  # create the output folder

    print("--- 图像拼接开始 ---")
    print(f"配置: {NUM_ROWS}行 x {NUM_COLS}列")
    print(f"图片目录: {IMAGE_DIR}")
    print(f"输出目录: {OUTPUT_DIR}")
    print(f"水平重叠预估: {ESTIMATE_OVERLAP_HORIZONTAL_PIXELS}px, 垂直重叠预估: {ESTIMATE_OVERLAP_VERTICAL_PIXELS}px")
    print(f"融合模式: {BLEND_TYPE}, 特征检测器类型: {FeatureDetector}")

    # Load tiles in natural (numeric-aware) order so 10.jpg follows 2.jpg.
    image_paths = sorted(list(IMAGE_DIR.glob("*.jpg")), key=natural_sort_key)

    if len(image_paths) != NUM_COLS * NUM_ROWS:
        print(f"错误: 找到 {len(image_paths)} 张图片, 但预期需要 {NUM_COLS * NUM_ROWS} 张。")
        return

    # --- Stage 1: stitch each row horizontally ---
    stitched_rows = []
    print("\n--- 阶段一: 水平拼接每一行 ---")

    for i in tqdm(range(NUM_ROWS), desc="处理行"):
        row_start_index = i * NUM_COLS
        row_image_paths = image_paths[row_start_index: row_start_index + NUM_COLS]

        # Start the strip from the row's first tile.
        current_row_image = cv2.imread(str(row_image_paths[0]))
        if current_row_image is None:
            print(f"错误: 无法读取图片 {row_image_paths[0]}")
            continue  # skip this row; stage 2 iterates only successfully stitched rows

        # Append each following tile to the right edge of the growing strip.
        for j in range(1, NUM_COLS):
            # A fresh stitcher per pair keeps each pair's debug folder isolated.
            stitcher_h = ImageStitcherKeyPoint(
                estimate_overlap_pixels=ESTIMATE_OVERLAP_HORIZONTAL_PIXELS,
                stitch_type="horizontal",
                blend_type=BLEND_TYPE,
                feature_detector=FeatureDetector,
                blend_ratio=0.5,
                debug=DEBUG_MODE,
                debug_dir=str(OUTPUT_DIR / f'debug_h_row{i + 1}_col{j}vs{j + 1}')
            )

            next_image = cv2.imread(str(row_image_paths[j]))
            if next_image is None:
                print(f"错误: 无法读取图片 {row_image_paths[j]}")
                break

            current_row_image = stitcher_h.stitch_main(current_row_image, next_image)

        # Persist the finished row strip.
        row_output_path = OUTPUT_DIR / f"stitched_row_{i + 1}.jpg"
        cv2.imwrite(str(row_output_path), current_row_image)
        stitched_rows.append(current_row_image)
        tqdm.write(f"第 {i + 1} 行拼接完成, 已保存至 {row_output_path}")

    # --- Stage 2: stitch the row strips vertically ---
    print("\n--- 阶段二: 垂直拼接所有行 ---")
    if not stitched_rows:
        print("错误: 没有成功拼接的行,无法进行垂直拼接。")
        return

    final_image = stitched_rows[0]

    # Fix: iterate over the rows actually stitched. The original used
    # range(1, NUM_ROWS), which raises IndexError when a row was skipped
    # by the `continue` above (stitched_rows is then shorter than NUM_ROWS).
    for i in tqdm(range(1, len(stitched_rows)), desc="拼接行"):
        # Vertical stitcher for strip i-1 on top of strip i.
        stitcher_v = ImageStitcherKeyPoint(
            estimate_overlap_pixels=ESTIMATE_OVERLAP_VERTICAL_PIXELS,
            stitch_type="vertical",
            blend_type=BLEND_TYPE,
            feature_detector=FeatureDetector,
            blend_ratio=0.5,
            debug=DEBUG_MODE,
            debug_dir=str(OUTPUT_DIR / f'debug_v_row{i}vs{i + 1}')
        )

        next_row_image = stitched_rows[i]
        final_image = stitcher_v.stitch_main(final_image, next_row_image)

    # --- Save the final panorama ---
    final_output_path = OUTPUT_DIR / "final_stitched_image.jpg"
    cv2.imwrite(str(final_output_path), final_image)

    print("\n--- 所有拼接任务完成!---")
    print(f"最终的全景图已保存至: {final_output_path}")
+
+
def main():
    """Sweep every blend mode over one tile set and time each run."""
    # --- Configuration ---

    # Input tiles (machine-specific path).
    IMAGE_DIR = Path(r"C:\Code\ML\Project\StitchImageServer\temp\Input\_250801_1141_0029")

    # Puzzle grid layout.
    NUM_COLS = 4
    NUM_ROWS = 6

    # Estimated overlap in pixels; tune to the actual capture overlap
    # (e.g. a 1920px-wide tile with 25% overlap -> roughly 480px).
    ESTIMATE_OVERLAP_HORIZONTAL_PIXELS = 500 * 4
    ESTIMATE_OVERLAP_VERTICAL_PIXELS = 500 * 4

    # Blend modes to benchmark.
    blend_type_list = ["half_importance", "right_first", "left_first", "half_importance_add_weight"]

    # Debug mode dumps many intermediate images for analysis.
    DEBUG_MODE = False

    base_dir_path = r"C:\Code\ML\Project\StitchImageServer\temp\output"
    for index, BLEND_TYPE in enumerate(blend_type_list):
        # One output folder per blend mode, e.g. "0_half_importance".
        OUTPUT_DIR = Path(os.path.join(base_dir_path, f"{index}_{BLEND_TYPE}"))

        t0 = time.time()
        stitch_img(IMAGE_DIR=IMAGE_DIR, OUTPUT_DIR=OUTPUT_DIR, NUM_COLS=NUM_COLS, NUM_ROWS=NUM_ROWS,
                   ESTIMATE_OVERLAP_HORIZONTAL_PIXELS=ESTIMATE_OVERLAP_HORIZONTAL_PIXELS,
                   ESTIMATE_OVERLAP_VERTICAL_PIXELS=ESTIMATE_OVERLAP_VERTICAL_PIXELS,
                   BLEND_TYPE=BLEND_TYPE, FeatureDetector="combine",
                   DEBUG_MODE=DEBUG_MODE)
        print(f"{BLEND_TYPE}: {time.time() - t0}")
+
+
+
if __name__ == '__main__':
    # Time the full sweep over all blend-type configurations.
    start_time = time.time()
    main()
    end_time = time.time()
    print(f"\n总耗时: {end_time - start_time:.2f} 秒")

+ 196 - 0
Test/key_point_多线程_test.py

@@ -0,0 +1,196 @@
+import cv2
+import os
+import time
+from pathlib import Path
+import re
+from tqdm import tqdm
+import concurrent.futures  # 导入并发库
+
+# 导入您提供的拼接器类
+from fry_project_classes.stitch_img_key_point import ImageStitcherKeyPoint
+
+
def natural_sort_key(s):
    """Natural-order sort key: digit runs compare as integers, so
    '2.jpg' sorts before '10.jpg'.
    """
    tokens = re.split(r'(\d+)', str(s))
    return [int(t) if t.isdigit() else t.lower() for t in tokens]
+
+
+# --- 新增:用于并行处理的"任务单元"函数 ---
def stitch_single_row_keypoint(row_index, row_image_paths, stitch_params):
    """
    Stitch one row of tiles with the key-point method; runs in a worker process.

    Args:
        row_index (int): Zero-based index of this row (used for debug-dir naming).
        row_image_paths (list): Paths of this row's images, left to right.
        stitch_params (dict): Shared stitching parameters for every row.

    Returns:
        tuple: (row_index, stitched_row_image); the image is None when the
        first tile cannot be read, or the partial strip on a mid-row failure.
    """
    # Unpack the shared parameter dict.
    NUM_COLS = len(row_image_paths)
    OUTPUT_DIR = stitch_params['OUTPUT_DIR']
    ESTIMATE_OVERLAP_HORIZONTAL_PIXELS = stitch_params['ESTIMATE_OVERLAP_HORIZONTAL_PIXELS']
    BLEND_TYPE = stitch_params['BLEND_TYPE']
    FeatureDetector = stitch_params['FeatureDetector']
    DEBUG_MODE = stitch_params['DEBUG_MODE']

    # Load the row's first tile as the initial strip.
    current_row_image = cv2.imread(str(row_image_paths[0]))
    if current_row_image is None:
        print(f"错误: 无法读取图片 {row_image_paths[0]}")
        return row_index, None

    # Append each following tile to the right edge of the growing strip.
    for j in range(1, NUM_COLS):
        # A fresh stitcher per pair keeps each pair's debug folder isolated.
        stitcher_h = ImageStitcherKeyPoint(
            estimate_overlap_pixels=ESTIMATE_OVERLAP_HORIZONTAL_PIXELS,
            stitch_type="horizontal",
            blend_type=BLEND_TYPE,
            feature_detector=FeatureDetector,
            blend_ratio=0.5,
            debug=DEBUG_MODE,
            debug_dir=str(OUTPUT_DIR / f'debug_h_row{row_index + 1}_col{j}vs{j + 1}')
        )

        next_image = cv2.imread(str(row_image_paths[j]))
        if next_image is None:
            print(f"错误: 无法读取图片 {row_image_paths[j]}")
            # Mid-row read failure: return what has been stitched so far.
            return row_index, current_row_image

        current_row_image = stitcher_h.stitch_main(current_row_image, next_image)

    # Return the index too so the parent can place results in row order.
    return row_index, current_row_image
+
+
+# --- 优化后的主拼接函数 ---
def stitch_img(IMAGE_DIR, OUTPUT_DIR, NUM_COLS: int, NUM_ROWS: int,
               ESTIMATE_OVERLAP_HORIZONTAL_PIXELS: int, ESTIMATE_OVERLAP_VERTICAL_PIXELS: int,
               BLEND_TYPE: str, FeatureDetector: str,
               DEBUG_MODE: bool):
    """Stitch a NUM_ROWS x NUM_COLS tile grid (key-point method), rows in parallel.

    Stage 1 stitches every row concurrently in a process pool; stage 2
    stitches the finished row strips vertically in this process. Row strips
    and the final panorama are written to OUTPUT_DIR. Aborts with a printed
    error if any row fails.
    """
    OUTPUT_DIR.mkdir(exist_ok=True)

    print("--- 图像拼接开始 ---")
    print(f"配置: {NUM_ROWS}行 x {NUM_COLS}列")
    print(f"图片目录: {IMAGE_DIR}")
    print(f"输出目录: {OUTPUT_DIR}")
    print(f"水平重叠预估: {ESTIMATE_OVERLAP_HORIZONTAL_PIXELS}px, 垂直重叠预估: {ESTIMATE_OVERLAP_VERTICAL_PIXELS}px")
    print(f"融合模式: {BLEND_TYPE}, 特征检测器类型: {FeatureDetector}")

    # Tiles in natural (numeric-aware) row-major order.
    image_paths = sorted(list(IMAGE_DIR.glob("*.jpg")), key=natural_sort_key)
    if len(image_paths) != NUM_COLS * NUM_ROWS:
        print(f"错误: 找到 {len(image_paths)} 张图片, 但预期需要 {NUM_COLS * NUM_ROWS} 张。")
        return

    # --- Stage 1: stitch each row horizontally, in parallel (the key optimization) ---
    print("\n--- 阶段一: 并行水平拼接每一行 ---")

    # Bundle the fixed parameters into one dict for easy hand-off to workers.
    stitch_params = {
        'OUTPUT_DIR': OUTPUT_DIR,
        'ESTIMATE_OVERLAP_HORIZONTAL_PIXELS': ESTIMATE_OVERLAP_HORIZONTAL_PIXELS,
        'BLEND_TYPE': BLEND_TYPE,
        'FeatureDetector': FeatureDetector,
        'DEBUG_MODE': DEBUG_MODE
    }

    stitched_rows = [None] * NUM_ROWS  # pre-allocated so results land in row order

    with concurrent.futures.ProcessPoolExecutor() as executor:
        # Submit one task per row.
        futures = []
        for i in range(NUM_ROWS):
            row_start_index = i * NUM_COLS
            row_image_paths = image_paths[row_start_index: row_start_index + NUM_COLS]
            future = executor.submit(stitch_single_row_keypoint, i, row_image_paths, stitch_params)
            futures.append(future)

        # as_completed yields each row as soon as its worker finishes.
        for future in tqdm(concurrent.futures.as_completed(futures), total=NUM_ROWS, desc="处理行"):
            try:
                row_index, result_image = future.result()
                if result_image is not None:
                    stitched_rows[row_index] = result_image
                    # Persist the finished row strip.
                    row_output_path = OUTPUT_DIR / f"stitched_row_{row_index + 1}.jpg"
                    cv2.imwrite(str(row_output_path), result_image)
                    tqdm.write(f"第 {row_index + 1} 行拼接完成, 已保存至 {row_output_path}")
                else:
                    tqdm.write(f"第 {row_index + 1} 行拼接失败。")
            except Exception as exc:
                tqdm.write(f"一个行拼接任务生成了异常: {exc}")

    # Abort before stage 2 if any row failed — vertical stitching needs them all.
    if any(row is None for row in stitched_rows):
        print("错误: 存在拼接失败的行,无法进行垂直拼接。")
        return

    # --- Stage 2: stitch the row strips vertically (kept serial) ---
    print("\n--- 阶段二: 垂直拼接所有行 ---")

    final_image = stitched_rows[0]

    for i in tqdm(range(1, NUM_ROWS), desc="拼接行"):
        stitcher_v = ImageStitcherKeyPoint(
            estimate_overlap_pixels=ESTIMATE_OVERLAP_VERTICAL_PIXELS,
            stitch_type="vertical",
            blend_type=BLEND_TYPE,
            feature_detector=FeatureDetector,
            blend_ratio=0.5,
            debug=DEBUG_MODE,
            debug_dir=str(OUTPUT_DIR / f'debug_v_row{i}vs{i + 1}')
        )
        next_row_image = stitched_rows[i]
        final_image = stitcher_v.stitch_main(final_image, next_row_image)

    # --- Save the final panorama ---
    final_output_path = OUTPUT_DIR / "final_stitched_image.jpg"
    cv2.imwrite(str(final_output_path), final_image)

    print("\n--- 所有拼接任务完成!---")
    print(f"最终的全景图已保存至: {final_output_path}")
+
+
def main():
    """Sweep blend-mode x feature-detector combinations and time each run."""
    # --- Configuration ---
    IMAGE_DIR = Path(r"C:\Code\ML\Project\StitchImageServer\temp\Input\_250801_1146_0034")
    NUM_COLS = 4
    NUM_ROWS = 6
    # Estimated tile overlap in pixels.
    ESTIMATE_OVERLAP_HORIZONTAL_PIXELS = 405
    ESTIMATE_OVERLAP_VERTICAL_PIXELS = 440

    # Defaults elsewhere are half_importance_add_weight and combine.
    blend_type_list = ['half_importance', 'right_first', "half_importance_add_weight"]
    feature_list = ['sift', 'orb', 'brisk', 'combine']
    DEBUG_MODE = False

    # Keypoint runs get their own base directory.
    base_dir_path = r"C:\Code\ML\Project\StitchImageServer\temp\key_output"
    for BLEND_TYPE in blend_type_list:
        for index, feature_type in enumerate(feature_list):
            run_name = f"{index}_{BLEND_TYPE}_{feature_type}"
            OUTPUT_DIR = Path(os.path.join(base_dir_path, run_name))

            print("\n" + "=" * 80)
            print(f"开始测试配置: {run_name}")

            t0 = time.time()
            stitch_img(IMAGE_DIR=IMAGE_DIR, OUTPUT_DIR=OUTPUT_DIR, NUM_COLS=NUM_COLS, NUM_ROWS=NUM_ROWS,
                       ESTIMATE_OVERLAP_HORIZONTAL_PIXELS=ESTIMATE_OVERLAP_HORIZONTAL_PIXELS,
                       ESTIMATE_OVERLAP_VERTICAL_PIXELS=ESTIMATE_OVERLAP_VERTICAL_PIXELS,
                       BLEND_TYPE=BLEND_TYPE, FeatureDetector=feature_type,
                       DEBUG_MODE=DEBUG_MODE)

            print("\n--- 单次配置完成 ---")
            print(f"用时: {time.time() - t0:.2f} 秒, 配置: {run_name}")
            print("=" * 80)
+
+
if __name__ == '__main__':
    # Time the full sweep over all configurations.
    start_time = time.time()
    main()
    end_time = time.time()

    print(f"\n总耗时: {end_time - start_time:.2f} 秒")

+ 180 - 0
Test/template_match_test.py

@@ -0,0 +1,180 @@
+import cv2
+import os
+import time
+from pathlib import Path
+import re
+from tqdm import tqdm
+
+# 导入您提供的拼接器类
+from fry_project_classes.stitch_img_template_match import ImageStitcherTemplateMatch
+
+
def natural_sort_key(s):
    """Sort key for natural ordering: numeric runs compare by value,
    e.g. '2.jpg' comes before '10.jpg'.
    """
    key = []
    for chunk in re.split(r'(\d+)', str(s)):
        key.append(int(chunk) if chunk.isdigit() else chunk.lower())
    return key
+
+
def stitch_img(IMAGE_DIR, OUTPUT_DIR, NUM_COLS: int, NUM_ROWS: int,
               ESTIMATE_OVERLAP_HORIZONTAL_PIXELS: int, ESTIMATE_OVERLAP_VERTICAL_PIXELS: int,
               BLEND_TYPE: str, LIGHT_COMPENSATION: bool,
               DEBUG_MODE: bool):
    """Stitch a NUM_ROWS x NUM_COLS tile grid into one panorama (template-match method).

    Stage 1 stitches each row of tiles horizontally; stage 2 stitches the row
    strips vertically. Row strips and the final panorama are saved to OUTPUT_DIR.

    Args:
        IMAGE_DIR: pathlib.Path of the folder holding the *.jpg tiles.
        OUTPUT_DIR: pathlib.Path for intermediate and final images (created if absent).
        NUM_COLS / NUM_ROWS: grid dimensions; expects exactly NUM_COLS*NUM_ROWS tiles.
        ESTIMATE_OVERLAP_HORIZONTAL_PIXELS / ESTIMATE_OVERLAP_VERTICAL_PIXELS:
            estimated overlap between neighbouring tiles, in pixels.
        BLEND_TYPE: blend mode name passed to ImageStitcherTemplateMatch.
        LIGHT_COMPENSATION: enable the stitcher's light-uniformity compensation.
        DEBUG_MODE: when True the stitcher dumps intermediate debug images.
    """
    OUTPUT_DIR.mkdir(exist_ok=True)  # create the output folder

    print("--- 图像拼接开始 ---")
    print(f"配置: {NUM_ROWS}行 x {NUM_COLS}列")
    print(f"图片目录: {IMAGE_DIR}")
    print(f"输出目录: {OUTPUT_DIR}")
    print(f"水平重叠预估: {ESTIMATE_OVERLAP_HORIZONTAL_PIXELS}px, 垂直重叠预估: {ESTIMATE_OVERLAP_VERTICAL_PIXELS}px")
    print(f"融合模式: {BLEND_TYPE}, 光照补偿: {'启用' if LIGHT_COMPENSATION else '禁用'}")

    # Load tiles in natural (numeric-aware) order so 10.jpg follows 2.jpg.
    image_paths = sorted(list(IMAGE_DIR.glob("*.jpg")), key=natural_sort_key)

    if len(image_paths) != NUM_COLS * NUM_ROWS:
        print(f"错误: 找到 {len(image_paths)} 张图片, 但预期需要 {NUM_COLS * NUM_ROWS} 张。")
        return

    # --- Stage 1: stitch each row horizontally ---
    stitched_rows = []
    print("\n--- 阶段一: 水平拼接每一行 ---")

    for i in tqdm(range(NUM_ROWS), desc="处理行"):
        row_start_index = i * NUM_COLS
        row_image_paths = image_paths[row_start_index: row_start_index + NUM_COLS]

        # Start the strip from the row's first tile.
        current_row_image = cv2.imread(str(row_image_paths[0]))
        if current_row_image is None:
            print(f"错误: 无法读取图片 {row_image_paths[0]}")
            continue  # skip this row; stage 2 iterates only successfully stitched rows

        # Append each following tile to the right edge of the growing strip.
        for j in range(1, NUM_COLS):
            # A fresh stitcher per pair keeps each pair's debug folder isolated.
            stitcher_h = ImageStitcherTemplateMatch(
                estimate_overlap_pixels=ESTIMATE_OVERLAP_HORIZONTAL_PIXELS,
                stitch_type="horizontal",
                blend_type=BLEND_TYPE,
                light_uniformity_compensation_enabled=LIGHT_COMPENSATION,
                light_uniformity_compensation_width=30,  # width of the compensation band
                debug=DEBUG_MODE,
                debug_dir=str(OUTPUT_DIR / f'debug_h_row{i + 1}_col{j}vs{j + 1}')
            )

            next_image = cv2.imread(str(row_image_paths[j]))
            if next_image is None:
                print(f"错误: 无法读取图片 {row_image_paths[j]}")
                break

            current_row_image = stitcher_h.stitch_main(current_row_image, next_image)

        # Persist the finished row strip.
        row_output_path = OUTPUT_DIR / f"stitched_row_{i + 1}.jpg"
        cv2.imwrite(str(row_output_path), current_row_image)
        stitched_rows.append(current_row_image)
        tqdm.write(f"第 {i + 1} 行拼接完成, 已保存至 {row_output_path}")

    # --- Stage 2: stitch the row strips vertically ---
    print("\n--- 阶段二: 垂直拼接所有行 ---")
    if not stitched_rows:
        print("错误: 没有成功拼接的行,无法进行垂直拼接。")
        return

    final_image = stitched_rows[0]

    # Fix: iterate over the rows actually stitched. The original used
    # range(1, NUM_ROWS), which raises IndexError when a row was skipped
    # by the `continue` above (stitched_rows is then shorter than NUM_ROWS).
    for i in tqdm(range(1, len(stitched_rows)), desc="拼接行"):
        # Vertical stitcher for strip i-1 on top of strip i.
        stitcher_v = ImageStitcherTemplateMatch(
            estimate_overlap_pixels=ESTIMATE_OVERLAP_VERTICAL_PIXELS,
            stitch_type="vertical",
            blend_type=BLEND_TYPE,
            light_uniformity_compensation_enabled=LIGHT_COMPENSATION,
            light_uniformity_compensation_width=30,
            debug=DEBUG_MODE,
            debug_dir=str(OUTPUT_DIR / f'debug_v_row{i}vs{i + 1}')
        )

        next_row_image = stitched_rows[i]
        final_image = stitcher_v.stitch_main(final_image, next_row_image)

    # --- Save the final panorama ---
    final_output_path = OUTPUT_DIR / "final_stitched_image.jpg"
    cv2.imwrite(str(final_output_path), final_image)

    print("\n--- 所有拼接任务完成!---")
    print(f"最终的全景图已保存至: {final_output_path}")
+
+
def main():
    """Main entry: sweep every blend mode over one tile set and time each run."""
    # --- 1. Configuration ---

    # Input tile directory (machine-specific path).
    IMAGE_DIR = Path(r"C:\Code\ML\Project\StitchImageServer\temp\input\_250801_1142_0030")
    # OUTPUT_DIR = Path(r"C:\Code\ML\Project\StitchImageServer\temp\output")

    # Puzzle grid layout.
    NUM_COLS = 4
    NUM_ROWS = 6

    # Estimated overlap = tile size (1024 px) times the estimated overlap ratio.
    # (e.g. a 1920px-wide tile with 25% overlap -> 1920 * 0.25 ≈ 480px.)
    estimate_overlap_ratio = 0.45
    ESTIMATE_OVERLAP_HORIZONTAL_PIXELS = int(round(1024 * estimate_overlap_ratio))
    ESTIMATE_OVERLAP_VERTICAL_PIXELS = int(round(1024 * estimate_overlap_ratio))

    # Manual quality/speed ratings of each blend mode from earlier runs
    # (bare string literal, a no-op at runtime; kept verbatim as notes).
    '''
    前五个都不行
    half_importance,right_first,left_first 0星
    ⭐half_importance_add_weight 2星, 49秒
    half_importance_global_brightness 0星, 49秒
    half_importance_partial_brightness 还行, 4星, 速度适中 ,99秒
    blend_half_importance_partial_HV 不错 5星, 慢, 107秒
    blend_half_importance_partial_SV 不错, 5星, 慢, 108秒
    blend_half_importance_partial_HSV 很不错, 5星, 很慢, 120秒
    ⭐blend_half_importance_partial_brightness_add_weight: 5星, 106秒
    '''

    # Blend modes to benchmark in this sweep.
    blend_type_list = ["half_importance_add_weight",
                       "half_importance_global_brightness", "half_importance_partial_brightness",
                       "blend_half_importance_partial_HV", "blend_half_importance_partial_SV",
                       "blend_half_importance_partial_HSV", "blend_half_importance_partial_brightness_add_weight"]
    # BLEND_TYPE = 'blend_half_importance_partial_HSV'

    # Light-uniformity compensation (recommended on for better seams).
    LIGHT_COMPENSATION = True

    # Debug mode dumps many intermediate images for analysis.
    DEBUG_MODE = False

    for i, BLEND_TYPE in enumerate(blend_type_list):
        # One output folder per blend mode, e.g. "0_half_importance_add_weight".
        base_dir_path = r"C:\Code\ML\Project\StitchImageServer\temp\output"
        img_dir_name = f"{i}_{BLEND_TYPE}"
        OUTPUT_DIR = Path(os.path.join(base_dir_path, img_dir_name))

        one_img_time = time.time()
        stitch_img(IMAGE_DIR=IMAGE_DIR, OUTPUT_DIR=OUTPUT_DIR, NUM_COLS=NUM_COLS, NUM_ROWS=NUM_ROWS,
                   ESTIMATE_OVERLAP_HORIZONTAL_PIXELS=ESTIMATE_OVERLAP_HORIZONTAL_PIXELS,
                   ESTIMATE_OVERLAP_VERTICAL_PIXELS=ESTIMATE_OVERLAP_VERTICAL_PIXELS,
                   BLEND_TYPE=BLEND_TYPE, LIGHT_COMPENSATION=LIGHT_COMPENSATION,
                   DEBUG_MODE=DEBUG_MODE)
        print()
        print("_"*20)
        print(f"单个用时: {BLEND_TYPE}: {time.time() - one_img_time}")
        print("_"*20)
+
+
+
if __name__ == '__main__':
    # Time the full sweep over all blend-type configurations.
    start_time = time.time()
    main()
    end_time = time.time()
    print(f"\n总耗时: {end_time - start_time:.2f} 秒")

+ 210 - 0
Test/template_match_多线程_test.py

@@ -0,0 +1,210 @@
+import cv2
+import os
+import time
+from pathlib import Path
+import re
+from tqdm import tqdm
+import concurrent.futures
+
+from fry_project_classes.stitch_img_template_match import ImageStitcherTemplateMatch
+
+
def natural_sort_key(s):
    """Natural-order sort key: digit runs compare numerically ('2' < '10')."""
    pieces = re.split(r'(\d+)', str(s))
    result = []
    for piece in pieces:
        if piece.isdigit():
            result.append(int(piece))
        else:
            result.append(piece.lower())
    return result
+
+
+# --- 新增:用于并行处理的"任务单元"函数 ---
def stitch_single_row(row_index, row_image_paths, stitch_params):
    """
    Stitch one row of tiles (template-match method); runs in a worker process.

    Args:
        row_index (int): Zero-based index of this row, used for logging and
            debug-folder naming.
        row_image_paths (list): Paths of this row's images, left to right.
        stitch_params (dict): Shared stitching parameters for every row.

    Returns:
        tuple: (row_index, stitched_row_image); the image is None when the
        first tile cannot be read, or the partial strip on a mid-row failure.
    """
    # Unpack the shared parameter dict.
    NUM_COLS = len(row_image_paths)
    OUTPUT_DIR = stitch_params['OUTPUT_DIR']
    ESTIMATE_OVERLAP_HORIZONTAL_PIXELS = stitch_params['ESTIMATE_OVERLAP_HORIZONTAL_PIXELS']
    BLEND_TYPE = stitch_params['BLEND_TYPE']
    LIGHT_COMPENSATION = stitch_params['LIGHT_COMPENSATION']
    DEBUG_MODE = stitch_params['DEBUG_MODE']

    # Load the row's first tile as the initial strip.
    current_row_image = cv2.imread(str(row_image_paths[0]))
    if current_row_image is None:
        print(f"错误: 无法读取图片 {row_image_paths[0]}")
        return row_index, None

    # Append each following tile to the right edge of the growing strip.
    for j in range(1, NUM_COLS):
        stitcher_h = ImageStitcherTemplateMatch(
            estimate_overlap_pixels=ESTIMATE_OVERLAP_HORIZONTAL_PIXELS,
            stitch_type="horizontal",
            blend_type=BLEND_TYPE,
            light_uniformity_compensation_enabled=LIGHT_COMPENSATION,
            light_uniformity_compensation_width=30,
            debug=DEBUG_MODE,
            # Debug-dir naming is per (row, pair) so concurrent worker
            # processes never write into the same folder.
            debug_dir=str(OUTPUT_DIR / f'debug_h_row{row_index + 1}_col{j}vs{j + 1}')
        )

        next_image = cv2.imread(str(row_image_paths[j]))
        if next_image is None:
            print(f"错误: 无法读取图片 {row_image_paths[j]}")
            # Mid-row read failure: return what has been stitched so far.
            return row_index, current_row_image

        current_row_image = stitcher_h.stitch_main(current_row_image, next_image)

    # Return the index too so the parent can place results in row order.
    return row_index, current_row_image
+
+
+# --- 优化后的主拼接函数 ---
def stitch_img(IMAGE_DIR, OUTPUT_DIR, NUM_COLS: int, NUM_ROWS: int,
               ESTIMATE_OVERLAP_HORIZONTAL_PIXELS: int, ESTIMATE_OVERLAP_VERTICAL_PIXELS: int,
               BLEND_TYPE: str, LIGHT_COMPENSATION: bool,
               DEBUG_MODE: bool):
    """Stitch a NUM_ROWS x NUM_COLS tile grid (template-match), rows in parallel.

    Stage 1 stitches every row concurrently in a process pool; stage 2
    stitches the finished row strips vertically in this process. Row strips
    and the final panorama are written to OUTPUT_DIR. Aborts with a printed
    error if any row fails.
    """
    OUTPUT_DIR.mkdir(exist_ok=True)

    print("--- 图像拼接开始 ---")
    print(f"配置: {NUM_ROWS}行 x {NUM_COLS}列")
    print(f"图片目录: {IMAGE_DIR}")
    print(f"输出目录: {OUTPUT_DIR}")
    print(f"水平重叠预估: {ESTIMATE_OVERLAP_HORIZONTAL_PIXELS}px, 垂直重叠预估: {ESTIMATE_OVERLAP_VERTICAL_PIXELS}px")
    print(f"融合模式: {BLEND_TYPE}, 光照补偿: {'启用' if LIGHT_COMPENSATION else '禁用'}")

    # --- Load tiles in natural (numeric-aware) row-major order ---
    image_paths = sorted(list(IMAGE_DIR.glob("*.jpg")), key=natural_sort_key)

    if len(image_paths) != NUM_COLS * NUM_ROWS:
        print(f"错误: 找到 {len(image_paths)} 张图片, 但预期需要 {NUM_COLS * NUM_ROWS} 张。")
        return

    # --- Stage 1: stitch each row horizontally, in parallel (the key optimization) ---
    print("\n--- 阶段一: 并行水平拼接每一行 ---")

    # Parameters handed to every worker process.
    stitch_params = {
        'OUTPUT_DIR': OUTPUT_DIR,
        'ESTIMATE_OVERLAP_HORIZONTAL_PIXELS': ESTIMATE_OVERLAP_HORIZONTAL_PIXELS,
        'BLEND_TYPE': BLEND_TYPE,
        'LIGHT_COMPENSATION': LIGHT_COMPENSATION,
        'DEBUG_MODE': DEBUG_MODE
    }

    stitched_rows = [None] * NUM_ROWS  # pre-allocated so results land in row order

    # Fan the rows out over a process pool.
    with concurrent.futures.ProcessPoolExecutor() as executor:
        # Submit one stitching task per row.
        futures = []
        for i in range(NUM_ROWS):
            row_start_index = i * NUM_COLS
            row_image_paths = image_paths[row_start_index: row_start_index + NUM_COLS]
            # Hand the task to the pool.
            future = executor.submit(stitch_single_row, i, row_image_paths, stitch_params)
            futures.append(future)

        # tqdm shows progress; as_completed yields each row as soon as its
        # worker finishes, which is more responsive than waiting for all.
        for future in tqdm(concurrent.futures.as_completed(futures), total=NUM_ROWS, desc="处理行"):
            try:
                row_index, result_image = future.result()
                if result_image is not None:
                    stitched_rows[row_index] = result_image
                    # Persist the finished row strip.
                    row_output_path = OUTPUT_DIR / f"stitched_row_{row_index + 1}.jpg"
                    cv2.imwrite(str(row_output_path), result_image)
                    tqdm.write(f"第 {row_index + 1} 行拼接完成, 已保存至 {row_output_path}")
                else:
                    tqdm.write(f"第 {row_index + 1} 行拼接失败。")
            except Exception as exc:
                tqdm.write(f"一个行拼接任务生成了异常: {exc}")

    # Abort before stage 2 if any row failed — vertical stitching needs them all.
    if any(row is None for row in stitched_rows):
        print("错误: 存在拼接失败的行,无法进行垂直拼接。")
        return

    # --- Stage 2: stitch all row strips vertically (kept serial) ---
    print("\n--- 阶段二: 垂直拼接所有行 ---")

    final_image = stitched_rows[0]

    for i in tqdm(range(1, NUM_ROWS), desc="拼接行"):
        stitcher_v = ImageStitcherTemplateMatch(
            estimate_overlap_pixels=ESTIMATE_OVERLAP_VERTICAL_PIXELS,
            stitch_type="vertical",
            blend_type=BLEND_TYPE,
            light_uniformity_compensation_enabled=LIGHT_COMPENSATION,
            light_uniformity_compensation_width=30,
            debug=DEBUG_MODE,
            debug_dir=str(OUTPUT_DIR / f'debug_v_row{i}vs{i + 1}')
        )

        next_row_image = stitched_rows[i]
        final_image = stitcher_v.stitch_main(final_image, next_row_image)

    # --- Save the final panorama ---
    final_output_path = OUTPUT_DIR / "final_stitched_image.jpg"
    cv2.imwrite(str(final_output_path), final_image)

    print("\n--- 所有拼接任务完成!---")
    print(f"最终的全景图已保存至: {final_output_path}")
+
+
def main():
    """Sweep every blend mode over one tile set using the parallel stitcher."""
    # --- Configuration ---

    # Input tiles (machine-specific path).
    IMAGE_DIR = Path(r"C:\Code\ML\Project\StitchImageServer\temp\Input\_250801_1146_0034")

    # Puzzle grid layout.
    NUM_COLS = 4
    NUM_ROWS = 6

    # Estimated tile overlap in pixels.
    ESTIMATE_OVERLAP_HORIZONTAL_PIXELS = 405
    ESTIMATE_OVERLAP_VERTICAL_PIXELS = 440

    # Blend modes to benchmark (production default: half_importance_add_weight).
    blend_type_list = ["half_importance_add_weight",
                       "half_importance_global_brightness", "half_importance_partial_brightness",
                       "blend_half_importance_partial_HV", "blend_half_importance_partial_SV",
                       "blend_half_importance_partial_HSV", "blend_half_importance_partial_brightness_add_weight"]

    LIGHT_COMPENSATION = True
    DEBUG_MODE = False

    base_dir_path = r"C:\Code\ML\Project\StitchImageServer\temp\output"
    for index, BLEND_TYPE in enumerate(blend_type_list):
        # One output folder per blend mode, e.g. "0_half_importance_add_weight".
        run_name = f"{index}_{BLEND_TYPE}"
        OUTPUT_DIR = Path(os.path.join(base_dir_path, run_name))

        t0 = time.time()
        stitch_img(IMAGE_DIR=IMAGE_DIR, OUTPUT_DIR=OUTPUT_DIR, NUM_COLS=NUM_COLS, NUM_ROWS=NUM_ROWS,
                   ESTIMATE_OVERLAP_HORIZONTAL_PIXELS=ESTIMATE_OVERLAP_HORIZONTAL_PIXELS,
                   ESTIMATE_OVERLAP_VERTICAL_PIXELS=ESTIMATE_OVERLAP_VERTICAL_PIXELS,
                   BLEND_TYPE=BLEND_TYPE, LIGHT_COMPENSATION=LIGHT_COMPENSATION,
                   DEBUG_MODE=DEBUG_MODE)
        print()
        print("_" * 20)
        print(f"单个用时: {run_name}: {time.time() - t0}")
        print("_" * 20)
+
+
if __name__ == '__main__':
    # Time the full sweep over all blend-type configurations.
    start_time = time.time()
    main()
    end_time = time.time()
    print(f"\n总耗时: {end_time - start_time:.2f} 秒")

+ 9 - 0
Test/test01.py

@@ -0,0 +1,9 @@
import shutil
from pathlib import Path

# Zip the stitching output folder into 123.zip next to this script.
script_dir = Path(__file__).parent.absolute()
archive_base = script_dir / '123'  # make_archive appends the .zip suffix itself
print(script_dir)

shutil.make_archive(str(archive_base), 'zip', r"C:\Code\ML\Project\StitchImageServer\temp\output")
print('end')

+ 0 - 0
app/__init__.py


+ 208 - 0
app/api/stitch.py

@@ -0,0 +1,208 @@
+import os
+import shutil
+import uuid
+import zipfile
+import logging
+from pathlib import Path
+
+from fastapi import APIRouter, UploadFile, File, Form, HTTPException, BackgroundTasks
+from fastapi.responses import FileResponse, JSONResponse
+
+# 导入我们的核心逻辑和数据模型
+from app.core import stitcher_keypoint, stitcher_template
+from app.schemas import StitchingMethod, KeypointFeatureDetector, KeypointBlendType, TemplateBlendType
+
+from utils.utils import cleanup_temp_folder
+
# Router for all stitching endpoints; mounted under /api by app.main.
router = APIRouter(prefix="/stitch", tags=['拼图'])

# Scratch space for per-request working directories; created at import time.
TEMP_DIR = Path("_temp_work")
TEMP_DIR.mkdir(exist_ok=True)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+
+
@router.post("/", response_class=FileResponse, summary="单个拼图接口")
async def stitch_single_puzzle(
        background_tasks: BackgroundTasks,
        zip_file: UploadFile = File(..., description="包含一个文件夹的ZIP压缩包,文件夹内有24张小图。"),
        # --- common parameters ---
        method: StitchingMethod = Form(StitchingMethod.TEMPLATE_MATCH, description="选择拼图方法"),
        num_cols: int = Form(4, description="拼图的列数"),
        num_rows: int = Form(6, description="拼图的行数"),
        overlap_h: int = Form(405, description="预估的水平重叠像素"),
        overlap_v: int = Form(440, description="预估的垂直重叠像素"),
        # --- keypoint-method (key_point) specific parameters ---
        kp_blend_type: KeypointBlendType = Form(KeypointBlendType.COMBINE, description="[点匹配] 融合模式"),
        kp_feature_detector: KeypointFeatureDetector = Form(KeypointFeatureDetector.SIFT,
                                                            description="[点匹配] 特征检测器"),
        # --- template-matching (template_match) specific parameters ---
        tm_blend_type: TemplateBlendType = Form(TemplateBlendType.HALF_IMPORTANCE_ADD_WEIGHT,
                                                description="[模板匹配] 融合模式"),
        tm_light_compensation: bool = Form(True, description="[模板匹配] 是否启用光照补偿")
):
    """Stitch one puzzle from an uploaded ZIP and return the stitched JPEG.

    - **zip_file**: a .zip that should contain a single folder holding the
      tile images; the response file is named after that folder.
    - If the ZIP instead holds images at its top level, the extraction root
      is used as the image directory (preserves previous behavior).
    """
    # Per-request scratch directory; removed by a background task after the
    # response has been sent.
    request_id = str(uuid.uuid4())
    session_dir = TEMP_DIR / request_id
    session_dir.mkdir()
    background_tasks.add_task(cleanup_temp_folder, session_dir)

    # Fix: use only the basename of the uploaded filename — the
    # client-supplied value may contain path separators (path traversal
    # or an invalid path when joined onto session_dir).
    zip_name = Path(zip_file.filename or "upload.zip").name
    zip_path = session_dir / zip_name
    with open(zip_path, "wb") as buffer:
        shutil.copyfileobj(zip_file.file, buffer)

    extracted_dir = session_dir / "extracted"
    extracted_dir.mkdir()

    try:
        with zipfile.ZipFile(zip_path, 'r') as zf:
            zf.extractall(extracted_dir)
    except zipfile.BadZipFile:
        raise HTTPException(status_code=400, detail="上传的文件不是有效的ZIP格式。")

    # Fix: the documented layout is a single folder inside the ZIP; use it so
    # the result keeps the folder's name instead of the literal "extracted".
    # Falls back to the extraction root when no single sub-folder is found.
    sub_dirs = [d for d in extracted_dir.iterdir() if d.is_dir()]
    image_dir = sub_dirs[0] if len(sub_dirs) == 1 else extracted_dir
    output_dir = session_dir / "output"

    # Dispatch to the selected stitching backend.
    stitched_image_path = None
    if method == StitchingMethod.KEY_POINT:
        stitched_image_path = stitcher_keypoint.stitch_img(
            IMAGE_DIR=image_dir, OUTPUT_DIR=output_dir, NUM_COLS=num_cols, NUM_ROWS=num_rows,
            ESTIMATE_OVERLAP_HORIZONTAL_PIXELS=overlap_h, ESTIMATE_OVERLAP_VERTICAL_PIXELS=overlap_v,
            BLEND_TYPE=kp_blend_type.value, FeatureDetector=kp_feature_detector.value,
            DEBUG_MODE=False
        )
    elif method == StitchingMethod.TEMPLATE_MATCH:
        stitched_image_path = stitcher_template.stitch_img(
            IMAGE_DIR=image_dir, OUTPUT_DIR=output_dir, NUM_COLS=num_cols, NUM_ROWS=num_rows,
            ESTIMATE_OVERLAP_HORIZONTAL_PIXELS=overlap_h, ESTIMATE_OVERLAP_VERTICAL_PIXELS=overlap_v,
            BLEND_TYPE=tm_blend_type.value, LIGHT_COMPENSATION=tm_light_compensation,
            DEBUG_MODE=False
        )

    if not stitched_image_path or not stitched_image_path.exists():
        raise HTTPException(status_code=500, detail=f"图片拼接失败,请检查服务器日志(请求ID: {request_id})。")

    # Name the result after the original puzzle folder.
    final_filename = f"{image_dir.name}.jpg"
    final_filepath = stitched_image_path.rename(stitched_image_path.parent / final_filename)

    return FileResponse(
        path=final_filepath,
        filename=final_filename,
        media_type='image/jpeg'
    )
+
+
@router.post("/batch", response_class=FileResponse, summary="批量拼图接口")
async def stitch_batch_puzzles(
        background_tasks: BackgroundTasks,
        zip_file: UploadFile = File(..., description="包含多个拼图文件夹的ZIP压缩包。"),
        # Parameters mirror the single-puzzle endpoint.
        method: StitchingMethod = Form(StitchingMethod.TEMPLATE_MATCH, description="选择拼图方法"),
        num_cols: int = Form(4, description="拼图的列数"),
        num_rows: int = Form(6, description="拼图的行数"),
        overlap_h: int = Form(405, description="预估的水平重叠像素"),
        overlap_v: int = Form(440, description="预估的垂直重叠像素"),
        kp_blend_type: KeypointBlendType = Form(KeypointBlendType.COMBINE, description="[点匹配] 融合模式"),
        kp_feature_detector: KeypointFeatureDetector = Form(KeypointFeatureDetector.SIFT,
                                                            description="[点匹配] 特征检测器"),
        tm_blend_type: TemplateBlendType = Form(TemplateBlendType.HALF_IMPORTANCE_ADD_WEIGHT,
                                                description="[模板匹配] 融合模式"),
        tm_light_compensation: bool = Form(True, description="[模板匹配] 是否启用光照补偿"),
):
    """Stitch every puzzle folder found in the uploaded ZIP.

    - **zip_file**: a .zip that may contain several folders, each holding the
      tile images of one puzzle.
    - **Returns**: a ZIP archive of all stitched images, each named after its
      source folder. Folders that fail are logged and skipped; the request
      only fails (HTTP 500) when every folder fails.
    """
    # Per-request scratch directory; removed by a background task after the
    # response has been sent.
    request_id = str(uuid.uuid4())
    session_dir = TEMP_DIR / request_id
    session_dir.mkdir()
    background_tasks.add_task(cleanup_temp_folder, session_dir)

    # NOTE(review): zip_file.filename is client-supplied and used verbatim in
    # the path — confirm it cannot contain path separators.
    zip_path = session_dir / zip_file.filename
    with open(zip_path, "wb") as buffer:
        shutil.copyfileobj(zip_file.file, buffer)

    extracted_dir = session_dir / "extracted"
    extracted_dir.mkdir()
    try:
        with zipfile.ZipFile(zip_path, 'r') as zf:
            zf.extractall(extracted_dir)
    except zipfile.BadZipFile:
        raise HTTPException(status_code=400, detail="上传的文件不是有效的ZIP格式。")

    # Each top-level directory of the archive is treated as one puzzle.
    puzzle_folders = [d for d in extracted_dir.iterdir() if d.is_dir()]
    if not puzzle_folders:
        raise HTTPException(status_code=400, detail="ZIP包中未找到任何拼图文件夹。")

    batch_output_dir = session_dir / "batch_output"
    batch_output_dir.mkdir()

    processed_count = 0
    failed_folders = []

    for image_dir in puzzle_folders:
        logging.info(f"--- 开始处理批量任务中的文件夹: {image_dir.name} ---")
        # One shared per-iteration output directory; wipe the previous
        # round's files (the stitcher recreates the directory itself).
        single_output_dir = session_dir / "single_output"
        if single_output_dir.exists():
            shutil.rmtree(single_output_dir)  # clear output of the previous loop iteration

        stitched_image_path = None
        try:
            if method == StitchingMethod.KEY_POINT:
                stitched_image_path = stitcher_keypoint.stitch_img(
                    IMAGE_DIR=image_dir, OUTPUT_DIR=single_output_dir,
                    NUM_COLS=num_cols, NUM_ROWS=num_rows,
                    ESTIMATE_OVERLAP_HORIZONTAL_PIXELS=overlap_h,
                    ESTIMATE_OVERLAP_VERTICAL_PIXELS=overlap_v,
                    BLEND_TYPE=kp_blend_type.value, FeatureDetector=kp_feature_detector.value,
                    DEBUG_MODE=False
                )
            elif method == StitchingMethod.TEMPLATE_MATCH:
                stitched_image_path = stitcher_template.stitch_img(
                    IMAGE_DIR=image_dir, OUTPUT_DIR=single_output_dir,
                    NUM_COLS=num_cols, NUM_ROWS=num_rows,
                    ESTIMATE_OVERLAP_HORIZONTAL_PIXELS=overlap_h,
                    ESTIMATE_OVERLAP_VERTICAL_PIXELS=overlap_v,
                    BLEND_TYPE=tm_blend_type.value, LIGHT_COMPENSATION=tm_light_compensation,
                    DEBUG_MODE=False
                )

            if stitched_image_path and stitched_image_path.exists():
                # Move the stitched image into the final batch output folder.
                target_path = batch_output_dir / f"{image_dir.name}.jpg"
                shutil.move(str(stitched_image_path), str(target_path))
                processed_count += 1
            else:
                logging.error(f"文件夹 {image_dir.name} 拼接失败。")
                failed_folders.append(image_dir.name)
        except Exception as e:
            logging.error(f"处理文件夹 {image_dir.name} 时发生严重错误: {e}")
            failed_folders.append(image_dir.name)

    if processed_count == 0:
        detail_msg = f"所有 {len(puzzle_folders)} 个文件夹都拼接失败。失败列表: {failed_folders}"
        raise HTTPException(status_code=500, detail=detail_msg)

    # Bundle all results into a single ZIP for the response.
    output_zip_path = session_dir / "stitched_results.zip"
    shutil.make_archive(str(output_zip_path.with_suffix('')), 'zip', batch_output_dir)

    if failed_folders:
        logging.warning(f"批量任务完成,但有 {len(failed_folders)} 个文件夹失败: {failed_folders}")
        # A custom response header could report partial failures, e.g.:
        # response.headers["X-Failed-Folders"] = ",".join(failed_folders)

    return FileResponse(
        path=output_zip_path,
        filename="stitched_results.zip",
        media_type='application/zip'
    )

+ 107 - 0
app/core/stitcher_keypoint.py

@@ -0,0 +1,107 @@
+import cv2
+from pathlib import Path
+import concurrent.futures
+import logging
+from tqdm import tqdm
+
+from fry_project_classes.stitch_img_key_point import ImageStitcherKeyPoint
+from utils.utils import natural_sort_key
+
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+
+
def stitch_single_row_keypoint(row_index, row_image_paths, stitch_params):
    """Stitch one row of tiles horizontally (keypoint backend, pool worker).

    Args:
        row_index: Index of the row in the final grid (used for ordering
            results and naming debug folders).
        row_image_paths: Left-to-right ordered image paths of this row.
        stitch_params: Dict of shared settings (OUTPUT_DIR, overlap, blend
            type, feature detector, debug flag).

    Returns:
        (row_index, stitched_row_image) on success, or (row_index, None) when
        any image of the row cannot be read. Previously a mid-row read
        failure returned the partially stitched row, which the caller treated
        as a successfully stitched row and produced a misaligned panorama.
    """
    num_cols = len(row_image_paths)
    output_dir = stitch_params['OUTPUT_DIR']
    overlap_h = stitch_params['ESTIMATE_OVERLAP_HORIZONTAL_PIXELS']
    blend_type = stitch_params['BLEND_TYPE']
    feature_detector = stitch_params['FeatureDetector']
    debug_mode = stitch_params['DEBUG_MODE']

    current_row_image = cv2.imread(str(row_image_paths[0]))
    if current_row_image is None:
        logging.error(f"错误: 无法读取图片 {row_image_paths[0]}")
        return row_index, None

    for j in range(1, num_cols):
        next_image = cv2.imread(str(row_image_paths[j]))
        if next_image is None:
            logging.error(f"错误: 无法读取图片 {row_image_paths[j]}")
            # Fix: fail the whole row instead of returning a partial stitch.
            return row_index, None
        stitcher_h = ImageStitcherKeyPoint(
            estimate_overlap_pixels=overlap_h,
            stitch_type="horizontal",
            blend_type=blend_type,
            feature_detector=feature_detector,
            blend_ratio=0.5,
            debug=debug_mode,
            debug_dir=str(output_dir / f'debug_h_row{row_index + 1}_col{j}vs{j + 1}')
        )
        current_row_image = stitcher_h.stitch_main(current_row_image, next_image)
    return row_index, current_row_image
+
+
def stitch_img(IMAGE_DIR: Path, OUTPUT_DIR: Path, NUM_COLS: int, NUM_ROWS: int,
               ESTIMATE_OVERLAP_HORIZONTAL_PIXELS: int, ESTIMATE_OVERLAP_VERTICAL_PIXELS: int,
               BLEND_TYPE: str, FeatureDetector: str,
               DEBUG_MODE: bool) -> Path | None:
    """Keypoint-based grid stitching.

    Stage 1 stitches each row of NUM_COLS tiles horizontally in a process
    pool; stage 2 stitches the resulting row strips vertically, top to
    bottom. Returns the path of the final image on success, or None on
    failure (wrong image count, or any row failed to stitch).
    """
    OUTPUT_DIR.mkdir(exist_ok=True, parents=True)

    logging.info("--- [关键点] 图像拼接开始 ---")
    logging.info(f"配置: {NUM_ROWS}行 x {NUM_COLS}列, 图片目录: {IMAGE_DIR}")

    # Tiles are expected as *.jpg, ordered row-major by natural sort of name.
    image_paths = sorted(list(IMAGE_DIR.glob("*.jpg")), key=natural_sort_key)
    if len(image_paths) != NUM_COLS * NUM_ROWS:
        logging.error(f"错误: 找到 {len(image_paths)} 张图片, 但预期需要 {NUM_COLS * NUM_ROWS} 张。")
        return None


    # --- Stage 1: stitch each row horizontally, in parallel ---
    logging.info("--- 阶段一: 并行水平拼接每一行 ---")
    stitch_params = {
        'OUTPUT_DIR': OUTPUT_DIR,
        'ESTIMATE_OVERLAP_HORIZONTAL_PIXELS': ESTIMATE_OVERLAP_HORIZONTAL_PIXELS,
        'BLEND_TYPE': BLEND_TYPE,
        'FeatureDetector': FeatureDetector,
        'DEBUG_MODE': DEBUG_MODE
    }
    stitched_rows = [None] * NUM_ROWS
    with concurrent.futures.ProcessPoolExecutor() as executor:
        futures = [executor.submit(stitch_single_row_keypoint, i, image_paths[i * NUM_COLS: i * NUM_COLS + NUM_COLS],
                                   stitch_params) for i in range(NUM_ROWS)]
        # Results arrive in completion order; row_index restores grid order.
        for future in tqdm(concurrent.futures.as_completed(futures), total=NUM_ROWS, desc="[关键点]处理行"):
            try:
                row_index, result_image = future.result()
                if result_image is not None:
                    stitched_rows[row_index] = result_image
                else:
                    logging.warning(f"第 {row_index + 1} 行拼接失败。")
            except Exception as exc:
                logging.error(f"一个行拼接任务生成了异常: {exc}")

    if any(row is None for row in stitched_rows):
        logging.error("错误: 存在拼接失败的行,无法进行垂直拼接。")
        return None

    # --- Stage 2: stitch the row strips vertically ---
    logging.info("--- 阶段二: 垂直拼接所有行 ---")
    final_image = stitched_rows[0]
    for i in tqdm(range(1, NUM_ROWS), desc="[关键点]拼接行"):
        stitcher_v = ImageStitcherKeyPoint(
            estimate_overlap_pixels=ESTIMATE_OVERLAP_VERTICAL_PIXELS, stitch_type="vertical",
            blend_type=BLEND_TYPE, feature_detector=FeatureDetector, blend_ratio=0.5,
            debug=DEBUG_MODE, debug_dir=str(OUTPUT_DIR / f'debug_v_row{i}vs{i + 1}')
        )
        next_row_image = stitched_rows[i]
        final_image = stitcher_v.stitch_main(final_image, next_row_image)

    # --- Save and return the result ---
    final_output_path = OUTPUT_DIR / "final_stitched_image.jpg"
    cv2.imwrite(str(final_output_path), final_image)
    logging.info(f"--- [关键点] 拼接任务完成!最终图已暂存至: {final_output_path} ---")

    return final_output_path

+ 106 - 0
app/core/stitcher_template.py

@@ -0,0 +1,106 @@
+import cv2
+from pathlib import Path
+import concurrent.futures
+import logging
+from tqdm import tqdm
+
+from fry_project_classes.stitch_img_template_match import ImageStitcherTemplateMatch
+from utils.utils import natural_sort_key
+
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+
+
def stitch_single_row(row_index, row_image_paths, stitch_params):
    """Stitch one row of tiles horizontally (template backend, pool worker).

    Args:
        row_index: Index of the row in the final grid (used for ordering
            results and naming debug folders).
        row_image_paths: Left-to-right ordered image paths of this row.
        stitch_params: Dict of shared settings (OUTPUT_DIR, overlap, blend
            type, light-compensation flag, debug flag).

    Returns:
        (row_index, stitched_row_image) on success, or (row_index, None) when
        any image of the row cannot be read. Previously a mid-row read
        failure returned the partially stitched row, which the caller treated
        as a successfully stitched row and produced a misaligned panorama.
    """
    num_cols = len(row_image_paths)
    output_dir = stitch_params['OUTPUT_DIR']
    overlap_h = stitch_params['ESTIMATE_OVERLAP_HORIZONTAL_PIXELS']
    blend_type = stitch_params['BLEND_TYPE']
    light_compensation = stitch_params['LIGHT_COMPENSATION']
    debug_mode = stitch_params['DEBUG_MODE']

    current_row_image = cv2.imread(str(row_image_paths[0]))
    if current_row_image is None:
        logging.error(f"错误: 无法读取图片 {row_image_paths[0]}")
        return row_index, None

    for j in range(1, num_cols):
        next_image = cv2.imread(str(row_image_paths[j]))
        if next_image is None:
            logging.error(f"错误: 无法读取图片 {row_image_paths[j]}")
            # Fix: fail the whole row instead of returning a partial stitch.
            return row_index, None
        stitcher_h = ImageStitcherTemplateMatch(
            estimate_overlap_pixels=overlap_h,
            stitch_type="horizontal",
            blend_type=blend_type,
            light_uniformity_compensation_enabled=light_compensation,
            debug=debug_mode,
            debug_dir=str(output_dir / f'debug_h_row{row_index + 1}_col{j}vs{j + 1}')
        )
        current_row_image = stitcher_h.stitch_main(current_row_image, next_image)
    return row_index, current_row_image
+
+
def stitch_img(IMAGE_DIR: Path, OUTPUT_DIR: Path, NUM_COLS: int, NUM_ROWS: int,
               ESTIMATE_OVERLAP_HORIZONTAL_PIXELS: int, ESTIMATE_OVERLAP_VERTICAL_PIXELS: int,
               BLEND_TYPE: str, LIGHT_COMPENSATION: bool,
               DEBUG_MODE: bool) -> Path | None:
    """Template-matching-based grid stitching.

    Stage 1 stitches each row of NUM_COLS tiles horizontally in a process
    pool; stage 2 stitches the resulting row strips vertically, top to
    bottom. Returns the path of the final image on success, or None on
    failure (wrong image count, or any row failed to stitch).
    """
    OUTPUT_DIR.mkdir(exist_ok=True, parents=True)

    logging.info("--- [模板匹配] 图像拼接开始 ---")
    logging.info(f"配置: {NUM_ROWS}行 x {NUM_COLS}列, 图片目录: {IMAGE_DIR}")

    # Tiles are expected as *.jpg, ordered row-major by natural sort of name.
    image_paths = sorted(list(IMAGE_DIR.glob("*.jpg")), key=natural_sort_key)
    if len(image_paths) != NUM_COLS * NUM_ROWS:
        logging.error(f"错误: 找到 {len(image_paths)} 张图片, 但预期需要 {NUM_COLS * NUM_ROWS} 张。")
        return None

    # --- Stage 1: stitch each row horizontally, in parallel ---
    logging.info("--- 阶段一: 并行水平拼接每一行 ---")
    stitch_params = {
        'OUTPUT_DIR': OUTPUT_DIR,
        'ESTIMATE_OVERLAP_HORIZONTAL_PIXELS': ESTIMATE_OVERLAP_HORIZONTAL_PIXELS,
        'BLEND_TYPE': BLEND_TYPE,
        'LIGHT_COMPENSATION': LIGHT_COMPENSATION,
        'DEBUG_MODE': DEBUG_MODE
    }
    stitched_rows = [None] * NUM_ROWS
    with concurrent.futures.ProcessPoolExecutor() as executor:
        futures = [
            executor.submit(stitch_single_row, i, image_paths[i * NUM_COLS: i * NUM_COLS + NUM_COLS], stitch_params) for
            i in range(NUM_ROWS)]
        # Results arrive in completion order; row_index restores grid order.
        for future in tqdm(concurrent.futures.as_completed(futures), total=NUM_ROWS, desc="[模板]处理行"):
            try:
                row_index, result_image = future.result()
                if result_image is not None:
                    stitched_rows[row_index] = result_image
                else:
                    logging.warning(f"第 {row_index + 1} 行拼接失败。")
            except Exception as exc:
                logging.error(f"一个行拼接任务生成了异常: {exc}")

    if any(row is None for row in stitched_rows):
        logging.error("错误: 存在拼接失败的行,无法进行垂直拼接。")
        return None

    # --- Stage 2: stitch the row strips vertically ---
    logging.info("--- 阶段二: 垂直拼接所有行 ---")
    final_image = stitched_rows[0]
    for i in tqdm(range(1, NUM_ROWS), desc="[模板]拼接行"):
        stitcher_v = ImageStitcherTemplateMatch(
            estimate_overlap_pixels=ESTIMATE_OVERLAP_VERTICAL_PIXELS, stitch_type="vertical",
            blend_type=BLEND_TYPE, light_uniformity_compensation_enabled=LIGHT_COMPENSATION,
            debug=DEBUG_MODE, debug_dir=str(OUTPUT_DIR / f'debug_v_row{i}vs{i + 1}')
        )
        next_row_image = stitched_rows[i]
        final_image = stitcher_v.stitch_main(final_image, next_row_image)

    # --- Save and return the result ---
    final_output_path = OUTPUT_DIR / "final_stitched_image.jpg"
    cv2.imwrite(str(final_output_path), final_image)
    logging.info(f"--- [模板匹配] 拼接任务完成!最终图已暂存至: {final_output_path} ---")

    return final_output_path

+ 25 - 0
app/main.py

@@ -0,0 +1,25 @@
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import logging

from app.api.stitch import router as stitch_router

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# --- Application setup ---
app = FastAPI(
    title="拼图API",
    description="一个提供单张或批量图片拼接功能的服务。"
)


# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# disallowed by the CORS spec — browsers reject the wildcard when credentials
# are sent. Confirm whether credentialed cross-origin requests are needed.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# All stitching routes are served under /api/stitch (the router carries its
# own /stitch prefix).
app.include_router(stitch_router, prefix="/api")

+ 34 - 0
app/schemas.py

@@ -0,0 +1,34 @@
+from enum import Enum
+
+
class StitchingMethod(str, Enum):
    """Stitching backends exposed by the API form parameter `method`."""
    KEY_POINT = "key_point"          # handled by app.core.stitcher_keypoint
    TEMPLATE_MATCH = "template_match"  # handled by app.core.stitcher_template
+
+
class KeypointFeatureDetector(str, Enum):
    """Feature detector choices for the keypoint stitching method."""
    SIFT = "sift"
    ORB = "orb"
    BRISK = "brisk"
    COMBINE = "combine"  # presumably combines several detectors — verify in ImageStitcherKeyPoint
+
+
class KeypointBlendType(str, Enum):
    """Blend-mode choices for the keypoint stitching method."""
    HALF_IMPORTANCE = "half_importance"
    RIGHT_FIRST = "right_first"
    HALF_IMPORTANCE_ADD_WEIGHT = "half_importance_add_weight"
    COMBINE = "combine"
+
+
class TemplateBlendType(str, Enum):
    """Blend-mode choices for the template-matching stitching method."""
    HALF_IMPORTANCE_ADD_WEIGHT = "half_importance_add_weight"
    HALF_IMPORTANCE_GLOBAL_BRIGHTNESS = "half_importance_global_brightness"
    HALF_IMPORTANCE_PARTIAL_BRIGHTNESS = "half_importance_partial_brightness"
    BLEND_HALF_IMPORTANCE_PARTIAL_HV = "blend_half_importance_partial_HV"
    BLEND_HALF_IMPORTANCE_PARTIAL_SV = "blend_half_importance_partial_SV"
    BLEND_HALF_IMPORTANCE_PARTIAL_HSV = "blend_half_importance_partial_HSV"
    BLEND_HALF_IMPORTANCE_PARTIAL_BRIGHTNESS_ADD_WEIGHT = "blend_half_importance_partial_brightness_add_weight"

+ 1031 - 0
fry_project_classes/blend_type_mixin.py

@@ -0,0 +1,1031 @@
+import cv2
+import numpy as np
+import os
+from fry_project_classes.fry_image_write_V03_250401 import FryImageWrite
+
+
+class BlendTypeMixin:
+    def blend_right_first(self, left_img: np.ndarray, right_img: np.ndarray,
+                          stitch_img_width: int, stitch_img_height: int,
+                          y_offset_right2left: int):
+        """"右边优先的拼接方式"""
+        # self.debug_dir = f"{self.init_debug}_{self.stitch_type}_right_first"
+        # os.makedirs(self.debug_dir, exist_ok=True)
+        # if self.debug:
+        #     if data_center_algo_inner_signals_obj is not None:
+        #         data_center_algo_inner_signals_obj.log_info_signal.emit("警告",f"混合模式:右图优先模式")
+
+        # 05、实现简单拼接逻辑
+        stitch_init_img = np.zeros((stitch_img_height, stitch_img_width, 3), dtype=np.uint8)
+        # 左图直接放进去
+        stitch_init_img[:left_img.shape[0], :left_img.shape[1]] = left_img
+        # 先高后宽
+        # 右图纠正位置之后放进去
+        if y_offset_right2left > 0:
+            stitch_init_img[y_offset_right2left:, stitch_img_width - right_img.shape[1]:] = \
+                right_img[:right_img.shape[0] - y_offset_right2left, :]
+        else:
+            stitch_init_img[:right_img.shape[0] + y_offset_right2left, stitch_img_width - right_img.shape[1]:] = \
+                right_img[abs(y_offset_right2left):, :]
+
+        # return result
+        return stitch_init_img
+
+    def blend_left_first(self, left_img: np.ndarray, right_img: np.ndarray,
+                         stitch_img_width: int, stitch_img_height: int,
+                         y_offset_right2left: int, real_overlap_width: int):
+        """"右边优先的拼接方式"""
+        # self.debug_dir = f"{self.init_debug}_{self.stitch_type}_right_first"
+        # os.makedirs(self.debug_dir, exist_ok=True)
+        # if self.debug:
+        #     if data_center_algo_inner_signals_obj is not None:
+        #         data_center_algo_inner_signals_obj.log_info_signal.emit("警告",f"混合模式:左图优先模式")
+
+        # 05、实现简单拼接逻辑
+        stitch_init_img = np.zeros((stitch_img_height, stitch_img_width, 3), dtype=np.uint8)
+        # 左图直接放进去
+        stitch_init_img[:left_img.shape[0], :left_img.shape[1]] = left_img
+
+        left_width = left_img.shape[1]
+        # 先高后宽
+        # 右图纠正位置之后放进去
+        if y_offset_right2left > 0:
+            stitch_init_img[y_offset_right2left:, left_width:] = \
+                right_img[:right_img.shape[0] - y_offset_right2left, real_overlap_width:]
+        else:
+            stitch_init_img[:right_img.shape[0] + y_offset_right2left, left_width:] = \
+                right_img[abs(y_offset_right2left):, real_overlap_width:]
+
+        # return result
+        return stitch_init_img
+
+    def blend_half_importance(self, left_img: np.ndarray, right_img: np.ndarray,
+                              stitch_img_width: int, stitch_img_height: int,
+                              y_offset_right2left: int, real_overlap_width: int,
+                              light_uniformity_compensation=False,
+                              light_uniformity_compensation_width=15,
+                              visualize=False):
+        """"左右一样重要性的拼接方式"""
+        # self.debug_dir = f"{self.init_debug}_{self.stitch_type}_half_importance"
+        # os.makedirs(self.debug_dir, exist_ok=True)
+        # if self.debug:
+        #     if data_center_algo_inner_signals_obj is not None:
+        #         data_center_algo_inner_signals_obj.log_info_signal.emit("警告",f"混合模式:左右半拼模式")
+
+        real_overlap_width_half = int(real_overlap_width / 2)
+        # 05、实现简单拼接逻辑
+        stitch_init_img = np.zeros((stitch_img_height, stitch_img_width, 3), dtype=np.uint8)
+        # 左图直接放进去
+        stitch_init_img[:left_img.shape[0], :left_img.shape[1]] = left_img
+
+        # 右图纠正位置之后放进去
+        right_img_start_x = stitch_img_width - right_img.shape[1] + real_overlap_width_half
+
+        if y_offset_right2left > 0:
+            # _250609_1448_ 右图向下放一点
+            stitch_init_img[y_offset_right2left:, right_img_start_x:] = \
+                right_img[:right_img.shape[0] - y_offset_right2left, real_overlap_width_half:]
+        else:
+            # _250609_1448_ 右图向上放一点
+            stitch_init_img[:right_img.shape[0] + y_offset_right2left, right_img_start_x:] = \
+                right_img[abs(y_offset_right2left):, real_overlap_width_half:]
+
+        # 可视化操作部分
+        if visualize:
+            green = (0, 255, 0)
+            red = (0, 0, 255)
+            blue = (0, 0, 255)
+            color3 = (255, 255, 0)
+            cv2.rectangle(stitch_init_img, (0, 0), (left_img.shape[1], left_img.shape[0]), color=green)  # 左上图
+
+            cv2.rectangle(stitch_init_img, (0, 0), (left_img.shape[1] - real_overlap_width, left_img.shape[0]),
+                          color=color3)
+
+            # 右下图
+            if y_offset_right2left > 0:
+                cv2.rectangle(stitch_init_img, (right_img_start_x, 0),
+                              (stitch_init_img.shape[1], stitch_init_img.shape[0]), color=red)
+                pass
+            else:
+                cv2.rectangle(stitch_init_img, (right_img_start_x, 0),
+                              (stitch_init_img.shape[1], stitch_init_img.shape[0]), color=blue)
+                pass
+
+        # return result
+        return stitch_init_img
+
    def blend_half_importance_partial_brightness(self, left_img: np.ndarray, right_img: np.ndarray,
                                                 stitch_img_width: int, stitch_img_height: int,
                                                 y_offset_right2left: int, real_overlap_width: int,
                                                 light_uniformity_compensation=False,
                                                 light_uniformity_compensation_width=15,
                                                 visualize=False):
        """Equal-importance blend with optional block-wise brightness correction.

        Same half/half paste layout as ``blend_half_importance``; when
        ``light_uniformity_compensation`` is set, both images are first passed
        through ``adjust_partial_brightness_for_stitching`` (brightness only:
        saturation and hue correction disabled, weight_ratio=2.0).
        """

        real_overlap_width_half = int(real_overlap_width / 2)
        # Simple paste-based stitching onto a black canvas.
        stitch_init_img = np.zeros((stitch_img_height, stitch_img_width, 3), dtype=np.uint8)

        right_img_start_x = stitch_img_width - right_img.shape[1] + real_overlap_width_half

        # Optional seam-area brightness compensation before pasting.
        if light_uniformity_compensation:
            # NOTE(review): these two locals are computed but never used.
            stitch_x_in_left = left_img.shape[1] - real_overlap_width_half
            stitch_x_in_right = real_overlap_width_half
            adjusted_left_img, adjusted_right_img = self.adjust_partial_brightness_for_stitching(left_img, right_img,
                                                                                                 real_overlap_width=real_overlap_width,
                                                                                                 y_offset_right2left=y_offset_right2left,
                                                                                                 block_size=light_uniformity_compensation_width,
                                                                                                 use_saturation_correct=False,
                                                                                                 use_hue_correct=False,
                                                                                                 weight_ratio=2.0
                                                                                                 )
            if self.debug:
                self.save_debug_image(adjusted_left_img, 'light_compensation_720_left_img_adjusted')
                self.save_debug_image(adjusted_right_img, 'light_compensation_720_right_img_adjusted')
            left_img = adjusted_left_img
            right_img = adjusted_right_img
            pass

        # Left image sits at the top-left corner.
        stitch_init_img[:left_img.shape[0], :left_img.shape[1]] = left_img

        # Right image pasted from the middle of the overlap, y-shifted.
        if y_offset_right2left > 0:
            stitch_init_img[y_offset_right2left:, right_img_start_x:] = \
                right_img[:right_img.shape[0] - y_offset_right2left, real_overlap_width_half:]
        else:
            stitch_init_img[:right_img.shape[0] + y_offset_right2left, right_img_start_x:] = \
                right_img[abs(y_offset_right2left):, real_overlap_width_half:]

        # Debug visualization of the paste regions.
        if visualize:
            green = (0, 255, 0)
            red = (0, 0, 255)
            # NOTE(review): "blue" is (0, 0, 255) — that is red in BGR.
            blue = (0, 0, 255)
            color3 = (255, 255, 0)
            cv2.rectangle(stitch_init_img, (0, 0), (left_img.shape[1], left_img.shape[0]), color=green)  # left image

            cv2.rectangle(stitch_init_img, (0, 0), (left_img.shape[1] - real_overlap_width, left_img.shape[0]),
                          color=color3)

            # Right image region; color encodes the sign of the y offset.
            if y_offset_right2left > 0:
                cv2.rectangle(stitch_init_img, (right_img_start_x, 0),
                              (stitch_init_img.shape[1], stitch_init_img.shape[0]), color=red)
                pass
            else:
                cv2.rectangle(stitch_init_img, (right_img_start_x, 0),
                              (stitch_init_img.shape[1], stitch_init_img.shape[0]), color=blue)
                pass

        return stitch_init_img
+
    def blend_half_importance_partial_HV(self, left_img: np.ndarray, right_img: np.ndarray,
                                         stitch_img_width: int, stitch_img_height: int,
                                         y_offset_right2left: int, real_overlap_width: int,
                                         light_uniformity_compensation=False,
                                         light_uniformity_compensation_width=15,
                                         visualize=False):
        """Equal-importance blend with optional hue+brightness correction.

        Same half/half paste layout as ``blend_half_importance``; when
        ``light_uniformity_compensation`` is set, both images are first passed
        through ``adjust_partial_brightness_for_stitching`` with hue
        correction enabled and saturation correction disabled.
        """

        real_overlap_width_half = int(real_overlap_width / 2)
        # Simple paste-based stitching onto a black canvas.
        stitch_init_img = np.zeros((stitch_img_height, stitch_img_width, 3), dtype=np.uint8)

        right_img_start_x = stitch_img_width - right_img.shape[1] + real_overlap_width_half

        # Optional seam-area hue/brightness compensation before pasting.
        if light_uniformity_compensation:
            # NOTE(review): these two locals are computed but never used.
            stitch_x_in_left = left_img.shape[1] - real_overlap_width_half
            stitch_x_in_right = real_overlap_width_half
            adjusted_left_img, adjusted_right_img = self.adjust_partial_brightness_for_stitching(left_img, right_img,
                                                                                                 real_overlap_width=real_overlap_width,
                                                                                                 y_offset_right2left=y_offset_right2left,
                                                                                                 block_size=light_uniformity_compensation_width,
                                                                                                 use_saturation_correct=False,
                                                                                                 use_hue_correct=True
                                                                                                 )
            if self.debug:
                self.save_debug_image(adjusted_left_img, 'light_compensation_720_left_img_adjusted')
                self.save_debug_image(adjusted_right_img, 'light_compensation_720_right_img_adjusted')
            left_img = adjusted_left_img
            right_img = adjusted_right_img
            pass

        # Left image sits at the top-left corner.
        stitch_init_img[:left_img.shape[0], :left_img.shape[1]] = left_img

        # Right image pasted from the middle of the overlap, y-shifted.
        if y_offset_right2left > 0:
            stitch_init_img[y_offset_right2left:, right_img_start_x:] = \
                right_img[:right_img.shape[0] - y_offset_right2left, real_overlap_width_half:]
        else:
            stitch_init_img[:right_img.shape[0] + y_offset_right2left, right_img_start_x:] = \
                right_img[abs(y_offset_right2left):, real_overlap_width_half:]

        # Debug visualization of the paste regions.
        if visualize:
            green = (0, 255, 0)
            red = (0, 0, 255)
            # NOTE(review): "blue" is (0, 0, 255) — that is red in BGR.
            blue = (0, 0, 255)
            color3 = (255, 255, 0)
            cv2.rectangle(stitch_init_img, (0, 0), (left_img.shape[1], left_img.shape[0]), color=green)  # left image

            cv2.rectangle(stitch_init_img, (0, 0), (left_img.shape[1] - real_overlap_width, left_img.shape[0]),
                          color=color3)

            # Right image region; color encodes the sign of the y offset.
            if y_offset_right2left > 0:
                cv2.rectangle(stitch_init_img, (right_img_start_x, 0),
                              (stitch_init_img.shape[1], stitch_init_img.shape[0]), color=red)
                pass
            else:
                cv2.rectangle(stitch_init_img, (right_img_start_x, 0),
                              (stitch_init_img.shape[1], stitch_init_img.shape[0]), color=blue)
                pass

        return stitch_init_img
+
+    def blend_half_importance_partial_SV(self, left_img: np.ndarray, right_img: np.ndarray,
+                                         stitch_img_width: int, stitch_img_height: int,
+                                         y_offset_right2left: int, real_overlap_width: int,
+                                         light_uniformity_compensation=False,
+                                         light_uniformity_compensation_width=15,
+                                         visualize=False):
+        """"左右一样重要性的拼接方式"""
+
+        real_overlap_width_half = int(real_overlap_width / 2)
+        # 05、实现简单拼接逻辑
+        stitch_init_img = np.zeros((stitch_img_height, stitch_img_width, 3), dtype=np.uint8)
+
+        right_img_start_x = stitch_img_width - right_img.shape[1] + real_overlap_width_half
+
+        # 如果进行图像补偿
+        if light_uniformity_compensation:
+            stitch_x_in_left = left_img.shape[1] - real_overlap_width_half
+            stitch_x_in_right = real_overlap_width_half
+            adjusted_left_img, adjusted_right_img = self.adjust_partial_brightness_for_stitching(left_img, right_img,
+                                                                                                 real_overlap_width=real_overlap_width,
+                                                                                                 y_offset_right2left=y_offset_right2left,
+                                                                                                 block_size=light_uniformity_compensation_width,
+                                                                                                 use_saturation_correct=True,
+                                                                                                 use_hue_correct=False
+                                                                                                 )
+            if self.debug:
+                self.save_debug_image(adjusted_left_img, 'light_compensation_720_left_img_adjusted')
+                self.save_debug_image(adjusted_right_img, 'light_compensation_720_right_img_adjusted')
+            left_img = adjusted_left_img
+            right_img = adjusted_right_img
+            pass
+
+        # 左图直接放进去
+        stitch_init_img[:left_img.shape[0], :left_img.shape[1]] = left_img
+
+        # 右图纠正位置之后放进去
+        if y_offset_right2left > 0:
+            stitch_init_img[y_offset_right2left:, right_img_start_x:] = \
+                right_img[:right_img.shape[0] - y_offset_right2left, real_overlap_width_half:]
+        else:
+            stitch_init_img[:right_img.shape[0] + y_offset_right2left, right_img_start_x:] = \
+                right_img[abs(y_offset_right2left):, real_overlap_width_half:]
+
+        # 可视化操作部分
+        if visualize:
+            green = (0, 255, 0)
+            red = (0, 0, 255)
+            blue = (0, 0, 255)
+            color3 = (255, 255, 0)
+            cv2.rectangle(stitch_init_img, (0, 0), (left_img.shape[1], left_img.shape[0]), color=green)  # 左上图
+
+            cv2.rectangle(stitch_init_img, (0, 0), (left_img.shape[1] - real_overlap_width, left_img.shape[0]),
+                          color=color3)
+
+            # 右下图
+            if y_offset_right2left > 0:
+                cv2.rectangle(stitch_init_img, (right_img_start_x, 0),
+                              (stitch_init_img.shape[1], stitch_init_img.shape[0]), color=red)
+                pass
+            else:
+                cv2.rectangle(stitch_init_img, (right_img_start_x, 0),
+                              (stitch_init_img.shape[1], stitch_init_img.shape[0]), color=blue)
+                pass
+
+        # return result
+        return stitch_init_img
+
+    def blend_half_importance_partial_HSV(self, left_img: np.ndarray, right_img: np.ndarray,
+                                          stitch_img_width: int, stitch_img_height: int,
+                                          y_offset_right2left: int, real_overlap_width: int,
+                                          light_uniformity_compensation=False,
+                                          light_uniformity_compensation_width=15,
+                                          visualize=False):
+        """"左右一样重要性的拼接方式"""
+
+        real_overlap_width_half = int(real_overlap_width / 2)
+        # 05、实现简单拼接逻辑
+        stitch_init_img = np.zeros((stitch_img_height, stitch_img_width, 3), dtype=np.uint8)
+
+        right_img_start_x = stitch_img_width - right_img.shape[1] + real_overlap_width_half
+
+        # 如果进行图像补偿
+        if light_uniformity_compensation:
+            stitch_x_in_left = left_img.shape[1] - real_overlap_width_half
+            stitch_x_in_right = real_overlap_width_half
+            adjusted_left_img, adjusted_right_img = self.adjust_partial_brightness_for_stitching(left_img, right_img,
+                                                                                                 real_overlap_width=real_overlap_width,
+                                                                                                 y_offset_right2left=y_offset_right2left,
+                                                                                                 block_size=light_uniformity_compensation_width,
+                                                                                                 use_saturation_correct=True,
+                                                                                                 use_hue_correct=True
+                                                                                                 )
+            if self.debug:
+                self.save_debug_image(adjusted_left_img, 'light_compensation_720_left_img_adjusted')
+                self.save_debug_image(adjusted_right_img, 'light_compensation_720_right_img_adjusted')
+            left_img = adjusted_left_img
+            right_img = adjusted_right_img
+            pass
+
+        # 左图直接放进去
+        stitch_init_img[:left_img.shape[0], :left_img.shape[1]] = left_img
+
+        # 右图纠正位置之后放进去
+        if y_offset_right2left > 0:
+            stitch_init_img[y_offset_right2left:, right_img_start_x:] = \
+                right_img[:right_img.shape[0] - y_offset_right2left, real_overlap_width_half:]
+        else:
+            stitch_init_img[:right_img.shape[0] + y_offset_right2left, right_img_start_x:] = \
+                right_img[abs(y_offset_right2left):, real_overlap_width_half:]
+
+        # 可视化操作部分
+        if visualize:
+            green = (0, 255, 0)
+            red = (0, 0, 255)
+            blue = (0, 0, 255)
+            color3 = (255, 255, 0)
+            cv2.rectangle(stitch_init_img, (0, 0), (left_img.shape[1], left_img.shape[0]), color=green)  # 左上图
+
+            cv2.rectangle(stitch_init_img, (0, 0), (left_img.shape[1] - real_overlap_width, left_img.shape[0]),
+                          color=color3)
+
+            # 右下图
+            if y_offset_right2left > 0:
+                cv2.rectangle(stitch_init_img, (right_img_start_x, 0),
+                              (stitch_init_img.shape[1], stitch_init_img.shape[0]), color=red)
+                pass
+            else:
+                cv2.rectangle(stitch_init_img, (right_img_start_x, 0),
+                              (stitch_init_img.shape[1], stitch_init_img.shape[0]), color=blue)
+                pass
+
+        # return result
+        return stitch_init_img
+
+    def adjust_partial_brightness_for_stitching(self, left_img, right_img,
+                                                real_overlap_width,
+                                                y_offset_right2left,
+                                                block_size=32,
+                                                use_saturation_correct=False,
+                                                use_hue_correct=False,
+                                                weight_ratio=2.0
+                                                ):
+        """
+        对拼接图像的左右子图的亮度进行调整
+
+        参数:
+        left_img: 左图像 (numpy array)
+        right_img: 右图像 (numpy array)
+        stitch_x_in_left: 做左图上面的拼接位置的x
+        stitch_x_in_right: 在右图上面的拼接位置的x
+        block_size:
+
+        返回:
+        亮度调整后的左右图像
+
+        算法思路:
+        我们有两张图片left_img,right_img进行水平拼接,
+        因为两张图片有x方向重叠区域 real_overlap_width
+        两张图片也有y方向的偏移 y_offset_right2left
+        所以两张图片是有真正的重叠区域的,真正的重叠区域的尺寸大概是 width = real_overlap_width,height = img_height-abs(y_offset_right2left)
+        我是可以分别在左图和右图把重叠区域弄出来的,分别为 left_overlap_region,right_overlap_region,并且他们尺寸是一致的
+        因为拍照的时候的光照是不均匀的,所以左右图的重叠区域是有亮度差异的
+        我现在的需求就是解决这种亮度差异
+        我的算法思路如下:
+        我可以把left_overlap_region和right_overlap_region分成多个block_sizexblock_size的区域,比如32x32的区域
+        对于每个区域,我都可以算出亮度均值,也就是hsv里面的v的均值,
+        然后把 left_overlap_region和right_overlap_region 都朝着这个均值调整
+        这样我们就得到了 亮度调整后的 left_overlap_region和right_overlap_region
+        再把 亮度调整后的 left_overlap_region和right_overlap_region 分别放回 adjusted_left_img 和 adjusted_right_right
+        那么我就就得到了亮度调整的 adjusted_left_img 和 adjusted_right_right
+        我就就可以用这两张图片进行拼接了
+
+        算法优化:
+        现在已经实现了基础的算法,但是还可以优化
+        如下代码中的两图的重叠有效区域为:left_effective 和 right_effective
+        然后再他们的基础上面进行网格的亮度的矫正,然后替换回原图
+        调整左右图的时候,分别是加上的 left_v_diff 和 right_v_diff
+        我觉得这里加上 left_v_diff * left_weight 和 right_v_diff * right_weight 效果会更好
+        那这两个 left_weight 和 right_weight 如何产生
+        对于左 left_effective,我希望这个 left_weight 是 0%到 200% 之间线性变化,
+        因为这样0%的区域刚好接近了左图的原图区域,100%的区域刚好接近了和右图的拼接区域
+        对于右 right_effective,我希望这个 right_weight 是 200%到 0% 之间线性变化,
+        因为这样0%的区域刚好接近了右图的原图区域,100%的区域刚好接近了和左图的拼接区域
+
+        再次优化:
+        在计算 left_block 和 right_block 的时候,如果左图或者右图里面的纯黑元素 也就是 rgb为(0,0,0)的元素超过10%
+        那么本次不进行计算,或者说直接把 left_v_diff 和 right_v_diff 设置为0
+        这是为了避免一张图是黑的,从而影响另一张图,这样会给另一张图加上黑斑
+
+        色调和饱和度方面的优化:
+        色调(H: hue),饱和度(S: saturation),亮度(V: value)。
+        如下代码中,其实只做了左右拼接图片亮度方面的调整,没有做色调和饱和度方面的调整
+        如果加上色调和饱和度方面的调整,整个拼图应该会更加优雅
+        色调和饱和度的调整思路,和亮度是一样的
+        """
+        assert left_img.shape[0] == right_img.shape[0], "左右图尺寸的高必须一样"
+
+        if self.debug:
+            print("警告",
+                  f"real_overlap_width : {real_overlap_width};  y_offset_right2left : {y_offset_right2left};  block_size : {block_size};  use_saturation_correct : {use_saturation_correct};  use_hue_correct : {use_hue_correct};  weight_ratio : {weight_ratio};  ")
+
+        img_height = left_img.shape[0]
+
+        # 创建左右图像的副本
+        adjusted_left_img = left_img.copy()
+        adjusted_right_img = right_img.copy()
+
+        if self.debug:
+            self.save_debug_image(adjusted_left_img, 'light_compensation_120_left_img_origin')
+            self.save_debug_image(adjusted_right_img, 'light_compensation_120_right_img_origin')
+
+        # 提取重叠区域
+        left_overlap = left_img[:, -real_overlap_width:]
+        right_overlap = right_img[:, :real_overlap_width]
+
+        if self.debug:
+            self.save_debug_image(left_overlap, 'light_compensation_140_left_overlap')
+            self.save_debug_image(right_overlap, 'light_compensation_140_right_overlap')
+
+        # 考虑y偏移,确定实际有效的重叠区域
+        effective_height = img_height - abs(y_offset_right2left)
+
+        if y_offset_right2left > 0:
+            left_effective = left_overlap[y_offset_right2left:, :]
+            right_effective = right_overlap[:effective_height, :]
+        else:
+            left_effective = left_overlap[:effective_height, :]
+            right_effective = right_overlap[abs(y_offset_right2left):, :]
+
+        if self.debug:
+            self.save_debug_image(left_effective, 'light_compensation_160_left_effective')
+            self.save_debug_image(right_effective, 'light_compensation_160_right_effective')
+
+        # 计算分块数量
+        num_blocks_y = effective_height // block_size
+        num_blocks_x = real_overlap_width // block_size
+
+        # 转换为HSV颜色空间
+        left_hsv = cv2.cvtColor(adjusted_left_img, cv2.COLOR_BGR2HSV)
+        right_hsv = cv2.cvtColor(adjusted_right_img, cv2.COLOR_BGR2HSV)
+
+        # 创建左右图像的权重矩阵 - 横向渐变
+        left_weights = np.zeros((real_overlap_width, 1))
+        right_weights = np.zeros((real_overlap_width, 1))
+
+        # 左图权重从0%到200%线性变化(从左到右)
+        for i in range(real_overlap_width):
+            left_weights[i] = i / (real_overlap_width - 1) * weight_ratio
+
+        # 右图权重从200%到0%线性变化(从左到右)
+        for i in range(real_overlap_width):
+            right_weights[i] = 2.0 - i / (real_overlap_width - 1) * weight_ratio
+
+        if self.debug:
+            # 可视化权重矩阵
+            left_weight_vis = (left_weights * 127.5).astype(np.uint8)
+            right_weight_vis = (right_weights * 127.5).astype(np.uint8)
+            left_weight_img = np.tile(left_weight_vis, (effective_height, 1))
+            right_weight_img = np.tile(right_weight_vis, (effective_height, 1))
+            self.save_debug_image(left_weight_img, 'light_compensation_180_left_weights')
+            self.save_debug_image(right_weight_img, 'light_compensation_180_right_weights')
+
+        # 定义黑色像素检测阈值(RGB值之和小于指定值认为是黑色)
+        black_threshold = 1  # 调整这个值以定义"黑色"
+        max_black_percentage = 0.05  # 最大可接受的黑色像素比例
+
+        # 对每个块进行亮度、色调和饱和度调整
+        for y in range(num_blocks_y):
+            for x in range(num_blocks_x):
+                # 计算当前块的位置
+                y_start = y * block_size
+                y_end = y_start + block_size
+                x_start = x * block_size
+                x_end = x_start + block_size
+
+                # 提取左右图像对应的块
+                left_block = left_effective[y_start:y_end, x_start:x_end]
+                right_block = right_effective[y_start:y_end, x_start:x_end]
+
+                # 检测黑色像素比例
+                left_black_pixels = np.sum(np.sum(left_block, axis=2) <= black_threshold)
+                right_black_pixels = np.sum(np.sum(right_block, axis=2) <= black_threshold)
+
+                left_black_percentage = left_black_pixels / (left_block.shape[0] * left_block.shape[1])
+                right_black_percentage = right_black_pixels / (right_block.shape[0] * right_block.shape[1])
+
+                # 如果任一块中黑色像素比例超过阈值,则跳过调整
+                if left_black_percentage > max_black_percentage or right_black_percentage > max_black_percentage:
+                    # if self.debug:
+                    #     log_info_signal_function("信息",
+                    #                              f"跳过块调整,黑色像素比例: 左={left_black_percentage:.2f}, 右={right_black_percentage:.2f}")
+                    continue
+
+                # 转换块到HSV颜色空间
+                left_block_hsv = cv2.cvtColor(left_block, cv2.COLOR_BGR2HSV)
+                right_block_hsv = cv2.cvtColor(right_block, cv2.COLOR_BGR2HSV)
+
+                # 计算各通道均值
+                left_h_mean = np.mean(left_block_hsv[:, :, 0])
+                right_h_mean = np.mean(right_block_hsv[:, :, 0])
+                left_s_mean = np.mean(left_block_hsv[:, :, 1])
+                right_s_mean = np.mean(right_block_hsv[:, :, 1])
+                left_v_mean = np.mean(left_block_hsv[:, :, 2])
+                right_v_mean = np.mean(right_block_hsv[:, :, 2])
+
+                # 计算目标值
+                # 对于色调H通道,使用角度平均方法
+                h_diff = ((right_h_mean - left_h_mean + 90) % 180) - 90
+                target_h_mean = (left_h_mean + h_diff / 2) % 180
+
+                # 对于饱和度S和亮度V通道,使用普通平均
+                target_s_mean = (left_s_mean + right_s_mean) / 2
+                target_v_mean = (left_v_mean + right_v_mean) / 2
+
+                # 计算调整值
+                # 色调使用角度差异
+                left_h_diff = ((target_h_mean - left_h_mean + 90) % 180) - 90
+                right_h_diff = ((target_h_mean - right_h_mean + 90) % 180) - 90
+
+                # 饱和度和亮度使用普通差异
+                left_s_diff = target_s_mean - left_s_mean
+                right_s_diff = target_s_mean - right_s_mean
+                left_v_diff = target_v_mean - left_v_mean
+                right_v_diff = target_v_mean - right_v_mean
+
+                # 获取当前块的平均权重
+                avg_left_weight = np.mean(left_weights[x_start:x_end])
+                avg_right_weight = np.mean(right_weights[x_start:x_end])
+
+                # 亮度较低区域饱和度调整应减小
+                v_factor_left = min(1.0, left_v_mean / 128)
+                v_factor_right = min(1.0, right_v_mean / 128)
+
+                # 根据权重调整差异值
+                weighted_left_h_diff = left_h_diff * avg_left_weight
+                weighted_right_h_diff = right_h_diff * avg_right_weight
+                weighted_left_s_diff = left_s_diff * avg_left_weight * v_factor_left
+                weighted_right_s_diff = right_s_diff * avg_right_weight * v_factor_right
+                weighted_left_v_diff = left_v_diff * avg_left_weight
+                weighted_right_v_diff = right_v_diff * avg_right_weight
+
+                # 计算左图中的实际位置
+                actual_left_x_start = left_img.shape[1] - real_overlap_width + x_start
+                actual_left_x_end = actual_left_x_start + block_size
+
+                # 计算右图中的实际位置
+                actual_right_x_start = x_start
+                actual_right_x_end = x_start + block_size
+
+                # 计算y方向的实际位置,考虑y偏移
+                if y_offset_right2left > 0:
+                    actual_left_y_start = y_start + y_offset_right2left
+                    actual_left_y_end = actual_left_y_start + block_size
+                    actual_right_y_start = y_start
+                    actual_right_y_end = y_start + block_size
+                else:
+                    actual_left_y_start = y_start
+                    actual_left_y_end = y_start + block_size
+                    actual_right_y_start = y_start + abs(y_offset_right2left)
+                    actual_right_y_end = actual_right_y_start + block_size
+
+                # 防止越界
+                actual_left_y_end = min(actual_left_y_end, left_hsv.shape[0])
+                actual_right_y_end = min(actual_right_y_end, right_hsv.shape[0])
+
+                # 调整左图HSV
+                # 色调H通道
+                if use_hue_correct:
+                    left_h_channel = left_hsv[actual_left_y_start:actual_left_y_end,
+                                     actual_left_x_start:actual_left_x_end,
+                                     0]
+                    left_h_channel = (left_h_channel + weighted_left_h_diff) % 180
+                    left_hsv[actual_left_y_start:actual_left_y_end, actual_left_x_start:actual_left_x_end,
+                    0] = left_h_channel
+
+                # 饱和度S通道
+                if use_saturation_correct:
+                    left_hsv[actual_left_y_start:actual_left_y_end, actual_left_x_start:actual_left_x_end, 1] = np.clip(
+                        left_hsv[actual_left_y_start:actual_left_y_end, actual_left_x_start:actual_left_x_end,
+                        1] + weighted_left_s_diff,
+                        0, 255
+                    )
+
+                # 亮度V通道
+                left_hsv[actual_left_y_start:actual_left_y_end, actual_left_x_start:actual_left_x_end, 2] = np.clip(
+                    left_hsv[actual_left_y_start:actual_left_y_end, actual_left_x_start:actual_left_x_end,
+                    2] + weighted_left_v_diff,
+                    0, 255
+                )
+
+                # 调整右图HSV
+                # 色调H通道
+                if use_hue_correct:
+                    right_h_channel = right_hsv[actual_right_y_start:actual_right_y_end,
+                                      actual_right_x_start:actual_right_x_end, 0]
+                    right_h_channel = (right_h_channel + weighted_right_h_diff) % 180
+                    right_hsv[actual_right_y_start:actual_right_y_end, actual_right_x_start:actual_right_x_end,
+                    0] = right_h_channel
+
+                # 饱和度S通道
+                if use_saturation_correct:
+                    right_hsv[actual_right_y_start:actual_right_y_end, actual_right_x_start:actual_right_x_end,
+                    1] = np.clip(
+                        right_hsv[actual_right_y_start:actual_right_y_end, actual_right_x_start:actual_right_x_end,
+                        1] + weighted_right_s_diff,
+                        0, 255
+                    )
+
+                # 亮度V通道
+                right_hsv[actual_right_y_start:actual_right_y_end, actual_right_x_start:actual_right_x_end,
+                2] = np.clip(
+                    right_hsv[actual_right_y_start:actual_right_y_end, actual_right_x_start:actual_right_x_end,
+                    2] + weighted_right_v_diff,
+                    0, 255
+                )
+
+        # 转换回BGR
+        adjusted_left_img = cv2.cvtColor(left_hsv, cv2.COLOR_HSV2BGR)
+        adjusted_right_img = cv2.cvtColor(right_hsv, cv2.COLOR_HSV2BGR)
+
+        if self.debug:
+            self.save_debug_image(adjusted_left_img, 'light_compensation_320_result_left')
+            self.save_debug_image(adjusted_right_img, 'light_compensation_320_result_right')
+
+            # 可视化调整前后的差异
+            left_diff = cv2.absdiff(left_img, adjusted_left_img)
+            right_diff = cv2.absdiff(right_img, adjusted_right_img)
+            self.save_debug_image(left_diff, 'light_compensation_330_left_diff')
+            self.save_debug_image(right_diff, 'light_compensation_330_right_diff')
+
+        return adjusted_left_img, adjusted_right_img
+
+    def blend_half_importance_partial_brightness_add_weight(self, left_img: np.ndarray, right_img: np.ndarray,
+                                                            stitch_img_width: int, stitch_img_height: int,
+                                                            y_offset_right2left: int, real_overlap_width: int,
+                                                            light_uniformity_compensation=False,
+                                                            light_uniformity_compensation_width=15,
+                                                            add_weight_rate=0.1,
+                                                            visualize=False):
+        """"左右一样重要性的拼接方式"""
+
+        real_overlap_width_half = int(real_overlap_width / 2)
+        # 05、实现简单拼接逻辑
+        stitch_init_img = np.zeros((stitch_img_height, stitch_img_width, 3), dtype=np.uint8)
+
+        right_img_start_x = stitch_img_width - right_img.shape[1] + real_overlap_width_half
+
+        # 如果进行图像补偿
+        if light_uniformity_compensation:
+            stitch_x_in_left = left_img.shape[1] - real_overlap_width_half
+            stitch_x_in_right = real_overlap_width_half
+            adjusted_left_img, adjusted_right_img = self.adjust_partial_brightness_for_stitching(
+                left_img, right_img,
+                real_overlap_width=real_overlap_width,
+                y_offset_right2left=y_offset_right2left,
+                block_size=light_uniformity_compensation_width)
+
+            if self.debug:
+                self.save_debug_image(adjusted_left_img, 'light_compensation_720_left_img_adjusted')
+                self.save_debug_image(adjusted_right_img, 'light_compensation_720_right_img_adjusted')
+            left_img = adjusted_left_img
+            right_img = adjusted_right_img
+            pass
+
+        # 左图直接放进去
+        # stitch_init_img[:left_img.shape[0], :left_img.shape[1]] = left_img
+        #
+        # # 右图纠正位置之后放进去
+        # if y_offset_right2left > 0:
+        #     stitch_init_img[y_offset_right2left:, right_img_start_x:] = \
+        #         right_img[:right_img.shape[0] - y_offset_right2left,real_overlap_width_half:]
+        # else:
+        #     stitch_init_img[:right_img.shape[0] + y_offset_right2left, right_img_start_x:] = \
+        #         right_img[abs(y_offset_right2left):,real_overlap_width_half:]
+
+        stitch_init_img = self.blend_half_importance_add_weight(
+            left_img=left_img,
+            right_img=right_img,
+            stitch_img_width=stitch_img_width,
+            stitch_img_height=stitch_img_height,
+            y_offset_right2left=y_offset_right2left,
+            real_overlap_width=real_overlap_width,
+            blend_ratio=add_weight_rate
+        )
+
+        # 可视化操作部分
+        if visualize:
+            green = (0, 255, 0)
+            red = (0, 0, 255)
+            blue = (0, 0, 255)
+            color3 = (255, 255, 0)
+            cv2.rectangle(stitch_init_img, (0, 0), (left_img.shape[1], left_img.shape[0]), color=green)  # 左上图
+
+            cv2.rectangle(stitch_init_img, (0, 0), (left_img.shape[1] - real_overlap_width, left_img.shape[0]),
+                          color=color3)
+
+            # 右下图
+            if y_offset_right2left > 0:
+                cv2.rectangle(stitch_init_img, (right_img_start_x, 0),
+                              (stitch_init_img.shape[1], stitch_init_img.shape[0]), color=red)
+                pass
+            else:
+                cv2.rectangle(stitch_init_img, (right_img_start_x, 0),
+                              (stitch_init_img.shape[1], stitch_init_img.shape[0]), color=blue)
+                pass
+
+        # return result
+        return stitch_init_img
+
+    def blend_half_importance_global_brightness(self, left_img: np.ndarray, right_img: np.ndarray,
+                                                stitch_img_width: int, stitch_img_height: int,
+                                                y_offset_right2left: int, real_overlap_width: int,
+                                                light_uniformity_compensation=False,
+                                                light_uniformity_compensation_width=15,
+                                                visualize=False):
+        """"左右一样重要性的拼接方式"""
+        # self.debug_dir = f"{self.init_debug}_{self.stitch_type}_half_importance"
+        # os.makedirs(self.debug_dir, exist_ok=True)
+        # if self.debug:
+        #     if data_center_algo_inner_signals_obj is not None:
+        #         data_center_algo_inner_signals_obj.log_info_signal.emit("警告",f"混合模式:左右半拼模式")
+
+        real_overlap_width_half = int(real_overlap_width / 2)
+        # 05、实现简单拼接逻辑
+        stitch_init_img = np.zeros((stitch_img_height, stitch_img_width, 3), dtype=np.uint8)
+        # 左图直接放进去
+        stitch_init_img[:left_img.shape[0], :left_img.shape[1]] = left_img
+
+        # 右图纠正位置之后放进去
+        right_img_start_x = stitch_img_width - right_img.shape[1] + real_overlap_width_half
+
+        # 如果进行图像补偿
+        if light_uniformity_compensation:
+            stitch_x_in_left = left_img.shape[1] - real_overlap_width_half
+            stitch_x_in_right = real_overlap_width_half
+            adjusted_right_img = self.adjust_global_brightness_for_stitching(left_img, right_img,
+                                                                             stitch_x_in_left=stitch_x_in_left,
+                                                                             stitch_x_in_right=stitch_x_in_right,
+                                                                             half_test_width=light_uniformity_compensation_width)
+            if self.debug:
+                self.save_debug_image(right_img, 'light_compensation_820_right_img_origin')
+                self.save_debug_image(adjusted_right_img, 'light_compensation_820_right_img_adjusted')
+            right_img = adjusted_right_img
+            pass
+
+        if y_offset_right2left > 0:
+            stitch_init_img[y_offset_right2left:, right_img_start_x:] = \
+                right_img[:right_img.shape[0] - y_offset_right2left, real_overlap_width_half:]
+        else:
+            stitch_init_img[:right_img.shape[0] + y_offset_right2left, right_img_start_x:] = \
+                right_img[abs(y_offset_right2left):, real_overlap_width_half:]
+
+        # 可视化操作部分
+        if visualize:
+            green = (0, 255, 0)
+            red = (0, 0, 255)
+            blue = (0, 0, 255)
+            color3 = (255, 255, 0)
+            cv2.rectangle(stitch_init_img, (0, 0), (left_img.shape[1], left_img.shape[0]), color=green)  # 左上图
+
+            cv2.rectangle(stitch_init_img, (0, 0), (left_img.shape[1] - real_overlap_width, left_img.shape[0]),
+                          color=color3)
+
+            # 右下图
+            if y_offset_right2left > 0:
+                cv2.rectangle(stitch_init_img, (right_img_start_x, 0),
+                              (stitch_init_img.shape[1], stitch_init_img.shape[0]), color=red)
+                pass
+            else:
+                cv2.rectangle(stitch_init_img, (right_img_start_x, 0),
+                              (stitch_init_img.shape[1], stitch_init_img.shape[0]), color=blue)
+                pass
+
+        # return result
+        return stitch_init_img
+
    def adjust_global_brightness_for_stitching(self, left_img, right_img, stitch_x_in_left, stitch_x_in_right,
                                               half_test_width=15):
        """
        Match the right image's brightness to the left image at the seam.

        Samples a thin vertical strip on each side of the stitch line,
        compares their mean V (HSV value), and fades the difference into the
        left third of the right image with a linear ramp.

        Parameters:
        left_img: left BGR image (numpy array)
        right_img: right BGR image (numpy array), same height as left_img
        stitch_x_in_left: x coordinate of the seam inside the left image
        stitch_x_in_right: x coordinate of the seam inside the right image
        half_test_width: width in px of the strip sampled on each side

        Returns:
        A brightness-adjusted copy of right_img (BGR uint8); the inputs are
        not modified.
        """
        # Sample strips next to the seam; the top and bottom bands are
        # excluded so edge vignetting does not skew the measurement.

        assert left_img.shape[0] == right_img.shape[0], "左右图尺寸的高必须一样"

        img_height = left_img.shape[0]
        # NOTE(review): img_width is currently unused.
        img_width = left_img.shape[1]
        # img_height_ignore = img_height//7
        # _250609_1106_ vertical band (top AND bottom) excluded from the measurement
        img_height_ignore = img_height // 6

        # stitch_x_in_right = img_width - stitch_x_in_left

        # left_test = left_img[img_height_ignore:-img_height_ignore, stitch_x_in_left-test_width:stitch_x_in_left + test_width]
        # right_test = right_img[img_height_ignore:-img_height_ignore, stitch_x_in_right-test_width:stitch_x_in_right + test_width]

        # _250609_1106_  left sample: only a half_test_width-wide strip just left of the seam
        left_test = left_img[img_height_ignore:-img_height_ignore, stitch_x_in_left - half_test_width:stitch_x_in_left]
        # _250609_1107_ right sample: only a half_test_width-wide strip just right of the seam
        right_test = right_img[img_height_ignore:-img_height_ignore,
                     stitch_x_in_right:stitch_x_in_right + half_test_width]

        if self.debug:
            self.save_debug_image(left_test, 'light_compensation_140_left_test')
            self.save_debug_image(right_test, 'light_compensation_140_right_test')

        # Convert the sample strips to HSV.
        left_hsv = cv2.cvtColor(left_test, cv2.COLOR_BGR2HSV)
        right_hsv = cv2.cvtColor(right_test, cv2.COLOR_BGR2HSV)

        # _250609_1117_ the brightness difference is measured only on these narrow strips

        # Mean difference of the V (brightness) channel across the seam.
        left_v = left_hsv[:, :, 2].mean()
        right_v = right_hsv[:, :, 2].mean()
        v_diff = left_v - right_v

        # Work on a copy so the caller's right image is untouched.
        adjusted_right = right_img.copy()

        # Width of the region to adjust (left third of the right image).
        adjust_width = right_img.shape[1] // 3

        # Linear ramp from full correction (at the seam) down to none.
        gradient = np.linspace(1, 0, adjust_width)
        gradient = np.tile(gradient, (right_img.shape[0], 1))

        # Convert the full right image to HSV for the correction.
        right_img_hsv = cv2.cvtColor(adjusted_right, cv2.COLOR_BGR2HSV)

        # Apply the ramped V correction.
        v_adjustment = gradient * v_diff
        # _250609_1119_ only the left part of the right image is adjusted
        right_img_hsv[:, :adjust_width, 2] = np.clip(
            right_img_hsv[:, :adjust_width, 2] + v_adjustment, 0, 255
        )

        # Back to BGR.
        adjusted_right = cv2.cvtColor(right_img_hsv, cv2.COLOR_HSV2BGR)

        if self.debug:
            # Build a report image with the measured values.
            info_dict = {
                'half_test_width': half_test_width,
                'left_v': left_v,
                'right_v': right_v,
                'v_diff': v_diff,
                'adjust_width': adjust_width,
            }

            # Report writer helper (project-local).
            fry_image_write = FryImageWrite()  # larger canvas gives a nicer report

            # One call renders the whole report.
            fry_image_write.create_report("局部亮度调整", info_dict)

            self.save_debug_image(fry_image_write.get_image(), 'light_compensation_160_detail_info')

        return adjusted_right
+
+    def blend_half_importance_add_weight(self, left_img: np.ndarray, right_img: np.ndarray,
+                                         stitch_img_width: int, stitch_img_height: int,
+                                         y_offset_right2left: int, real_overlap_width: int,
+                                         blend_ratio: float = 0.3):
+        """Stitch with a gradient-weighted blend inside the overlap region.
+
+        Steps:
+        1. Get a base stitch via blend_half_importance.
+        2. Extract the overlap region from the left and right images.
+        3. Linearly weight-blend the centre strip of the overlap.
+        4. Paste the blended overlap back onto the base result.
+
+        Parameters:
+        left_img / right_img: source images (BGR numpy arrays)
+        stitch_img_width / stitch_img_height: size of the output canvas
+        y_offset_right2left: vertical offset of the right image relative to the left
+        real_overlap_width: measured overlap width in pixels
+        blend_ratio: fraction of the overlap that is gradient-blended,
+            clamped to [0.03, 1]; default 0.3
+
+        Returns:
+        The stitched image (numpy array) with the blended overlap applied.
+        """
+
+        # NOTE(review): unused flag, kept from an earlier revision.
+        inner_debug_model = False
+
+        # if self.debug:
+        #     log_info_signal_function("警告",f"混合模式:左右权重融合模式")
+
+        # if self.debug:
+        #     log_info_signal_function("信息",f"blend_ratio 的值为:{blend_ratio}")
+
+        # Clamp blend_ratio into a sane range.
+        if blend_ratio > 1:
+            blend_ratio = 1
+        if blend_ratio < 0.03:
+            blend_ratio = 0.03
+
+        if self.debug:
+            print("信息", f"blend_ratio 的值处理后为:{blend_ratio}")
+
+        # 1. Base stitch result.
+        base_result = self.blend_half_importance(left_img, right_img,
+                                                 stitch_img_width, stitch_img_height,
+                                                 y_offset_right2left, real_overlap_width)
+
+        if self.debug:
+            self.save_debug_image(base_result, 'b_410_base_result_before_weight_blend')
+
+        # 2. Extract the overlap regions from both inputs.
+        left_width = left_img.shape[1]
+        left_overlap = left_img[:, left_img.shape[1] - real_overlap_width:left_img.shape[1]]
+        right_overlap = right_img[:, :real_overlap_width]
+
+        if self.debug:
+            self.save_debug_image(left_overlap, 'b_422_left_overlap_region')
+            self.save_debug_image(right_overlap, 'b_424_right_overlap_region')
+
+        # 4. Re-align the right overlap vertically according to the y offset.
+        correct_right_overlap = np.zeros((stitch_img_height, real_overlap_width, 3), dtype=np.uint8)
+        if y_offset_right2left > 0:
+            correct_right_overlap[y_offset_right2left:, :] = \
+                right_overlap[:right_overlap.shape[0] - y_offset_right2left, :]
+        else:
+            correct_right_overlap[:right_img.shape[0] + y_offset_right2left, :] = \
+                right_overlap[abs(y_offset_right2left):, :]
+
+        if self.debug:
+            self.save_debug_image(correct_right_overlap, 'b_432_correct_right_overlap')
+            self.save_debug_image(left_overlap, 'b_434_left_overlap_region')
+
+        # Centre strip of the overlap that actually gets gradient-blended.
+        real_blend_width = int(round(real_overlap_width * blend_ratio))
+        real_blend_start_x = int(round((real_overlap_width - real_blend_width) / 2))
+        left_overlap_blend_area = left_overlap[:, real_blend_start_x:real_blend_start_x + real_blend_width]
+        right_overlap_blend_area = correct_right_overlap[:, real_blend_start_x:real_blend_start_x + real_blend_width]
+
+        if self.debug:
+            self.save_debug_image(left_overlap_blend_area, 'b_442_left_overlap_blend_area')
+            self.save_debug_image(right_overlap_blend_area, 'b_444_right_overlap_blend_area')
+
+        # 3. Weight matrix: 1 -> 0 left-to-right across the blend strip.
+
+        weights = np.linspace(1, 0, real_blend_width)
+        weights = weights.reshape(1, -1, 1)  # broadcastable over (height, width, channels)
+
+        # 5. Gradient-weighted fusion of the two strips.
+        blended_overlap = np.round(left_overlap_blend_area * weights + right_overlap_blend_area * (1 - weights)).astype(
+            np.uint8)
+
+        if self.debug:
+            self.save_debug_image(blended_overlap, 'b_446_blended_overlap_region')
+
+        # Reassemble the full overlap: left part | blended strip | right part.
+        final_blended_overlap = left_overlap.copy()
+        final_blended_overlap[:, real_blend_start_x:real_blend_start_x + real_blend_width] = blended_overlap[:, :]
+        final_blended_overlap[:, real_blend_start_x + real_blend_width:] = correct_right_overlap[:,
+                                                                           real_blend_start_x + real_blend_width:]
+
+        if self.debug:
+            # NOTE(review): saves blended_overlap, but the file name suggests
+            # final_blended_overlap was intended here.
+            self.save_debug_image(blended_overlap, 'b_448_final_blended_overlap')
+
+        # 6. Paste the blended overlap into its position in the base result.
+        blended_overlap_width = final_blended_overlap.shape[1]
+        base_result[:, left_width - blended_overlap_width:left_width] = final_blended_overlap[:, :]
+
+        if self.debug:
+            self.save_debug_image(base_result, 'b_450_final_result_with_weight_blend')
+
+            # Dump the blend parameters for offline inspection.
+            # NOTE(review): real_blend_width is written twice below.
+            with open(os.path.join(self.debug_dir, 'b_520_weight_blend_info.txt'), 'w') as f:
+                f.write(f"blend_ratio: {blend_ratio}\n")
+                f.write(f"Overlap width: {real_overlap_width}\n")
+                f.write(f"real_blend_width: {real_blend_width}\n")
+                f.write(f"Y offset: {y_offset_right2left}\n")
+                f.write(f"Left overlap shape: {left_overlap.shape}\n")
+                f.write(f"Right overlap shape: {right_overlap.shape}\n")
+                f.write(f"Blended overlap shape: {blended_overlap.shape}\n")
+                f.write(f"real_blend_width: {real_blend_width}\n")
+                f.write(f"real_blend_start_x: {real_blend_start_x}\n")
+
+        return base_result

+ 392 - 0
fry_project_classes/fry_image_write_V03_250401.py

@@ -0,0 +1,392 @@
+import cv2
+import numpy as np
+from datetime import datetime
+from PIL import Image, ImageDraw, ImageFont
+from typing import Tuple, Union, Dict
+import re
+
+
+class FryImageWrite:
+    """
+    Image-report canvas with text drawing helpers.
+
+    Supports both English (OpenCV) and Chinese (PIL/TrueType) text
+    rendering plus basic shapes (rectangles, lines) on a BGR canvas.
+    """
+
+    def __init__(self, width: int = 1920, height: int = 1080,
+                 background_color: Tuple[int, int, int] = (255, 255, 255)):
+        """
+        Initialise the canvas.
+
+        Parameters:
+        width: canvas width in pixels
+        height: canvas height in pixels
+        background_color: background colour in BGR
+        """
+        self.width = width
+        self.height = height
+        self.image = np.full((height, width, 3), background_color, dtype=np.uint8)
+
+    def add_text(self, text: str, position: Tuple[int, int],
+                 font_size: int = 32, color: Tuple[int, int, int] = (0, 0, 0),
+                 font_path: str = "simhei.ttf", thickness: int = 2) -> None:
+        """
+        Unified text drawing: auto-detects Chinese vs. English and aligns baselines.
+
+        Parameters:
+        text: text content
+        position: (x, y) anchor
+        font_size: font size in pixels
+        color: text colour (B, G, R)
+        font_path: TrueType font file used for Chinese text
+        thickness: stroke thickness for English (OpenCV) text
+        """
+        x, y = position
+
+        if self.is_chinese(text):
+            # Chinese branch: rendered via PIL.
+            try:
+                # Load the font to measure the text box.
+                font = ImageFont.truetype(font_path, font_size)
+                # Text extents for baseline alignment.
+                bbox = font.getbbox(text)
+                # NOTE(review): adjusted_y is computed but never used — the
+                # call below passes the original (x, y), so the baseline
+                # correction has no effect in this branch.
+                adjusted_y = y + bbox[3] - font_size
+                self.add_text_cn(text, (x, y), font_size, color, font_path)
+            except Exception as e:
+                print(f"字体加载失败: {e}")
+                self.add_text_cn(text, (x, y), font_size, color, font_path)
+        else:
+            # English branch: rendered via OpenCV.
+            font_scale = font_size / 32
+            # OpenCV anchors text at the bottom-left; shift down by the
+            # measured text height so the given y behaves like a top edge.
+            text_size = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX,
+                                        font_scale, thickness)[0]
+            adjusted_y = y + text_size[1]  # add text height to align the baseline
+            self.add_text_en(text, (x, adjusted_y), font_scale, color, thickness)
+
+    def add_text_cn(self, text: str, position: Tuple[int, int],
+                    font_size: int = 32, color: Tuple[int, int, int] = (0, 0, 0),
+                    font_path: str = "simhei.ttf") -> None:
+        """
+        Draw Chinese text via PIL.
+
+        Parameters:
+        text: text content
+        position: (x, y) anchor; the text is shifted up by half its height
+        font_size: font size
+        color: text colour (B, G, R)
+        font_path: TrueType font file path
+        """
+        img_pil = Image.fromarray(cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB))
+        draw = ImageDraw.Draw(img_pil)
+
+        try:
+            font = ImageFont.truetype(font_path, font_size)
+        except Exception as e:
+            print(f"字体加载失败: {e}")
+            font = ImageFont.load_default()
+
+        # PIL expects RGB; the class stores BGR.
+        color_rgb = (color[2], color[1], color[0])
+
+        # Measure the text box.
+        bbox = font.getbbox(text)
+        # Vertical centring: shift up by half the text height.
+        text_height = bbox[3] - bbox[1]
+        y_offset = text_height // 2
+        x, y = position
+        adjusted_position = (x, y - y_offset)
+
+        draw.text(adjusted_position, text, font=font, fill=color_rgb)
+
+        self.image = cv2.cvtColor(np.array(img_pil), cv2.COLOR_RGB2BGR)
+
+    def add_text_en(self, text: str, position: Tuple[int, int],
+                    font_scale: float = 1.0, color: Tuple[int, int, int] = (0, 0, 0),
+                    thickness: int = 2, font_face: int = cv2.FONT_HERSHEY_SIMPLEX) -> None:
+        """
+        Draw English text via OpenCV, vertically centred on the given y.
+        """
+        # Measure the text box.
+        text_size = cv2.getTextSize(text, font_face, font_scale, thickness)[0]
+        x, y = position
+
+        # OpenCV requires integer pixel coordinates.
+        x = int(x)
+        y = int(y)
+
+        # Vertical centring: shift up by half the text height.
+        adjusted_y = y - text_size[1] // 2
+
+        # Keep the adjusted coordinate integral as well.
+        adjusted_y = int(adjusted_y)
+
+        # Draw with anti-aliased strokes.
+        cv2.putText(self.image, text, (x, adjusted_y), font_face, font_scale,
+                    color, thickness, cv2.LINE_AA)
+
+    def is_chinese(self, text: str) -> bool:
+        """
+        Return True if the text contains at least one CJK (U+4E00-U+9FFF) character.
+
+        Parameters:
+        text: text to inspect
+
+        Returns:
+        bool: whether Chinese characters are present
+        """
+        pattern = re.compile(r'[\u4e00-\u9fff]')
+        return bool(pattern.search(text))
+
+    def add_dict_info(self, info_dict: Dict[str, str],
+                      start_position: Tuple[int, int] = (50, 50),
+                      line_spacing: int = 30,
+                      label_value_spacing: int = 150,
+                      font_size: int = 24,
+                      label_color: Tuple[int, int, int] = (0, 0, 0),
+                      value_color_map: Dict[str, Tuple[int, int, int]] = None) -> None:
+        """
+        Render a dict as "label: value" rows on the canvas.
+
+        Parameters:
+        info_dict: labels mapped to display values
+        start_position: (x, y) of the first row
+        line_spacing: vertical distance between rows
+        label_value_spacing: horizontal gap between label and value columns
+        font_size: font size
+        label_color: colour for labels (and unmapped values)
+        value_color_map: per-value colour overrides, e.g. {"PASS": (0, 255, 0), "FAIL": (0, 0, 255)}
+        """
+        if value_color_map is None:
+            value_color_map = {}
+
+        x, y = start_position
+
+        for idx, (key, value) in enumerate(info_dict.items()):
+            current_y = y + idx * line_spacing
+
+            # Label column.
+            self.add_text(f"{key}:", (x, current_y), font_size, label_color)
+
+            # Value colour: mapped colour or fall back to the label colour.
+            value_color = value_color_map.get(str(value), label_color)
+
+            # Value column.
+            self.add_text(str(value), (x + label_value_spacing, current_y),
+                          font_size, value_color)
+
+    def add_rectangle(self, start_point: Tuple[int, int],
+                      end_point: Tuple[int, int],
+                      color: Tuple[int, int, int] = (0, 0, 0),
+                      thickness: int = 2) -> None:
+        """
+        Draw a rectangle on the canvas.
+
+        Parameters:
+        start_point: top-left corner (x, y)
+        end_point: bottom-right corner (x, y)
+        color: colour (B, G, R)
+        thickness: line thickness; -1 fills the rectangle
+        """
+        cv2.rectangle(self.image, start_point, end_point, color, thickness)
+
+    def add_line(self, start_point: Tuple[int, int],
+                 end_point: Tuple[int, int],
+                 color: Tuple[int, int, int] = (0, 0, 0),
+                 thickness: int = 2) -> None:
+        """
+        Draw a straight line on the canvas.
+
+        Parameters:
+        start_point: start point (x, y)
+        end_point: end point (x, y)
+        color: colour (B, G, R)
+        thickness: line thickness
+        """
+        cv2.line(self.image, start_point, end_point, color, thickness)
+
+    def save_image(self, file_path: str) -> bool:
+        """
+        Write the canvas to disk.
+
+        Parameters:
+        file_path: destination path
+
+        Returns:
+        bool: True on success, False if writing raised
+        """
+        try:
+            cv2.imwrite(file_path, self.image)
+            return True
+        except Exception as e:
+            print(f"保存图片失败: {e}")
+            return False
+
+    def get_image(self) -> np.ndarray:
+        """
+        Return a copy of the canvas.
+
+        Returns:
+        numpy.ndarray: defensive copy of the BGR image
+        """
+        return self.image.copy()
+
+    def create_report(self, title: str, info_dict: Dict[str, str],
+                      value_color_map: Dict[str, Tuple[int, int, int]] = None) -> None:
+        """
+        Render a standard report layout: centred title, border, separator, key/value rows.
+
+        Parameters:
+        title: report title
+        info_dict: labels mapped to display values (must be non-empty)
+        value_color_map: per-value colour overrides, e.g. {"PASS": (0, 255, 0), "FAIL": (0, 0, 255)}
+        """
+        # Default colour mapping for common verdict strings.
+        if value_color_map is None:
+            value_color_map = {
+                "PASS": (0, 255, 0),  # green
+                "FAIL": (0, 0, 255),  # red
+                "Good": (0, 255, 0),  # green
+                "Bad": (0, 0, 255)  # red
+            }
+
+        # Dynamic margin/content box derived from the canvas size.
+        margin = min(self.width, self.height) // 20  # dynamic margin
+        content_width = self.width - 2 * margin
+        content_height = self.height - 2 * margin
+
+        # Title size scaled to fit the content box, clamped to [36, 72].
+        title_font_size = min(content_width // len(title) if len(title) > 0 else content_width,
+                              content_height // 8)
+        title_font_size = min(72, max(36, title_font_size))  # clamp the title font size
+
+        # Title centred horizontally.
+        title_x = self.width // 2
+        title_y = margin + title_font_size
+
+        # Measure the rendered title width for centring.
+        if self.is_chinese(title):
+            font = ImageFont.truetype("simhei.ttf", title_font_size)
+            title_width = font.getbbox(title)[2]
+        else:
+            title_width = cv2.getTextSize(title, cv2.FONT_HERSHEY_SIMPLEX,
+                                          title_font_size / 32, 2)[0][0]
+
+        # Draw the (centred) title.
+        self.add_text(title, (title_x - title_width // 2, title_y),
+                      title_font_size, (0, 0, 0))
+
+        # Content area below the title.
+        content_start_y = title_y + title_font_size + margin
+        content_area_height = self.height - content_start_y - margin
+
+        # Font size and line spacing scaled to the number of rows.
+        # NOTE(review): an empty info_dict raises ZeroDivisionError /
+        # ValueError in the three statements below.
+        item_count = len(info_dict)
+        font_size = min(32, max(18, int(content_area_height / (item_count * 2))))
+        line_spacing = max(font_size * 1.8, min(content_area_height / item_count, 50))
+
+        # Label/value column gap derived from the longest label.
+        max_label_len = max(len(str(key)) for key in info_dict.keys())
+        label_value_spacing = max(150, max_label_len * font_size // 2)
+
+        # Outer border.
+        border_margin = margin // 2
+        self.add_rectangle(
+            (border_margin, border_margin),
+            (self.width - border_margin, self.height - border_margin),
+            (0, 0, 0), 2
+        )
+
+        # Separator line between title and content.
+        self.add_line(
+            (border_margin, title_y + title_font_size // 2 + margin // 2),
+            (self.width - border_margin, title_y + title_font_size // 2 + margin // 2),
+            (0, 0, 0), 1
+        )
+
+        # Render the key/value rows.
+        # NOTE(review): margin * 1.5 yields a float x coordinate; add_text_en
+        # casts it to int, but the PIL path receives the float unchanged.
+        self.add_dict_info(
+            info_dict,
+            start_position=(margin * 1.5, content_start_y),
+            line_spacing=int(line_spacing),
+            label_value_spacing=label_value_spacing,
+            font_size=font_size,
+            value_color_map=value_color_map
+        )
+
+
+# Demo / manual test code
+def test_fry_image_write():
+    """
+    Manually exercise FryImageWrite's low-level API (title, border, dict
+    rows) and save the result to test_output.jpg.
+    """
+    # Create the writer with an 800x600 canvas.
+    image_writer = FryImageWrite(800, 600)
+
+    # Sample data to display.
+    info_dict = {
+        "产品型号": "ABC-123",
+        "序列号": "SN20240101001",
+        "检测时间": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
+        "测试结果": "PASS",
+        "Product": "Camera",
+        "Status": "FAIL",
+        "温度": "25摄氏度",
+        "Quality": "Good"
+    }
+
+    # Per-value display colours.
+    value_color_map = {
+        "PASS": (0, 255, 0),  # green
+        "FAIL": (0, 0, 255),  # red
+        "Good": (0, 255, 0)  # green
+    }
+
+    # Title
+    image_writer.add_text("测试报告", (300, 30), 48, (0, 0, 0))
+    # Border
+    image_writer.add_rectangle((30, 80), (770, 580), (0, 0, 0), 2)
+
+    # Key/value rows
+    image_writer.add_dict_info(
+        info_dict,
+        start_position=(50, 100),
+        line_spacing=40,
+        label_value_spacing=200,
+        font_size=28,
+        value_color_map=value_color_map
+    )
+
+    # Save the rendered report.
+    image_writer.save_image("test_output.jpg")
+
+
+def test_fry_image_write2():
+    """
+    Manually exercise the one-call create_report API and save the result
+    to test_output.jpg.
+    """
+    # Create the writer (larger canvas for a nicer layout).
+    image_writer = FryImageWrite(1280, 800)  # larger size gives a better-looking report
+
+    # Sample data to display.
+    info_dict = {
+        "产品型号": "ABC-123",
+        "序列号": "SN20240101001",
+        "检测时间": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
+        "测试结果": "PASS",
+        "Product": "Camera",
+        "Status": "FAIL",
+        "温度": "25摄氏度",
+        "Quality": "Good"
+    }
+
+    # One call builds the full report layout.
+    image_writer.create_report("测试报告", info_dict)
+
+    # Save the rendered report.
+    image_writer.save_image("test_output.jpg")
+
+
+# Run the demo when executed as a script.
+if __name__ == "__main__":
+    test_fry_image_write2()

+ 630 - 0
fry_project_classes/stitch_img_key_point.py

@@ -0,0 +1,630 @@
+import sys
+import logging
+from pathlib import Path
+
+import cv2
+import numpy as np
+import os
+import time
+from typing import Optional, Tuple, List
+from dataclasses import dataclass
+from fry_project_classes.blend_type_mixin import BlendTypeMixin
+
+# Optional notification hub injected by the host application; while it stays
+# None, the print()-based status messages in this module are suppressed.
+data_center_algo_inner_signals_obj = None
+
+
+@dataclass
+class FeatureMatchResult:
+    """Result bundle of one feature-detection + matching pass."""
+    keypoints1: List[cv2.KeyPoint]  # keypoints detected in the first image
+    keypoints2: List[cv2.KeyPoint]  # keypoints detected in the second image
+    matches: List[cv2.DMatch]  # retained (good/inlier) matches
+    transform_matrix: Optional[np.ndarray]  # estimated 2x3 partial-affine transform, or None
+    match_score: float  # inlier ratio of the final estimation
+    offset_x: int  # rounded x translation component of the transform
+    offset_y: int  # rounded y translation component of the transform
+
+
+class ImageStitcherKeyPoint(BlendTypeMixin):
+    """基于特征点的图像拼接器"""
+
+    def __init__(self, estimate_overlap_pixels=800,
+                 center_ratio=0.8,
+                 stitch_type="vertical",
+                 blend_type='half_importance',
+                 debug=False,
+                 debug_dir='debug_output',
+                 min_matches=10,
+                 feature_detector='akaze',
+                 blend_ratio: float = 0.3,
+                 combine_detectors=False):
+        """
+        Initialise the keypoint-based stitcher.
+
+        Parameters:
+        estimate_overlap_pixels: estimated overlap size in pixels
+        center_ratio: ratio of the central region used
+        stitch_type: stitching direction ('vertical' or 'horizontal')
+        blend_type: blend mode ('half_importance', 'right_first', 'half_importance_add_weight')
+        debug: enable debug output
+        debug_dir: base directory for debug images
+        min_matches: minimum number of feature matches required
+        feature_detector: detector type ('akaze', 'sift', 'orb', 'brisk', 'combine')
+        blend_ratio: fraction of the overlap used for gradient blending
+        combine_detectors: whether to combine multiple detectors
+        """
+
+        if data_center_algo_inner_signals_obj is not None:
+            print("警告", f"拼图方法:关键点")
+
+        self.estimate_overlap_pixels = estimate_overlap_pixels
+        self.estimate_non_overlap_pixels = None
+        self.center_ratio = center_ratio
+        self.blend_type = blend_type
+        self.stitch_type = stitch_type
+        self.debug = debug
+        # NOTE(review): 'init_debug' actually stores the base debug directory.
+        self.init_debug = debug_dir
+        self.min_matches = min_matches
+        self.blend_ratio = blend_ratio
+
+        if self.debug:
+            # Debug output goes to a directory named after the configuration.
+            self.debug_dir = f"{self.init_debug}_{self.stitch_type}_{self.blend_type}"
+            os.makedirs(self.debug_dir, exist_ok=True)
+
+        # Default feature detector / descriptor extractor.
+        self.detector = cv2.AKAZE_create()
+        # Brute-force matcher; Hamming distance suits AKAZE's binary descriptors.
+        self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
+
+        # Match score and transform state (filled in during matching).
+        self.best_score = -1
+        self.match_score = -1
+        self.transform_matrix = None
+
+        # Measured overlap width (set later during stitching).
+        self.real_overlap_width = None
+
+        # Initialise the configurable detector pool.
+        self.feature_detector = feature_detector.lower()
+        self.combine_detectors = combine_detectors
+        self.detectors = self._init_feature_detectors()
+
+    def _init_feature_detectors(self):
+        """Create the pool of available detectors (AKAZE / SIFT / ORB / BRISK)."""
+        detectors = {}
+
+        try:
+            # AKAZE detector
+            detectors['akaze'] = cv2.AKAZE_create()
+
+            # SIFT detector (needs opencv-contrib-python on older OpenCV builds)
+            detectors['sift'] = cv2.SIFT_create()
+
+            # ORB detector
+            detectors['orb'] = cv2.ORB_create(nfeatures=2000,
+                                              scaleFactor=1.2,
+                                              nlevels=8)
+
+            # BRISK detector
+            detectors['brisk'] = cv2.BRISK_create()
+
+        except Exception as e:
+            # NOTE(review): a mid-way failure keeps the detectors created so
+            # far; only the ones after the failing call are missing.
+            error_info = f"Warning: Some detectors could not be initialized: {str(e)}"
+            if data_center_algo_inner_signals_obj is not None:
+                print("警告", error_info)
+
+        if not detectors:
+            raise ValueError("No feature detectors could be initialized")
+
+        return detectors
+
+    def _get_detector_and_matcher(self, detector_name):
+        """Return (detector, matcher) with a distance norm suited to the detector."""
+        if detector_name not in self.detectors:
+            raise ValueError(f"Unsupported detector: {detector_name}")
+
+        detector = self.detectors[detector_name]
+
+        # Pick the matcher norm appropriate for the descriptor type.
+        if detector_name in ['sift', 'surf']:
+            # L2 norm suits float descriptors (SIFT / SURF).
+            matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
+        else:
+            # Hamming distance suits binary descriptors (AKAZE, ORB, BRISK).
+            matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
+
+        return detector, matcher
+
+    def detect_and_match_features(self, img1: np.ndarray, img2: np.ndarray) -> FeatureMatchResult:
+        """Detect and match features with the configured detector ('combine' = try all)."""
+        try:
+            if self.feature_detector == 'combine':
+                return self._detect_and_match_combined(img1, img2)
+            else:
+                return self._detect_and_match_single(img1, img2, self.feature_detector)
+
+        except Exception as e:
+
+            # Report the failure (when a signal hub is present), then re-raise.
+            if data_center_algo_inner_signals_obj is not None:
+                print("警告",
+                                                                        f"Feature detection and matching failed: {str(e)}")
+
+            raise
+
+    def _detect_and_match_single(self, img1: np.ndarray, img2: np.ndarray,
+                                 detector_name: str) -> FeatureMatchResult:
+        """Run one detector on both images and match the resulting descriptors."""
+        detector, matcher = self._get_detector_and_matcher(detector_name)
+
+        # Detect keypoints and compute descriptors for both images.
+        keypoints1, descriptors1 = detector.detectAndCompute(img1, None)
+        keypoints2, descriptors2 = detector.detectAndCompute(img2, None)
+
+        if self.debug:
+            # Visualise the detected keypoints.
+            img1_kp = cv2.drawKeypoints(img1, keypoints1, None, (255, 0, 0))
+            img2_kp = cv2.drawKeypoints(img2, keypoints2, None, (255, 0, 0))
+            self.save_debug_image(img1_kp, f'keypoints_img1_{detector_name}')
+            self.save_debug_image(img2_kp, f'keypoints_img2_{detector_name}')
+
+        feature_match_result = self._match_features(keypoints1, keypoints2, descriptors1, descriptors2,
+                                                    matcher, detector_name, img1=img1, img2=img2)
+
+        return feature_match_result
+
+    def _detect_and_match_combined(self, img1: np.ndarray, img2: np.ndarray) -> FeatureMatchResult:
+        """Run every available detector and keep the result with the best match score."""
+        all_results = []
+
+        # Try feature detection + matching with each detector independently.
+        for detector_name, detector in self.detectors.items():
+            try:
+                # Matcher with the norm appropriate for this detector.
+                matcher = self._get_detector_and_matcher(detector_name)[1]
+                kp1, desc1 = detector.detectAndCompute(img1, None)
+                kp2, desc2 = detector.detectAndCompute(img2, None)
+
+                if desc1 is not None and desc2 is not None:
+                    # Match and rank by descriptor distance.
+                    matches = matcher.match(desc1, desc2)
+                    matches = sorted(matches, key=lambda x: x.distance)
+
+                    # Keep at most the 50 closest matches.
+                    good_matches = matches[:min(50, len(matches))]
+
+                    if len(good_matches) >= self.min_matches:
+                        if self.debug:
+                            # Visualise this detector's keypoints and matches.
+                            img1_kp = cv2.drawKeypoints(img1, kp1, None, (255, 0, 0))
+                            img2_kp = cv2.drawKeypoints(img2, kp2, None, (255, 0, 0))
+                            self.save_debug_image(img1_kp, f'match_120_keypoints_img1_{detector_name}')
+                            self.save_debug_image(img2_kp, f'match_140_keypoints_img2_{detector_name}')
+
+                            match_img = cv2.drawMatches(img1, kp1, img2, kp2, good_matches, None,
+                                                        flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
+                            self.save_debug_image(match_img, f'match_160_feature_matches_{detector_name}')
+
+                        # Estimate a partial-affine transform from the matches.
+                        src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+                        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+                        transform_matrix, mask = cv2.estimateAffinePartial2D(src_pts, dst_pts)
+
+                        # Score = inlier ratio of the estimation.
+                        match_score = np.sum(mask) / len(mask)
+
+                        # Collect this detector's result.
+                        all_results.append({
+                            'keypoints1': kp1,
+                            'keypoints2': kp2,
+                            'matches': good_matches,
+                            'transform_matrix': transform_matrix,
+                            'match_score': match_score,
+                            'detector_name': detector_name
+                        })
+
+                        if self.debug:
+                            # Per-detector match statistics for offline inspection.
+                            with open(os.path.join(self.debug_dir, f'match_220_match_info_{detector_name}.txt'),
+                                      'w') as f:
+                                f.write(f"Number of keypoints in img1: {len(kp1)}\n")
+                                f.write(f"Number of keypoints in img2: {len(kp2)}\n")
+                                f.write(f"Number of matches: {len(matches)}\n")
+                                f.write(f"Number of good matches: {len(good_matches)}\n")
+                                f.write(f"Match score: {match_score}\n")
+                                if transform_matrix is not None:
+                                    f.write(f"Transform matrix:\n{transform_matrix}\n")
+
+            except Exception as e:
+
+                # A failing detector is skipped; the loop continues with the rest.
+                if data_center_algo_inner_signals_obj is not None:
+                    print("警告",
+                                                                            f"Warning: Detection failed for {detector_name}: {str(e)}")
+
+                continue
+
+        if not all_results:
+            raise ValueError("No successful feature detection and matching results")
+
+        # Keep the detector result with the highest inlier ratio.
+        best_result = max(all_results, key=lambda x: x['match_score'])
+
+        # Integer translation components of the winning transform.
+        # NOTE(review): assumes transform_matrix is not None; a failed
+        # estimation would raise TypeError here.
+        offset_x = int(round(best_result['transform_matrix'][0, 2]))
+        offset_y = int(round(best_result['transform_matrix'][1, 2]))
+
+        # Publish the winning score/transform on the instance.
+        self.match_score = best_result['match_score']
+        self.transform_matrix = best_result['transform_matrix']
+
+        if self.debug:
+            with open(os.path.join(self.debug_dir, 'match_240_best_detector_info.txt'), 'w') as f:
+                f.write(f"Best detector: {best_result['detector_name']}\n")
+                f.write(f"Best match score: {best_result['match_score']}\n")
+
+        return FeatureMatchResult(
+            keypoints1=best_result['keypoints1'],
+            keypoints2=best_result['keypoints2'],
+            matches=best_result['matches'],
+            transform_matrix=best_result['transform_matrix'],
+            match_score=best_result['match_score'],
+            offset_x=offset_x,
+            offset_y=offset_y
+        )
+
+    def find_homography(self, kp1, kp2, good_matches):
+        """Estimate a homography from matched keypoints (RANSAC); needs >= 4 matches."""
+        if len(good_matches) < 4:
+            return None, None
+
+        src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+
+        # RANSAC with a 5.0 px reprojection threshold.
+        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
+
+        if self.debug:
+            # Dump the homography details for inspection.
+            with open(os.path.join(self.debug_dir, 'match_320_homography_info.txt'), 'w') as f:
+                f.write(f"Homography matrix:\n{H}\n")
+                f.write(f"Number of inliers: {np.sum(mask)}\n")
+
+        return H, mask
+
+    def _match_features(self, keypoints1, keypoints2, descriptors1, descriptors2,
+                        matcher, detector_name, img1=None, img2=None, is_overlap=True) -> FeatureMatchResult:
+        """Match descriptors, RANSAC-filter, and package a FeatureMatchResult.
+
+        Note: the is_overlap parameter is currently unused.
+        """
+        if descriptors1 is None or descriptors2 is None:
+            raise ValueError(f"No descriptors found using {detector_name}")
+
+        # 1. Raw cross-checked matching.
+        matches = matcher.match(descriptors1, descriptors2)
+
+        # 2. Distance statistics over all matches.
+        distances = np.array([m.distance for m in matches])
+        mean_dist = np.mean(distances)
+        std_dist = np.std(distances)
+
+        # 3. Keep matches clearly below the mean distance.
+        threshold = mean_dist - 0.7 * std_dist
+        good_matches = [m for m in matches if m.distance < threshold]
+        if len(good_matches) < self.min_matches:
+            # NOTE(review): fallback slices the unsorted match list; sorting
+            # by distance first (as the combined path does) was likely intended.
+            good_matches = matches[:min(50, len(matches))]
+
+        if self.debug and img1 is not None and img2 is not None:
+            # Visualise the retained matches on the provided images.
+            overlap_match_img = cv2.drawMatches(img1, keypoints1, img2, keypoints2,
+                                                good_matches, None,
+                                                flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
+            self.save_debug_image(overlap_match_img, f'match_420_overlap_matches_{detector_name}')
+
+        # First transform estimate over the filtered matches.
+        src_pts = np.float32([keypoints1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+        dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+
+        # RANSAC
+        transform_matrix, mask = cv2.estimateAffinePartial2D(
+            src_pts, dst_pts,
+            method=cv2.RANSAC,
+            ransacReprojThreshold=3.0
+        )
+
+        # Keep only RANSAC inliers.
+        inliers = mask.ravel() == 1
+        good_matches = [good_matches[i] for i in range(len(good_matches)) if inliers[i]]
+
+        # Re-estimate the transform on the inliers only.
+        src_pts = np.float32([keypoints1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+        dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+        transform_matrix, mask = cv2.estimateAffinePartial2D(src_pts, dst_pts)
+
+        # Inlier ratio as the score; translation components as integer offsets.
+        match_score = np.sum(mask) / len(mask)
+        offset_x = int(round(transform_matrix[0, 2]))
+        offset_y = int(round(transform_matrix[1, 2]))
+
+        # (dev note) the ~16/11 px displacement observed between these two was expected
+
+        if self.debug:
+            # Dump the final match statistics for offline inspection.
+            with open(os.path.join(self.debug_dir, f'match_520_match_info_{detector_name}.txt'), 'w') as f:
+                f.write(f"Transform matrix:\n{transform_matrix}\n")
+                f.write(f"Offsets: ({offset_x}, {offset_y})\n")
+                f.write(f"Match score: {match_score}\n")
+                f.write(f"Number of initial matches: {len(matches)}\n")
+                f.write(f"Number of good matches: {len(good_matches)}\n")
+
+        return FeatureMatchResult(
+            keypoints1=keypoints1,
+            keypoints2=keypoints2,
+            matches=good_matches,
+            transform_matrix=transform_matrix,
+            match_score=match_score,
+            offset_x=offset_x,
+            offset_y=offset_y
+        )
+
+    def save_debug_image(self, img, name, normalize=False):
+        """保存调试图片"""
+        try:
+            if self.debug:
+                save_path = os.path.join(self.debug_dir, f"{name}.jpg")
+                if normalize:
+                    img_normalized = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX)
+                    cv2.imwrite(save_path, img_normalized)
+                else:
+                    cv2.imwrite(save_path, img)
+
+                if data_center_algo_inner_signals_obj is not None:
+                    print("信息",
+                                                                            f"Debug: Saved {save_path}")
+
+                return True, f"save_debug_image 成功: {save_path}"
+            else:
+                return False, "debug mode is not enabled"
+        except Exception as e:
+            msg = f"save_debug_image出现bug: {str(e)}"
+            if data_center_algo_inner_signals_obj is not None:
+                print("警告",
+                                                                        msg)
+            return False, msg
+
+    def pad_image(self, img: np.ndarray, target_width: int = None, target_height: int = None) -> np.ndarray:
+        """将图片填充到目标尺寸"""
+        if self.debug:
+            self.save_debug_image(img, 'pad_120_before_padding')
+
+        current_height, current_width = img.shape[:2]
+        target_width = target_width if target_width is not None else current_width
+        target_height = target_height if target_height is not None else current_height
+
+        if current_width == target_width and current_height == target_height:
+            return img
+
+        # 创建黑色背景
+        padded_img = np.zeros((target_height, target_width, 3), dtype=np.uint8)
+
+        # 将原图放在中心位置
+        y_offset = (target_height - current_height) // 2
+        x_offset = (target_width - current_width) // 2
+
+        padded_img[y_offset:y_offset + current_height,
+        x_offset:x_offset + current_width] = img
+
+        if self.debug:
+            self.save_debug_image(padded_img, 'pad_320_after_padding')
+
+        return padded_img
+
+    def split_image(self, img, is_left_top=True):
+        """分割图片为重叠区域和非重叠区域"""
+        height, width = img.shape[:2]
+        overlap_width = min(self.estimate_overlap_pixels, width // 2)
+        non_overlap_width = width - overlap_width
+
+        if is_left_top:
+            non_overlap_region = img[:, :non_overlap_width]
+            overlap_region = img[:, non_overlap_width:]
+            if self.debug:
+                self.save_debug_image(non_overlap_region, 'sp_120_left_top_non_overlap')
+                self.save_debug_image(overlap_region, 'sp_140_left_top_overlap')
+        else:
+            overlap_region = img[:, :overlap_width]
+            non_overlap_region = img[:, overlap_width:]
+            if self.debug:
+                self.save_debug_image(overlap_region, 'sp_220_right_bottom_overlap')
+                self.save_debug_image(non_overlap_region, 'sp_240_right_bottom_non_overlap')
+
+        return overlap_region, non_overlap_region
+
+    def stitch_horizontal(self, left_img: np.ndarray, right_img: np.ndarray) -> np.ndarray:
+        """水平拼接两张图片"""
+
+        height, width = left_img.shape[:2]
+        left_height, left_width = left_img.shape[:2]
+
+        overlap_width = min(self.estimate_overlap_pixels, width // 2)
+        non_overlap_width = width - overlap_width
+
+        # 1. 分割重叠区域
+        left_overlap, left_non_overlap = self.split_image(left_img, is_left_top=True)
+        right_overlap, right_non_overlap = self.split_image(right_img, is_left_top=False)
+
+        if self.debug:
+            # 保存重叠区域的图像,用于调试
+            self.save_debug_image(left_overlap, 'h_120_left_overlap_region')
+            self.save_debug_image(right_overlap, 'h_140_right_overlap_region')
+
+        # 2. 特征检测和匹配(只在重叠区域进行)
+        match_result = self.detect_and_match_features(left_overlap, right_overlap)
+        offset_x = match_result.offset_x  # -16
+        offset_y = match_result.offset_y  # -11
+        match_score = match_result.match_score
+
+        # 匹配点就是右图最左上点的那个点
+        # 右图0,0对应的是左图的 16,11
+        # 所以重叠区域为
+        self.real_overlap_width = overlap_width + offset_x  # 445,用模板匹配算的值也是445
+        real_overlap_width = self.real_overlap_width
+
+        # 计算右图相对于左图的y方向的偏移量
+        y_offset_right2left = offset_y * (-1)
+
+        # # 计算最终图像尺寸
+        stitch_img_width = left_img.shape[1] + right_img.shape[1] - self.real_overlap_width
+        stitch_img_height = max(left_img.shape[0], right_img.shape[0])
+
+        if self.debug:
+            with open(os.path.join(self.debug_dir, 'h_320_alignment_info.txt'), 'w') as f:
+                f.write(f"match_result.offset_x: {match_result.offset_x}\n")
+                f.write(f"match_result.offset_y: {match_result.offset_y}\n")
+                f.write(f"match_score: {match_score}\n")
+                f.write(f"real_overlap_width: {self.real_overlap_width}\n")
+                f.write(f"y_offset_right2left: {y_offset_right2left}\n")
+                f.write(f"stitch_img_width: {stitch_img_width}\n")
+                f.write(f"stitch_img_height: {stitch_img_height}\n")
+
+        if self.blend_type == 'half_importance':
+            blend_stitch_img = self.blend_half_importance(left_img, right_img, stitch_img_width, stitch_img_height,
+                                                          y_offset_right2left, real_overlap_width)
+        elif self.blend_type == 'right_first':
+            # 右边优先的拼接方式
+            blend_stitch_img = self.blend_right_first(left_img, right_img, stitch_img_width, stitch_img_height,
+                                                      y_offset_right2left)
+        elif self.blend_type == 'left_first':
+            blend_stitch_img = self.blend_left_first(left_img, right_img,
+                                                     stitch_img_width,
+                                                     stitch_img_height,
+                                                     y_offset_right2left,
+                                                     real_overlap_width)
+        elif self.blend_type == 'half_importance_add_weight':
+            blend_stitch_img = self.blend_half_importance_add_weight(left_img, right_img,
+                                                                     stitch_img_width,
+                                                                     stitch_img_height,
+                                                                     y_offset_right2left,
+                                                                     real_overlap_width,
+                                                                     blend_ratio=self.blend_ratio)
+        else:
+            # 左边优先的拼接方式
+            blend_stitch_img = None
+
+        if self.debug:
+            self.save_debug_image(blend_stitch_img, 'h_520_horizontal_stitch_img')
+
+        return blend_stitch_img
+
+    def stitch_vertical(self, top_img: np.ndarray, bottom_img: np.ndarray) -> np.ndarray:
+        """垂直拼接两张图片"""
+        try:
+            # 将图片旋转后调用水平拼接
+            top_rotated = cv2.rotate(top_img, cv2.ROTATE_90_COUNTERCLOCKWISE)
+            bottom_rotated = cv2.rotate(bottom_img, cv2.ROTATE_90_COUNTERCLOCKWISE)
+
+            if self.debug:
+                self.save_debug_image(top_rotated, 'v_120_top_rotated')
+                self.save_debug_image(bottom_rotated, 'v_140_bottom_rotated')
+
+            result_rotated = self.stitch_horizontal(top_rotated, bottom_rotated)
+
+            # 将结果旋转回来
+            result = cv2.rotate(result_rotated, cv2.ROTATE_90_CLOCKWISE)
+
+            if self.debug:
+                self.save_debug_image(result, 'v_520_final_result_vertical')
+
+            return result
+
+        except Exception as e:
+            error_info = f"Vertical stitching failed: {str(e)}"
+            if data_center_algo_inner_signals_obj is not None:
+                print("警告", error_info)
+
+            raise
+
+    def stitch_main(self, img1: np.ndarray, img2: np.ndarray) -> Tuple[np.ndarray, float]:
+        """主拼接方法"""
+        try:
+            # 根据拼接类型选择不同的拼接方式
+            if self.stitch_type == 'horizontal':
+                # 确保两张图片高度相同
+                max_height = max(img1.shape[0], img2.shape[0])
+                img1 = self.pad_image(img1, target_height=max_height)
+                img2 = self.pad_image(img2, target_height=max_height)
+                result = self.stitch_horizontal(img1, img2)
+            else:  # vertical
+                # 确保两张图片宽度相同
+                max_width = max(img1.shape[1], img2.shape[1])
+                img1 = self.pad_image(img1, target_width=max_width)
+                img2 = self.pad_image(img2, target_width=max_width)
+                result = self.stitch_vertical(img1, img2)
+            self.best_score = self.match_score
+            return result
+
+        except Exception as e:
+            error_info = f"Image stitching failed: {str(e)}"
+
+            if data_center_algo_inner_signals_obj is not None:
+                print("警告", error_info)
+            raise
+
+
# Test driver
if __name__ == '__main__':
    # Debug directory and overlap estimate.
    timestamp = time.strftime("%Y%m%d_%H%M%S")
    root_path = r"\_250115_Stitch_Image_TemplateMatch\test_images"
    root_path_obj = Path(root_path).absolute()

    # Single source of truth for the stitch direction -- the old code named
    # the debug directory "horizontal" while the stitcher ran "vertical".
    stitch_type = "vertical"
    debug_dir_str = str(root_path_obj / f'debug_{timestamp}_{stitch_type}')
    debug_dir_obj = Path(debug_dir_str).absolute()
    estimate_overlap_ratio = 0.45
    estimate_overlap_pixels = int(round(1024 * estimate_overlap_ratio))

    # Build the keypoint-based stitcher.
    stitcher = ImageStitcherKeyPoint(
        estimate_overlap_pixels=estimate_overlap_pixels,
        center_ratio=0.8,
        stitch_type=stitch_type,
        blend_type='half_importance_add_weight',
        debug=True,
        debug_dir=debug_dir_str,
        feature_detector='combine',  # options: 'akaze', 'sift', 'orb', 'brisk', 'combine'
        blend_ratio=0.5,
        combine_detectors=False
    )

    # Load the test images.
    img_left_name = "20250123_162407_0001.jpg"
    img_right_name = "20250123_162409_0002.jpg"
    img_bottom_name = "20250123_162422_0007.jpg"

    img_left_path = str(root_path_obj / img_left_name)
    img_right_path = str(root_path_obj / img_right_name)
    img_bottom_path = str(root_path_obj / img_bottom_name)

    img_left = cv2.imread(img_left_path)
    img_right = cv2.imread(img_right_path)
    img_bottom = cv2.imread(img_bottom_path)

    # img_bottom is what actually gets stitched below, so it must be
    # validated too (it was previously unchecked).
    if img_left is None or img_right is None or img_bottom is None:
        print("Error: Could not read one or more images")
        sys.exit(1)

    # Record the start time.
    start_time = time.time()

    try:
        # Run the stitch.
        result_img = stitcher.stitch_main(img_left, img_bottom)

        # Save the result.
        save_final_image_path = str(debug_dir_obj / 'result_img.jpg')
        cv2.imwrite(save_final_image_path, result_img)
        # Report timing and score.
        end_time = time.time()
        print(f"拼接完成!")
        print(f"处理时间: {end_time - start_time:.2f} 秒")
        print(f"匹配得分: {stitcher.best_score:.4f}")
        print(f"结果已保存为: {save_final_image_path}")

    except Exception as e:
        print(f"拼接过程中出错: {str(e)}")
        sys.exit(1)

+ 586 - 0
fry_project_classes/stitch_img_template_match.py

@@ -0,0 +1,586 @@
+import math
+from pathlib import Path
+
+import cv2
+import numpy as np
+import os
+import time
+
+
def fry_cv2_imread(filename, flags=cv2.IMREAD_COLOR):
    """Drop-in replacement for cv2.imread.

    Reads raw bytes first and decodes via cv2.imdecode -- presumably to
    support paths with non-ASCII characters, which cv2.imread mishandles on
    some platforms (TODO confirm). Returns the decoded image, or None on
    read/decode failure.
    """
    try:
        with open(filename, 'rb') as f:
            chunk = f.read()
        chunk_arr = np.frombuffer(chunk, dtype=np.uint8)
        img = cv2.imdecode(chunk_arr, flags)
        if img is None:
            # Include the offending path so failures are diagnosable
            # (the message used to print a literal "(unknown)").
            print("警告", f"Warning: Unable to decode image: {filename}")
        return img
    except IOError as e:
        print("错误", f"IOError: Unable to read file: {filename}")
        print("错误", f"Error details: {str(e)}")

        return None
+
+
def fry_cv2_imwrite(filename, img, params=None):
    """Drop-in replacement for cv2.imwrite.

    Encodes via cv2.imencode and writes the bytes manually -- presumably to
    support non-ASCII paths (TODO confirm). Returns True on success, False
    on any encode/write failure.
    """
    try:
        ext = os.path.splitext(filename)[1].lower()
        # cv2.imencode rejects params=None (it must be an int sequence),
        # so only forward it when the caller supplied one.
        if params is None:
            result, encoded_img = cv2.imencode(ext, img)
        else:
            result, encoded_img = cv2.imencode(ext, img, params)

        if result:
            with open(filename, 'wb') as f:
                encoded_img.tofile(f)
            return True
        else:
            # Include the path (the message used to print "(unknown)").
            print("警告", f"Warning: Unable to encode image: {filename}")
            return False
    except Exception as e:
        print("错误", f"Error: Unable to write file: {filename}")
        print("错误", f"Error details: {str(e)}")
        return False
+
+
# Override OpenCV's original functions with the byte-stream wrappers above.
# NOTE(review): presumably done so that image paths with non-ASCII
# characters work -- confirm before relying on it.
cv2.imread = fry_cv2_imread
cv2.imwrite = fry_cv2_imwrite

from fry_project_classes.blend_type_mixin import BlendTypeMixin
+
+
class ImageStitcherTemplateMatch(BlendTypeMixin):
    def __init__(self, estimate_overlap_pixels=800, center_ratio=0.8,
                 stitch_type="vertical",
                 blend_type='half_importance',
                 blend_ratio: float = 0.3,
                 debug=False, debug_dir='debug_output',
                 light_uniformity_compensation_enabled=False,
                 light_uniformity_compensation_width=15,
                 debug_draw_line_enabled=False
                 ):
        """
        Initialize the template-matching stitcher.

        Parameters:
        estimate_overlap_pixels: estimated overlap width in pixels (default 800)
        center_ratio: fraction of the overlap band used as the template (default 0.8)
        stitch_type: "horizontal" or "vertical"
        blend_type: blending strategy name (default 'half_importance')
        blend_ratio: weight used by the weighted blending strategies
        debug: enable debug output (default False)
        debug_dir: base directory for debug images (default 'debug_output')
        light_uniformity_compensation_enabled: enable seam brightness compensation
        light_uniformity_compensation_width: width of the compensation band
        debug_draw_line_enabled: also render a seam-line visualization image
        """

        print("警告", "拼图方法:区域")

        self.estimate_overlap_pixels = estimate_overlap_pixels
        self.estimate_non_overlap_pixels = None  # computed during stitching
        self.center_ratio = center_ratio
        self.blend_ratio = blend_ratio

        self.blend_type = blend_type
        self.stitch_type = stitch_type

        self.debug_draw_line_enabled = debug_draw_line_enabled

        self.debug = debug
        self.init_debug = debug_dir

        self.light_uniformity_compensation_enabled = light_uniformity_compensation_enabled
        self.light_uniformity_compensation_width = light_uniformity_compensation_width

        if self.debug:
            # Per-configuration debug subdirectory (the makedirs call was
            # duplicated in the original; once is enough).
            self.debug_dir = f"{self.init_debug}_{self.stitch_type}_{self.blend_type}"
            os.makedirs(self.debug_dir, exist_ok=True)

        # Template-matching results, filled in by template_matching().
        self.best_y = -1
        self.best_x = -1
        self.best_score = -1
        self.template_score = -1
+
+    def save_debug_image(self, img, name, normalize=False):
+        """
+        保存调试图片
+        
+        参数:
+        img: 要保存的图片
+        name: 图片名称
+        normalize: 是否需要归一化处理(对于模板匹配结果图等)
+        """
+        try:
+            if self.debug:
+                save_path = os.path.join(self.debug_dir, f"{name}.jpg")
+                if normalize:
+                    # 归一化到0-255范围
+                    img_normalized = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX)
+                    cv2.imwrite(save_path, img_normalized)
+                else:
+                    cv2.imwrite(save_path, img)
+
+                now_info = f"Debug: Saved {save_path}"
+
+                # print("信息", now_info)
+
+                return True, f"save_debug_image 成功: {save_path}"
+            else:
+                return False, "debug mode is not enabled"
+        except Exception as e:
+            error_info = f"save_debug_image出现bug: {str(e)}"
+            print("警告", error_info)
+
+            return False, error_info
+
+    def visualize_template_match(self, template, search_region, best_y, best_x):
+        """
+        可视化模板匹配结果
+        
+        参数:
+        template: 模板图片
+        search_region: 搜索区域
+        best_y, best_x: 最佳匹配位置
+        """
+        try:
+            if self.debug:
+                vis_img = search_region.copy()
+                h, w = template.shape[:2]
+                cv2.rectangle(vis_img, (best_x, best_y),
+                              (best_x + w, best_y + h), (0, 255, 0), 2)
+                is_ok, msg = self.save_debug_image(vis_img, 'tp_140_template_match_visualization')
+                return is_ok, msg
+            else:
+                return False, "debug mode is not enabled"
+        except Exception as e:
+            error_info = f"visualize_template_match 出现bug: {str(e)}"
+            print("警告", error_info)
+
+            return False, msg
+
+    def split_image(self, img, is_left_top=True):
+        """
+        分割图片为重叠区域和非重叠区域
+        """
+        height, width = img.shape[:2]
+        overlap_width = min(self.estimate_overlap_pixels, width // 2)
+        non_overlap_width = width - overlap_width
+
+        if is_left_top:
+            non_overlap_region = img[:, :non_overlap_width]
+            overlap_region = img[:, non_overlap_width:]
+            if self.debug:
+                self.save_debug_image(img, 'split_220_left_top_original')
+                self.save_debug_image(non_overlap_region, 'split_240_left_top_non_overlap')
+                self.save_debug_image(overlap_region, 'split_260_left_top_overlap')
+        else:
+            overlap_region = img[:, :overlap_width]
+            non_overlap_region = img[:, overlap_width:]
+            if self.debug:
+                self.save_debug_image(img, 'split_320_right_bottom_original')
+                self.save_debug_image(overlap_region, 'split_340_right_bottom_overlap')
+                self.save_debug_image(non_overlap_region, 'split_360_right_bottom_non_overlap')
+
+        return overlap_region, non_overlap_region
+
+    def get_center_region(self, img):
+        """
+        获取图片的中心区域
+        """
+        height, width = img.shape[:2]
+        margin_y = int(height * (1 - self.center_ratio) / 2)
+        margin_x = int(width * (1 - self.center_ratio) / 2)
+
+        center_region = img[margin_y:height - margin_y, margin_x:width - margin_x]
+
+        if self.debug:
+            self.save_debug_image(center_region, 'center_120_template_center_region')
+
+        return center_region, margin_x, margin_y
+
+    def template_matching(self, template, search_region):
+        """
+        模板匹配
+        """
+        result = cv2.matchTemplate(search_region, template, cv2.TM_CCOEFF_NORMED)
+        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
+        best_y, best_x = max_loc[1], max_loc[0]
+
+        now_info = f"匹配结果: 最大值{max_val}, 位置{best_x, best_y}"
+
+        print("警告", now_info)
+
+        self.best_y = best_y
+        self.best_x = best_x
+        self.best_score = max_val
+        self.template_score = max_val
+
+        # 如果存在父对象且父对象有 update_match_score 方法,发送匹配分数
+        # if hasattr(self, 'parent') and hasattr(self.parent, 'update_match_score'):
+        #     self.parent.update_match_score(max_val)
+
+        if self.debug:
+            is_ok1, opt_msg1 = self.save_debug_image(result, 'tp_120_template_matching_result', normalize=True)
+            is_ok2, opt_msg2 = self.visualize_template_match(template, search_region, best_y, best_x)
+            is_ok3, opt_msg3 = self.save_debug_image(template, 'tp_220_template')
+            is_ok4, opt_msg4 = self.save_debug_image(search_region, 'tp_240_search_region')
+
+            with open(os.path.join(self.debug_dir, 'tp_320_matching_info.txt'), 'w') as f:
+                f.write(f"Best match position: ({best_x}, {best_y})\n")
+                f.write(f"Match score: {max_val}\n")
+
+        return best_x, best_y, max_val
+
+    def pad_image(self, img: np.ndarray, target_width: int = None, target_height: int = None) -> np.ndarray:
+        """
+        将图片填充到目标尺寸
+
+        参数:
+        img: 输入图片
+        target_width: 目标宽度,如果为None则保持原宽度
+        target_height: 目标高度,如果为None则保持原高度
+
+        返回:
+        填充后的图片
+        """
+        if self.debug:
+            self.save_debug_image(img, 'pad_120_before_padding')
+
+        current_height, current_width = img.shape[:2]
+        target_width = target_width if target_width is not None else current_width
+        target_height = target_height if target_height is not None else current_height
+
+        if current_width == target_width and current_height == target_height:
+            return img
+
+        # 创建黑色背景
+        padded_img = np.zeros((target_height, target_width, 3), dtype=np.uint8)
+
+        # 将原图放在中心位置
+        y_offset = (target_height - current_height) // 2
+        x_offset = (target_width - current_width) // 2
+
+        padded_img[y_offset:y_offset + current_height,
+        x_offset:x_offset + current_width] = img
+
+        if self.debug:
+            self.save_debug_image(padded_img, 'pad_220_after_padding')
+            with open(os.path.join(self.debug_dir, 'pad_320_padding_info.txt'), 'w') as f:
+                f.write(f"Original size: {current_width}x{current_height}\n")
+                f.write(f"Target size: {target_width}x{target_height}\n")
+                f.write(f"Padding offsets: x={x_offset}, y={y_offset}\n")
+
+        return padded_img
+
+    def stitch_horizontal(self, left_img, right_img):
+        """
+        水平拼接两张图片
+        水平拼接的图片的高必须一样
+        """
+        # self.debug_dir = f"{self.init_debug}_horizontal_{self.blend_type}"
+        # os.makedirs(self.debug_dir, exist_ok=True)
+
+        # 确保两张图片高度相同
+        max_height = max(left_img.shape[0], right_img.shape[0])
+        left_img = self.pad_image(left_img, target_height=max_height)
+        right_img = self.pad_image(right_img, target_height=max_height)
+
+        # 1. 分割图片,使用预估的重叠像素
+        left_overlap, left_non_overlap = self.split_image(left_img, is_left_top=True)
+        right_overlap, right_non_overlap = self.split_image(right_img, is_left_top=False)
+
+        # 2. 获取左图重叠区域的中心部分作为模板
+        template, template_offset_x, template_offset_y = self.get_center_region(left_overlap)
+
+        # 计算模板在左图中的位置
+        self.estimate_non_overlap_pixels = left_img.shape[1] - self.estimate_overlap_pixels
+        template_in_left_x = self.estimate_non_overlap_pixels + template_offset_x
+        template_in_left_y = template_offset_y
+
+        # 3. 在右图重叠区域中进行模板匹配
+        best_x, best_y, max_val = self.template_matching(template, right_overlap)
+        template_score = max_val
+
+        # 计算模板在右图中的位置
+        template_in_right_x = best_x
+        template_in_right_y = best_y
+
+        # 真实的重叠区域的x和y 这个还不好算
+
+        # 04、计算最后拼接的图片的尺寸
+        left_width_contribution = template_in_left_x  # 左图取到模板的左上角,不包含模板
+        right_width_contribution = right_img.shape[1] - template_in_right_x  # 右图取到模板的左上角,包含模板
+        stitch_img_width = left_width_contribution + right_width_contribution
+        stitch_img_height = max(left_img.shape[0], right_img.shape[0])
+
+        # 计算右图相对于左图的y方向的偏移量
+        y_offset_right2left = template_in_left_y - template_in_right_y
+
+        # 计算真正的重叠区域:右图的左边+模板宽度+左图的右边
+        real_overlap_width = template_in_right_x + template.shape[1] + (
+                    left_img.shape[1] - template_in_left_x - template.shape[1])
+
+        if self.debug:
+            with open(os.path.join(self.debug_dir, 'h_320_alignment_info.txt'), 'w') as f:
+                f.write(f"template_in_left_x: {template_in_left_x}\n")
+                f.write(f"template_in_left_y: {template_in_left_y}\n")
+                f.write(f"template_in_right_x: {template_in_right_x}\n")
+                f.write(f"template_in_right_y: {template_in_right_y}\n")
+                f.write(f"left_width_contribution: {left_width_contribution}\n")
+                f.write(f"right_width_contribution: {right_width_contribution}\n")
+                f.write(f"stitch_img_width: {stitch_img_width}\n")
+                f.write(f"stitch_img_height: {stitch_img_height}\n")
+                f.write(f"real_overlap_width: {real_overlap_width}\n")
+
+        if self.blend_type == 'half_importance':
+            blend_stitch_img = self.blend_half_importance(left_img, right_img, stitch_img_width, stitch_img_height,
+                                                          y_offset_right2left, real_overlap_width,
+                                                          light_uniformity_compensation=self.light_uniformity_compensation_enabled,
+                                                          light_uniformity_compensation_width=self.light_uniformity_compensation_width)
+            if self.debug_draw_line_enabled:
+                blend_stitch_img_visualize = self.blend_half_importance(left_img, right_img, stitch_img_width,
+                                                                        stitch_img_height, y_offset_right2left,
+                                                                        real_overlap_width, visualize=True)
+                if self.debug:
+                    self.save_debug_image(blend_stitch_img_visualize, 'h_500_final_result_horizontal_visualize')
+        elif self.blend_type == 'right_first':
+            # 右边优先的拼接方式
+            blend_stitch_img = self.blend_right_first(left_img, right_img, stitch_img_width, stitch_img_height,
+                                                      y_offset_right2left)
+        elif self.blend_type == 'left_first':
+            blend_stitch_img = self.blend_left_first(left_img, right_img,
+                                                     stitch_img_width,
+                                                     stitch_img_height,
+                                                     y_offset_right2left,
+                                                     real_overlap_width)
+        elif self.blend_type == 'half_importance_add_weight':
+            blend_stitch_img = self.blend_half_importance_add_weight(left_img, right_img, stitch_img_width,
+                                                                     stitch_img_height, y_offset_right2left,
+                                                                     real_overlap_width,
+                                                                     blend_ratio=self.blend_ratio)
+        elif self.blend_type == 'half_importance_global_brightness':
+            blend_stitch_img = self.blend_half_importance_global_brightness(left_img, right_img, stitch_img_width,
+                                                                            stitch_img_height, y_offset_right2left,
+                                                                            real_overlap_width,
+                                                                            light_uniformity_compensation=self.light_uniformity_compensation_enabled,
+                                                                            light_uniformity_compensation_width=self.light_uniformity_compensation_width)
+            if self.debug_draw_line_enabled:
+                blend_stitch_img_visualize = self.blend_half_importance_global_brightness(left_img, right_img,
+                                                                                          stitch_img_width,
+                                                                                          stitch_img_height,
+                                                                                          y_offset_right2left,
+                                                                                          real_overlap_width,
+                                                                                          visualize=True)
+                if self.debug:
+                    self.save_debug_image(blend_stitch_img_visualize, 'h_500_final_result_horizontal_visualize')
+
+        elif self.blend_type == 'half_importance_partial_brightness':
+            blend_stitch_img = self.blend_half_importance_partial_brightness(left_img, right_img, stitch_img_width,
+                                                                             stitch_img_height, y_offset_right2left,
+                                                                             real_overlap_width,
+                                                                             light_uniformity_compensation=self.light_uniformity_compensation_enabled,
+                                                                             light_uniformity_compensation_width=self.light_uniformity_compensation_width)
+            if self.debug_draw_line_enabled:
+                blend_stitch_img_visualize = self.blend_half_importance_partial_brightness(left_img, right_img,
+                                                                                           stitch_img_width,
+                                                                                           stitch_img_height,
+                                                                                           y_offset_right2left,
+                                                                                           real_overlap_width,
+                                                                                           visualize=True)
+                if self.debug:
+                    self.save_debug_image(blend_stitch_img_visualize, 'h_500_final_result_horizontal_visualize')
+
+
+        elif self.blend_type == 'blend_half_importance_partial_HV':
+            blend_stitch_img = self.blend_half_importance_partial_HV(left_img, right_img, stitch_img_width,
+                                                                     stitch_img_height, y_offset_right2left,
+                                                                     real_overlap_width,
+                                                                     light_uniformity_compensation=self.light_uniformity_compensation_enabled,
+                                                                     light_uniformity_compensation_width=self.light_uniformity_compensation_width)
+            if self.debug_draw_line_enabled:
+                blend_stitch_img_visualize = self.blend_half_importance_partial_HV(left_img, right_img,
+                                                                                   stitch_img_width, stitch_img_height,
+                                                                                   y_offset_right2left,
+                                                                                   real_overlap_width, visualize=True)
+                if self.debug:
+                    self.save_debug_image(blend_stitch_img_visualize, 'h_500_final_result_horizontal_visualize')
+
+        elif self.blend_type == 'blend_half_importance_partial_SV':
+            blend_stitch_img = self.blend_half_importance_partial_SV(left_img, right_img, stitch_img_width,
+                                                                     stitch_img_height, y_offset_right2left,
+                                                                     real_overlap_width,
+                                                                     light_uniformity_compensation=self.light_uniformity_compensation_enabled,
+                                                                     light_uniformity_compensation_width=self.light_uniformity_compensation_width)
+            if self.debug_draw_line_enabled:
+                blend_stitch_img_visualize = self.blend_half_importance_partial_SV(left_img, right_img,
+                                                                                   stitch_img_width, stitch_img_height,
+                                                                                   y_offset_right2left,
+                                                                                   real_overlap_width, visualize=True)
+                if self.debug:
+                    self.save_debug_image(blend_stitch_img_visualize, 'h_500_final_result_horizontal_visualize')
+
+        elif self.blend_type == 'blend_half_importance_partial_HSV':
+            blend_stitch_img = self.blend_half_importance_partial_HSV(left_img, right_img, stitch_img_width,
+                                                                      stitch_img_height, y_offset_right2left,
+                                                                      real_overlap_width,
+                                                                      light_uniformity_compensation=self.light_uniformity_compensation_enabled,
+                                                                      light_uniformity_compensation_width=self.light_uniformity_compensation_width)
+            if self.debug_draw_line_enabled:
+                blend_stitch_img_visualize = self.blend_half_importance_partial_HSV(left_img, right_img,
+                                                                                    stitch_img_width, stitch_img_height,
+                                                                                    y_offset_right2left,
+                                                                                    real_overlap_width, visualize=True)
+                if self.debug:
+                    self.save_debug_image(blend_stitch_img_visualize, 'h_500_final_result_horizontal_visualize')
+
+
+
+        elif self.blend_type == 'blend_half_importance_partial_brightness_add_weight':
+            blend_stitch_img = self.blend_half_importance_partial_brightness_add_weight(
+                left_img=left_img,
+                right_img=right_img,
+                stitch_img_width=stitch_img_width,
+                stitch_img_height=stitch_img_height,
+                y_offset_right2left=y_offset_right2left,
+                real_overlap_width=real_overlap_width,
+                light_uniformity_compensation=self.light_uniformity_compensation_enabled,
+                light_uniformity_compensation_width=self.light_uniformity_compensation_width,
+                add_weight_rate=self.blend_ratio
+            )
+            if self.debug_draw_line_enabled:
+                blend_stitch_img_visualize = self.blend_half_importance_partial_brightness_add_weight(left_img=left_img,
+                                                                                                      right_img=right_img,
+                                                                                                      stitch_img_width=stitch_img_width,
+                                                                                                      stitch_img_height=stitch_img_height,
+                                                                                                      y_offset_right2left=y_offset_right2left,
+                                                                                                      real_overlap_width=real_overlap_width,
+                                                                                                      add_weight_rate=self.blend_ratio,
+                                                                                                      visualize=True)
+                if self.debug:
+                    self.save_debug_image(blend_stitch_img_visualize, 'h_500_final_result_horizontal_visualize')
+
+
+        else:
+            # 左边优先的拼接方式
+            blend_stitch_img = None
+
+        if self.debug:
+            self.save_debug_image(blend_stitch_img, 'h_500_final_result_horizontal')
+
+        # return result
+        return blend_stitch_img
+
+    def stitch_vertical(self, top_img, bottom_img):
+        """
+        垂直拼接两张图片
+        垂直拼接的图片的宽必须一样
+        """
+        # self.debug_dir = f"{self.init_debug}_vertical_{self.blend_type}"
+        # os.makedirs(self.debug_dir, exist_ok=True)
+
+        # 确保两张图片宽度相同
+        max_width = max(top_img.shape[1], bottom_img.shape[1])
+        top_img = self.pad_image(top_img, target_width=max_width)
+        bottom_img = self.pad_image(bottom_img, target_width=max_width)
+
+        if self.debug:
+            self.save_debug_image(top_img, 'v_120_top_original')
+            self.save_debug_image(bottom_img, 'v_140_bottom_original')
+
+        # 将图片逆时针旋转90度
+        top_rotated = cv2.rotate(top_img, cv2.ROTATE_90_COUNTERCLOCKWISE)
+        bottom_rotated = cv2.rotate(bottom_img, cv2.ROTATE_90_COUNTERCLOCKWISE)
+
+        if self.debug:
+            self.save_debug_image(top_rotated, 'v_220_top_rotated')
+            self.save_debug_image(bottom_rotated, 'v_240_bottom_rotated')
+
+        # 进行水平拼接
+        result_rotated = self.stitch_horizontal(top_rotated, bottom_rotated)
+
+        # 将结果顺时针旋转90度
+        result = cv2.rotate(result_rotated, cv2.ROTATE_90_CLOCKWISE)
+
+        if self.debug:
+            self.save_debug_image(result, 'v_500_final_result_vertical')
+
+        return result
+
+    def stitch_main(self, left_img, right_img):
+        if self.stitch_type == 'horizontal':
+            # 确保两张图片高度相同
+            max_height = max(left_img.shape[0], right_img.shape[0])
+            left_img = self.pad_image(left_img, target_height=max_height)
+            right_img = self.pad_image(right_img, target_height=max_height)
+
+            return self.stitch_horizontal(left_img, right_img)
+        elif self.stitch_type == 'vertical':
+            # 确保两张图片宽度相同
+            max_width = max(left_img.shape[1], right_img.shape[1])
+            left_img = self.pad_image(left_img, target_width=max_width)
+            right_img = self.pad_image(right_img, target_width=max_width)
+
+            return self.stitch_vertical(left_img, right_img)
+        else:
+            raise ValueError('Invalid stitch type, must be one of horizontal or vertical')
+
+
def main():
    """Stitch two sample images vertically and save the result.

    Paths below are hard-coded test fixtures; debug artifacts and the
    final image go to a timestamped folder next to the inputs.
    """
    timestamp = time.strftime("%Y%m%d_%H%M%S")
    root_path = r"D:\_241231_fry_gitlab\LA_ai_main_CV_OpenCV\740_project\_250115_Stitch_Image_TemplateMatch\test_images"
    root_path_obj = Path(root_path).absolute()

    stitch_type = "vertical"
    debug_dir_obj = (root_path_obj / f'debug_{timestamp}_{stitch_type}').absolute()
    debug_dir_str = str(debug_dir_obj)
    estimate_overlap_ratio = 0.45
    estimate_overlap_pixels = int(round(1024 * estimate_overlap_ratio))

    # Only the top and bottom images participate in a vertical stitch;
    # the previously-loaded right image was unused and has been removed.
    top_img_path = str(root_path_obj / r"20250123_162407_0001.jpg")
    bottom_img_path = str(root_path_obj / r"20250123_162422_0007.jpg")

    top_img = cv2.imread(top_img_path)
    bottom_img = cv2.imread(bottom_img_path)
    # cv2.imread returns None on failure; fail fast with a clear message
    # instead of crashing later inside the stitcher.
    if top_img is None:
        raise FileNotFoundError(f"Failed to read image: {top_img_path}")
    if bottom_img is None:
        raise FileNotFoundError(f"Failed to read image: {bottom_img_path}")

    start_time = time.time()

    # Template-match stitcher with weighted blending in the overlap zone.
    stitcher_simple = ImageStitcherTemplateMatch(
        estimate_overlap_pixels=estimate_overlap_pixels,
        center_ratio=0.8,
        blend_type='half_importance_add_weight',
        stitch_type=stitch_type,
        blend_ratio=0.5,
        debug=True,
        debug_dir=debug_dir_str,
    )

    # Vertical stitch of the two samples.
    result_img = stitcher_simple.stitch_main(top_img, bottom_img)

    # Ensure the output directory exists before writing the final image.
    debug_dir_obj.mkdir(parents=True, exist_ok=True)
    save_final_image_path = str(debug_dir_obj / 'result_img.jpg')
    cv2.imwrite(save_final_image_path, result_img)

    end_time = time.time()

    print(f"拼接图片耗时:{end_time - start_time:.2f}秒")

+ 7 - 0
run.py

@@ -0,0 +1,7 @@
import uvicorn


def _serve() -> None:
    """Start the uvicorn server hosting the FastAPI app.

    Set ``reload=True`` during development to restart automatically when
    the code changes.
    """
    print("http://127.0.0.1:7745/docs")
    uvicorn.run("app.main:app", host="0.0.0.0", port=7745, reload=False)


if __name__ == "__main__":
    _serve()

+ 23 - 0
utils/utils.py

@@ -0,0 +1,23 @@
+import re
+from pathlib import Path
+import logging
+import shutil
+
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+
+
def natural_sort_key(s):
    """Return a sort key that orders embedded numbers numerically.

    Splits ``str(s)`` on digit runs so that e.g. '2.jpg' sorts before
    '10.jpg'. Non-numeric chunks are lower-cased for case-insensitive
    ordering.
    """
    key = []
    for chunk in re.split(r'(\d+)', str(s)):
        key.append(int(chunk) if chunk.isdigit() else chunk.lower())
    return key
+
+
def cleanup_temp_folder(folder_path: Path):
    """Remove the given temporary directory tree, logging the outcome.

    Missing paths and non-directories are ignored; any error during
    removal is logged rather than raised (best-effort cleanup).
    """
    try:
        # Guard clause: nothing to do unless the path is an existing dir.
        if not (folder_path.exists() and folder_path.is_dir()):
            return
        shutil.rmtree(folder_path)
        logging.info(f"已清理临时文件夹: {folder_path}")
    except Exception as e:
        logging.error(f"清理临时文件夹 {folder_path} 时出错: {e}")