import os
import json
import base64
import shutil

from pathlib import Path
from typing import Dict, List, Optional, Union
from main_process import process_date, process_stamp, process_handwrite, process_ocr, get_page_number, process_page, process_image

from paddlex import create_model, create_pipeline

# Shared PaddleX models, created once at import time and reused by every call.
# Text-line recognition model (used for dates, handwriting and page numbers).
PIPELINE = create_model(model_name = "PP-OCRv5_server_rec")
# Seal (stamp) recognition pipeline.
PIPELINE_SEAL = create_pipeline(pipeline="seal_recognition")   
# General OCR pipeline for full-page text extraction.
PIPELINE_OCR = create_pipeline(pipeline="OCR")


def process_seal_data(seal_data: Dict, api_data: Dict) -> List[Dict]:
    """Pair seal-recognition output with API stamp detections (strict 1:1).

    API stamps are processed in descending confidence order; each one greedily
    claims the still-unclaimed recognition entry whose detection score is
    closest to its confidence, provided the gap is below 0.2.  Every API stamp
    is emitted, with an empty content list when nothing matched.
    """
    # API-side stamps, highest confidence first.
    stamps = [
        {
            "position": entry["position"],
            "confidence": float(entry["confidence"]),
            "matched": False,
            "content": [],  # filled in when a recognition entry is claimed
        }
        for entry in api_data.get("data", [])
        if entry.get("category") == "stamp"
    ]
    stamps.sort(key=lambda s: s["confidence"], reverse=True)

    # Recognition-side entries: pair each detection box with its texts.
    candidates: List[Dict] = []
    if seal_data.get("layout_det_res", {}).get("boxes"):
        boxes = seal_data["layout_det_res"]["boxes"]
        rec_list = seal_data.get("seal_res_list", [])
        for idx, box in enumerate(boxes):
            texts = rec_list[idx].get("rec_texts", []) if idx < len(rec_list) else []
            candidates.append({
                "content": texts,
                "score": float(box.get("score", 0)),
                "matched": False,
            })
    candidates.sort(key=lambda c: c["score"], reverse=True)

    # Greedy one-to-one matching by smallest confidence/score gap (< 0.2).
    for stamp in stamps:
        best = None
        best_gap = float("inf")
        for candidate in candidates:
            if candidate["matched"]:
                continue
            gap = abs(candidate["score"] - stamp["confidence"])
            if gap < best_gap:
                best_gap = gap
                best = candidate
        if best is not None and best_gap < 0.2:
            best["matched"] = True
            stamp["matched"] = True
            stamp["content"] = best["content"]

    # Emit every stamp, converting (x, y, w, h) into a 4-corner polygon.
    output: List[Dict] = []
    for stamp in stamps:
        x, y, w, h = stamp["position"]
        output.append({
            "position": [[x, y], [x + w, y], [x + w, y + h], [x, y + h]],
            "content": stamp["content"],  # may be an empty list
            "confidence": stamp["confidence"],
        })
    return output

def process_handwrite_data(handwrite_data: Dict) -> Dict:
    """Normalize one handwriting-recognition record into the shared output schema."""
    text = handwrite_data.get("rec_text", "")
    score = float(handwrite_data.get("rec_score", 0))
    # Handwriting results carry no position information.
    return {"position": [], "content": text, "confidence": score}


def process_date_data(date_data: Dict) -> Dict:
    """Normalize one date-recognition record into the shared output schema."""
    text = date_data.get("rec_text", "")
    score = float(date_data.get("rec_score", 0))
    # Date results carry no position information.
    return {"position": [], "content": text, "confidence": score}


def _quad_area_doubled(points: List[List[int]]) -> int:
    """Twice the signed shoelace area of *points*; 0 means a degenerate polygon."""
    total = 0
    count = len(points)
    for i in range(count):
        x1, y1 = points[i]
        x2, y2 = points[(i + 1) % count]
        total += x1 * y2 - x2 * y1
    return total


def process_ocr_data(ocr_data: Dict) -> Dict:
    """Convert a raw OCR result into text blocks plus a weighted confidence.

    Returns a dict with:
      - "text_blocks": one entry per recognized line with position (4-point
        polygon, or [] when unavailable/degenerate), content and confidence;
      - "overall_confidence": confidence averaged with text length as weight,
        rounded to 4 decimals.
    """
    text_blocks = []
    confidences = []

    texts = ocr_data.get("rec_texts", [])
    scores = ocr_data.get("rec_scores", [])
    polygons = ocr_data.get("dt_polys", [])

    # The three arrays are expected to be parallel; clamp to the shortest so a
    # length mismatch cannot raise IndexError.
    count = min(len(texts), len(scores), len(polygons))

    for i in range(count):
        text = texts[i]
        score = float(scores[i])
        polygon = polygons[i]

        # Normalize to exactly four [x, y] integer points.
        normalized = []
        if len(polygon) >= 4:
            for point in polygon[:4]:
                if len(point) == 2:
                    normalized.append([int(point[0]), int(point[1])])
                else:
                    normalized.append([0, 0])  # default for malformed point

            # Drop zero-area (degenerate) polygons.  The previous check
            # compared individual coordinates and wrongly discarded valid
            # rectangles given in TL,BL,BR,TR order; the shoelace area is the
            # correct degeneracy test.
            if _quad_area_doubled(normalized) == 0:
                normalized = []

        text_blocks.append({
            "position": normalized,
            "content": text,
            "confidence": score,
        })
        confidences.append(score)

    # Weighted average confidence: longer texts contribute proportionally more.
    overall = 0.0
    if confidences:
        weights = [len(block["content"]) for block in text_blocks]
        total_weight = sum(weights) or 1  # avoid division by zero on empty texts
        overall = sum(c * w for c, w in zip(confidences, weights)) / total_weight

    return {
        "text_blocks": text_blocks,
        "overall_confidence": round(overall, 4),
    }


def merge_results(
    api_data: Dict,
    page_number: Optional[Dict],
    stamp_data_list: List,
    handwrite_data: List,
    date_data: List,
    ocr_data: List,
    page_num_list: List
) -> Dict:
    """Merge all per-category recognition results into the final response dict.

    Args:
        api_data: category-detection response (source of stamp boxes).
        page_number: page-number detection response, or None when unavailable.
        stamp_data_list: per-image seal recognition results.
        handwrite_data: handwriting recognition records.
        date_data: date recognition records.
        ocr_data: full-page OCR results (only the first entry is used).
        page_num_list: recognized page-number texts.

    Returns:
        A response dict with code/msg and a "data" payload containing
        categories (page_number, seals, handwritten_signatures,
        signature_dates) and the OCR text blocks.
    """
    # Merge every image's seal detections into a single structure so that
    # process_seal_data can match them against the API stamps in one pass.
    combined_seal_data = {
        "layout_det_res": {"boxes": []},
        "seal_res_list": []
    }

    for stamp_data in stamp_data_list:
        boxes = stamp_data.get("layout_det_res", {}).get("boxes")
        if boxes:
            combined_seal_data["layout_det_res"]["boxes"].extend(boxes)
        if "seal_res_list" in stamp_data:
            combined_seal_data["seal_res_list"].extend(stamp_data["seal_res_list"])

    # Match seals from the combined detections.
    seals = process_seal_data(combined_seal_data, api_data)

    # Normalize the remaining categories.
    handwritten_signatures = [process_handwrite_data(data) for data in handwrite_data]
    signature_dates = [process_date_data(data) for data in date_data]

    ocr_text = {"text_blocks": [], "overall_confidence": 0}
    if ocr_data:
        ocr_text = process_ocr_data(ocr_data[0])

    # Page-number info: only trusted when the API reports success (code == 0).
    page_number_info = None
    if page_number is not None and page_number.get("code") == 0:
        page_data_list = page_number.get("data", [])
        if page_data_list:  # API found a page-number box
            page_data = page_data_list[0]
            x, y, w, h = page_data["position"]
            # The recognized text may be missing or non-numeric; fall back to
            # None instead of crashing the whole merge (was an unguarded
            # int(page_num_list[0]["rec_text"])).
            content = None
            if page_num_list:
                try:
                    content = int(page_num_list[0].get("rec_text", ""))
                except (TypeError, ValueError):
                    content = None
            page_number_info = {
                "position": [
                    [x, y],
                    [x + w, y],
                    [x + w, y + h],
                    [x, y + h]
                ],
                "content": content,
                "confidence": page_data["confidence"]
            }
        else:  # success code but no detections
            page_number_info = {
                "position": [],
                "content": [],
                "confidence": []
            }

    # Assemble the final response.
    return {
        "code": 200,
        "msg": "ok",
        "data": {
            "categories": {
                "title": "",
                "page_number": page_number_info,
                "seals": seals,
                "handwritten_signatures": handwritten_signatures,
                "signature_dates": signature_dates
            },
            "ocr_text": ocr_text
        }
    }


def save_merged_result(result: Dict, output_path: str = "./merged_result.json"):
    """Persist the merged recognition result as pretty-printed UTF-8 JSON."""
    payload = json.dumps(result, ensure_ascii=False, indent=2)
    with open(output_path, "w", encoding="utf-8") as out_file:
        out_file.write(payload)
    print(f"合并结果已保存到: {output_path}")


def clean_direc(directory: str) -> None:
    """Delete everything inside *directory* (files, symlinks, subdirectories).

    The directory itself is kept.  Failures on individual entries are logged
    and do not abort the cleanup.
    """
    if not os.path.exists(directory):
        print(f"目录不存在: {directory}")
        return

    for entry in os.listdir(directory):
        entry_path = os.path.join(directory, entry)
        try:
            # Remove symlinks as links (never follow them into their target).
            if os.path.isfile(entry_path) or os.path.islink(entry_path):
                os.unlink(entry_path)
            elif os.path.isdir(entry_path):
                shutil.rmtree(entry_path)
        except Exception as exc:
            print(f"清理文件失败: {entry_path}, 错误: {exc}")
    print(f"已清空目录: {directory}")



# def det_all(img_base64):
#     for dir in ["date", "stamp", "handwrite", "pagenum"]:
#         clean_direc(dir)
#     print("=========== 临时文件已删除 ===========")


#     api_data = process_image(img_base64)
#     page_number = get_page_number(img_base64)

#     date = process_date(PIPELINE)
#     stamp = process_stamp(PIPELINE_SEAL)
#     handwrite = process_handwrite(PIPELINE)
#     page = process_page(PIPELINE)
#     ocr = process_ocr(PIPELINE_OCR, img_base64)
    
#     merged_res = merge_results(
#         api_data,
#         page_number,
#         stamp,
#         handwrite,
#         date,
#         ocr,
#         page
#     )
    
#     return merged_res

from concurrent.futures import ThreadPoolExecutor

def det_all(img_base64):
    """Run the full detection pipeline on a base64 image.

    Cleans the temporary working directories, fetches the API detections and
    page number, runs the five recognition passes concurrently on worker
    threads, then merges everything and returns the result as a JSON string.
    """
    # Drop leftovers from any previous run.
    for tmp_dir in ("date", "stamp", "handwrite", "pagenum"):
        clean_direc(tmp_dir)
    print("=========== 临时文件已删除 ===========")

    # Upstream API detections and page-number lookup.
    api_data = process_image(img_base64)
    page_number = get_page_number(img_base64)

    # Fan the recognition passes out to a thread pool.
    with ThreadPoolExecutor() as pool:
        date_future = pool.submit(process_date, PIPELINE)
        stamp_future = pool.submit(process_stamp, PIPELINE_SEAL)
        handwrite_future = pool.submit(process_handwrite, PIPELINE)
        page_future = pool.submit(process_page, PIPELINE)
        ocr_future = pool.submit(process_ocr, PIPELINE_OCR, img_base64)

        date = date_future.result()
        stamp = stamp_future.result()
        handwrite = handwrite_future.result()
        page = page_future.result()
        ocr = ocr_future.result()

    merged_res = merge_results(
        api_data,
        page_number,
        stamp,
        handwrite,
        date,
        ocr,
        page
    )

    return json.dumps(merged_res, indent=4, ensure_ascii=False)


if __name__ == "__main__":
    import time

    # Smoke test: run the whole pipeline on test.png and report wall time.
    started = time.time()
    image_b64 = base64.b64encode(Path("test.png").read_bytes()).decode('utf-8')
    result_json = det_all(image_b64)
    finished = time.time()
    print(f"总耗时: {finished - started:.2f}秒")

