import os
import re
import json
import time
import logging
import argparse
import pandas as pd
import base64
from multiprocessing import Pool, cpu_count
from datetime import datetime
from typing import List, Dict, Tuple, Optional
from volcenginesdkarkruntime import Ark
from new_prompt import PROMPTS, DIMENSIONS  # 导入拆分后的维度和提示词

# Logging configuration: INFO level, mirrored to both a log file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("image_analysis.log"),
        logging.StreamHandler()
    ]
)
# Module-level logger used by every function/class in this script.
logger = logging.getLogger(__name__)


def encode_image(image_path):
    """Read an image file and return its contents as a base64-encoded UTF-8 string.

    Logs and re-raises any failure (missing file, permission error, ...) so the
    caller can decide how to handle it.
    """
    try:
        with open(image_path, "rb") as image_fh:
            raw_bytes = image_fh.read()
        return base64.b64encode(raw_bytes).decode('utf-8')
    except Exception as e:
        logger.error(f"图片编码失败 {image_path}: {str(e)}")
        raise


def _failure_result(
        dimension: str,
        image1_path: str,
        image2_path: str,
        error: str,
        model: str,
        retry_count: int,
        analysis: Optional[str] = None
) -> Dict:
    """Build the uniform failure record returned by analyze_single_dimension."""
    return {
        "dimension": dimension,
        "image1": image1_path,
        "image2": image2_path,
        "analysis": analysis,
        "error": error,
        "status": "failed",
        "timestamp": datetime.now().isoformat(),
        "model_used": model,
        "retry_count": retry_count
    }


def analyze_single_dimension(
        image1_path: str,
        image2_path: str,
        prompt: str,
        model: str,
        dimension: str,
        max_retries: int = 3,
        retry_delay: int = 2
) -> Dict:
    """Run model inference for one big dimension, with result-format validation and retries.

    Args:
        image1_path: Path of the first image (figma screenshot).
        image2_path: Path of the second image (runtime screenshot).
        prompt: Dimension-specific prompt text.
        model: Model name passed to the Ark API.
        dimension: Big-dimension key (must exist in DIMENSIONS).
        max_retries: Maximum attempts when the model returns malformed JSON.
        retry_delay: Seconds to sleep between retries.

    Returns:
        A dict with status "success" (includes parsed "analysis_dict") or
        "failed" (includes "error"). Never raises.
    """
    # Encode both images exactly once, up front.
    try:
        img1_base64 = encode_image(image1_path)
        img2_base64 = encode_image(image2_path)
    except Exception as e:
        logger.error(
            f"图片编码失败（维度：{dimension}）：{os.path.basename(image1_path)} & {os.path.basename(image2_path)}")
        return _failure_result(
            dimension, image1_path, image2_path,
            f"图片编码失败：{str(e)}", model, 0
        )

    # Single user message carrying both images plus the dimension prompt.
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img1_base64}"}},
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img2_base64}"}},
                {"type": "text", "text": prompt}
            ]
        }
    ]

    # SECURITY: a credential was hardcoded here; prefer the ARK_API_KEY env var.
    # The hardcoded value is kept only as a fallback for backward compatibility —
    # rotate this key and remove the fallback.
    client = Ark(
        api_key=os.environ.get("ARK_API_KEY", "Z5Yr0stNmxF8yFfcekeRxV3dpxXhYqkz_GPT_AK"),
        base_url="https://search.bytedance.net/gpt/openapi/online/v2/crawl"
    )

    raw_analysis: Optional[str] = None
    for retry_idx in range(max_retries):
        current_try = retry_idx + 1
        try:
            logger.info(
                f"维度[{dimension}]第{current_try}/{max_retries}次调用模型：{model}，处理图片对：{os.path.basename(image1_path)}")

            completion = client.chat.completions.create(model=model, messages=messages)
            raw_analysis = completion.choices[0].message.content

            # Validate: result must be JSON whose keys exactly match this
            # dimension's small items, with numeric 0-3 values.
            _, required_small_dims = DIMENSIONS[dimension]
            analysis_dict = json.loads(raw_analysis)

            returned_keys = set(analysis_dict.keys())
            required_keys = set(required_small_dims)
            if returned_keys != required_keys:
                missing = required_keys - returned_keys
                extra = returned_keys - required_keys
                raise ValueError(f"键不匹配：缺少{missing}，多余{extra}")

            for key, value in analysis_dict.items():
                # bool is a subclass of int: reject JSON true/false explicitly
                # so they don't slip through as scores 1/0.
                if isinstance(value, bool) or not isinstance(value, (int, float)) or not (0 <= value <= 3):
                    raise ValueError(f"{key}的值{value}无效（必须是0-3的数字）")

            # Validation passed.
            return {
                "dimension": dimension,
                "image1": image1_path,
                "image2": image2_path,
                "analysis": raw_analysis,
                "analysis_dict": analysis_dict,
                "status": "success",
                "timestamp": datetime.now().isoformat(),
                "model_used": model,
                "retry_count": retry_idx
            }

        except (json.JSONDecodeError, ValueError) as e:
            # Malformed result: retry until attempts are exhausted.
            if retry_idx < max_retries - 1:
                logger.warning(f"维度[{dimension}]第{current_try}次结果无效：{str(e)}，{retry_delay}秒后重试")
                time.sleep(retry_delay)
            else:
                # raw_analysis is pre-initialized to None, so slicing is guarded.
                preview = raw_analysis[:100] if raw_analysis is not None else None
                logger.error(f"维度[{dimension}]重试耗尽：{str(e)}，原始结果：{preview}")
                return _failure_result(
                    dimension, image1_path, image2_path,
                    str(e), model, max_retries, analysis=raw_analysis
                )

        except Exception as e:
            # API/transport errors are not retried.
            logger.error(f"维度[{dimension}]第{current_try}次调用失败：{str(e)}")
            return _failure_result(
                dimension, image1_path, image2_path,
                f"API调用失败：{str(e)}", model, retry_idx
            )


def process_pair_with_dimensions(args: Tuple[Tuple[str, str], str, List[str], int, int]) -> Dict:
    """Run every requested dimension for one image pair and merge the results.

    Args is a single tuple (multiprocessing-friendly):
        ((image1, image2), model, dimensions, max_retries, retry_delay)

    Returns one merged record; individual per-dimension records are kept under
    "dimension_results".
    """
    (image1, image2), model, dimensions, max_retries, retry_delay = args
    logger.info(f"开始处理图片对：{os.path.basename(image1)} & {os.path.basename(image2)}，维度：{dimensions}")

    # Dimensions are scored one after another (kept serial for simplicity).
    dimension_results = [
        analyze_single_dimension(
            image1_path=image1,
            image2_path=image2,
            # Prompt key "v1_<dimension>" matches the versions in new_prompt.py.
            prompt=PROMPTS[f"v1_{dim}"],
            model=model,
            dimension=dim,
            max_retries=max_retries,
            retry_delay=retry_delay
        )
        for dim in dimensions
    ]

    # Merge the per-dimension score dicts and collect any failure messages.
    merged_analysis: Dict = {}
    error_messages: List[str] = []
    for res in dimension_results:
        merged_analysis.update(res.get("analysis_dict", {}))
        if res["status"] == "failed":
            error_messages.append(f"维度[{res['dimension']}]：{res['error']}")
    all_success = not error_messages

    return {
        "image1": image1,
        "image2": image2,
        "analysis": json.dumps(merged_analysis) if merged_analysis else None,
        "analysis_dict": merged_analysis,
        "status": "success" if all_success else "failed",
        "error": "; ".join(error_messages) if error_messages else "",
        "timestamp": datetime.now().isoformat(),
        "model_used": model,
        "dimension_results": dimension_results  # raw per-dimension records
    }


def find_image_pairs(folder_path: str) -> List[Tuple[str, str]]:
    """Find image pairs: files sharing a core id form a pair; the figma-prefixed
    file is image one, any other recognized prefix is image two.

    A file's "core id" is its name with the recognized prefix stripped
    (extension included). Files with unknown prefixes, duplicate prefixes per
    core id, or incomplete pairs are skipped with a warning.

    Args:
        folder_path: Directory to scan (non-recursive).

    Returns:
        List of (figma_path, other_path) tuples.
    """
    logger.info(f"在文件夹 {folder_path} 中查找图片对...")
    image_extensions = ('.png', '.jpg', '.jpeg', '.gif', '.bmp')

    # prefix type -> prefix string ("figma" is image one; extend for more types).
    PREFIX_RULES = {
        "figma": "figma_screenshot_",
        "runtime": "runtimescreenshot_"
    }

    # core id -> {prefix type: file path}
    core_id_groups: Dict[str, Dict[str, str]] = {}

    for filename in os.listdir(folder_path):
        # Skip non-image files and non-regular files.
        if not filename.lower().endswith(image_extensions):
            continue
        file_path = os.path.join(folder_path, filename)
        if not os.path.isfile(file_path):
            continue

        # Determine this file's prefix type and core id.
        prefix_type = None
        core_id = None
        for type_, prefix_str in PREFIX_RULES.items():
            if filename.startswith(prefix_str):
                prefix_type = type_
                core_id = filename[len(prefix_str):]
                break

        if prefix_type is None:
            # BUG FIX: previously logged the literal "(unknown)" instead of the name.
            logger.warning(f"无法识别前缀，跳过图片：{filename}")
            continue

        # Group by core id; at most one file per prefix type within a group.
        group = core_id_groups.setdefault(core_id, {})
        if prefix_type in group:
            # BUG FIX: previously logged the literal "(unknown)" instead of the name.
            logger.warning(f"核心标识 {core_id} 存在重复{prefix_type}前缀的图片，跳过：{filename}")
            continue
        group[prefix_type] = file_path

    # Emit pairs: figma image first, the (single) other-prefix image second.
    pairs = []
    for core_id, type_paths in core_id_groups.items():
        if "figma" not in type_paths:
            logger.warning(f"核心标识 {core_id} 缺少figma前缀的图片，无法组成对")
            continue

        figma_path = type_paths["figma"]
        for prefix_type, other_path in type_paths.items():
            if prefix_type != "figma":
                pairs.append((figma_path, other_path))
                logger.debug(
                    f"找到图片对: {os.path.basename(figma_path)}（图一） 和 {os.path.basename(other_path)}（图二）")
                break  # only one non-figma partner per core id
        else:
            logger.warning(f"核心标识 {core_id} 缺少其他前缀的图片，无法组成对")

    logger.info(f"共找到 {len(pairs)} 对有效图片")
    return pairs


class ImagePairAnalysisWorkflow:
    """End-to-end workflow: discover image pairs, score them per big dimension in
    parallel, persist raw results (JSONL) plus an Excel summary, then print
    weighted aggregate scores."""

    # Small-dimension score columns expected in the summary table.
    _TARGET_COLS = ["布局", "间距", "对齐", "元素嵌套", "颜色", "字体/字号", "行高",
                    "边框/圆角", "阴影", "图标引用", "图片引用", "组件覆盖度"]

    # Small dimensions grouped under each big dimension for the final report.
    _DIMENSION_GROUPS = {
        "Layout 布局": ["布局", "间距", "对齐", "元素嵌套"],
        "Style 风格检查1": ["颜色", "字体/字号", "行高", "边框/圆角", "阴影"],
        "Style 风格检查2": ["图标引用", "图片引用"],
        "元素/组件召回": ["组件覆盖度"]
    }

    # Big-dimension weights (tune as needed; should sum to 1.0).
    _BIG_DIM_WEIGHTS = {
        "Layout 布局": 0.25,
        "Style 风格检查1": 0.2,
        "Style 风格检查2": 0.25,
        "元素/组件召回": 0.3
    }

    def __init__(self, config: Dict):
        """Validate config, create the output folder, and freeze a run timestamp.

        Raises:
            ValueError: If any requested dimension is not in DIMENSIONS.
        """
        self.config = config
        self.image_folder = config.get('image_folder', 'images')
        self.output_folder = config.get('output_folder', 'analysis_results')
        # BUG FIX: the CLI passes max_workers=None through the config dict, and
        # dict.get only falls back when the key is *absent* — so the old
        # config.get('max_workers', default) yielded None. Treat None as
        # "use the default".
        workers = config.get('max_workers')
        self.max_workers = workers if workers else max(1, cpu_count() - 1)
        self.model = config.get('model', 'doubao-seed-1-6-flash-250828')
        self.max_retries = config.get('max_retries', 3)
        self.retry_delay = config.get('retry_delay', 2)
        # Big dimensions to process (defaults to all of them).
        self.dimensions = config.get('dimensions') or list(DIMENSIONS.keys())
        for dim in self.dimensions:
            if dim not in DIMENSIONS:
                raise ValueError(f"无效的大维度：{dim}，可选维度：{list(DIMENSIONS.keys())}")

        os.makedirs(self.output_folder, exist_ok=True)
        self.timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

    def batch_upload(self) -> List[Tuple[str, str]]:
        """Discover image pairs in the configured image folder."""
        return find_image_pairs(self.image_folder)

    def parallel_scoring(self, image_pairs: List[Tuple[str, str]]) -> List[Dict]:
        """Score every image pair across the configured dimensions, in parallel.

        Returns one merged result dict per pair (see process_pair_with_dimensions).
        """
        if not image_pairs:
            logger.warning("没有图片对需要处理")
            return []

        logger.info(f"开始并行处理 {len(image_pairs)} 对图片，维度：{self.dimensions}，进程数：{self.max_workers}")
        start_time = time.time()

        args = [
            (pair, self.model, self.dimensions, self.max_retries, self.retry_delay)
            for pair in image_pairs
        ]

        with Pool(processes=self.max_workers) as pool:
            results = pool.map(process_pair_with_dimensions, args)

        logger.info(f"并行处理完成，耗时 {time.time() - start_time:.2f} 秒")
        return results

    def aggregate_results(self, results: List[Dict]) -> Optional[str]:
        """Write raw results, one JSON object per line, to a timestamped JSONL file.

        Returns the JSONL path, or None when there is nothing to save.
        """
        if not results:
            logger.warning("没有结果需要汇总")
            return None

        jsonl_path = os.path.join(
            self.output_folder,
            f"analysis_results_{self.timestamp}.jsonl"
        )

        with open(jsonl_path, 'w', encoding='utf-8') as f:
            for result in results:
                json.dump(result, f, ensure_ascii=False)
                f.write('\n')

        logger.info(f"结果已保存到 {jsonl_path}")
        return jsonl_path

    def parse_results(self, results: List[Dict]) -> Optional[str]:
        """Flatten results into an Excel summary and print weighted aggregate scores.

        Returns the Excel path, or None when there is nothing to parse.
        """
        if not results:
            logger.warning("没有结果需要解析")
            return None

        logger.info("开始解析结果并生成Excel...")
        excel_data = []
        for result in results:
            base_data = {
                "图片1": os.path.basename(result["image1"]),
                "图片2": os.path.basename(result["image2"]),
                "状态": result["status"],
                "错误信息": result.get("error", ""),
                "处理时间": result["timestamp"],
                "使用模型": result.get("model_used", ""),
                # BUG FIX: max() over an empty sequence raises ValueError when a
                # result carries no dimension_results — default to 0.
                "重试次数": max(
                    (res.get("retry_count", 0) for res in result.get("dimension_results", [])),
                    default=0
                )
            }
            excel_data.append({**base_data, **result.get("analysis_dict", {})})

        df = pd.DataFrame(excel_data)
        excel_path = os.path.join(
            self.output_folder,
            f"analysis_summary_{self.timestamp}.xlsx"
        )
        df.to_excel(excel_path, index=False)
        logger.info(f"Excel汇总表已保存到 {excel_path}")

        # Reuse the in-memory frame instead of re-reading the file just written.
        self._report_scores(df)
        return excel_path

    def _report_scores(self, df: "pd.DataFrame") -> None:
        """Print per-column averages, big-dimension (weighted) scores and the total."""
        # BUG FIX: only average columns that actually exist — when every run for
        # a dimension failed, its column is absent and df[target_cols] would
        # raise KeyError.
        present_cols = [c for c in self._TARGET_COLS if c in df.columns]
        col_avg = df[present_cols].mean().round(4)  # keep 4 decimal places

        print("指定列的平均值：")
        print(col_avg)
        col_avg_dict = col_avg.to_dict()

        # 1. Average the available small-dimension means within each big dimension.
        big_dim_scores = {}
        for big_dim, small_dims in self._DIMENSION_GROUPS.items():
            valid_small_scores = [
                col_avg_dict[dim] for dim in small_dims
                if dim in col_avg_dict and col_avg_dict[dim] is not None
            ]
            if valid_small_scores:
                big_dim_scores[big_dim] = round(sum(valid_small_scores) / len(valid_small_scores), 4)
            else:
                big_dim_scores[big_dim] = 0  # no data for this big dimension

        # 2. Weight each big-dimension score and accumulate the total.
        total_score = 0.0
        big_dim_weighted_scores = {}
        for big_dim, weight in self._BIG_DIM_WEIGHTS.items():
            weighted_score = round(big_dim_scores.get(big_dim, 0) * weight, 4)
            big_dim_weighted_scores[big_dim] = weighted_score
            total_score += weighted_score
        total_score = round(total_score, 4)

        print("\n大维度平均得分：")
        for dim, score in big_dim_scores.items():
            print(f"{dim}：{score}")

        print("\n大维度加权得分：")
        for dim, w_score in big_dim_weighted_scores.items():
            print(f"{dim}（权重{self._BIG_DIM_WEIGHTS[dim]}）：{w_score}")

        print(f"\n最终总分：{total_score}")

    def run_workflow(self) -> Tuple[Optional[str], Optional[str]]:
        """Execute the full pipeline: discover -> score -> persist -> report.

        Returns:
            (jsonl_path, excel_path); (None, None) when no pairs were found.

        Raises:
            Re-raises any unexpected error after logging it.
        """
        start_time = time.time()
        logger.info(f"===== 开始图片对批量分析工作流（维度：{self.dimensions}，模型：{self.model}） =====")

        try:
            image_pairs = self.batch_upload()
            if not image_pairs:
                logger.info("没有找到图片对，工作流结束")
                return None, None

            results = self.parallel_scoring(image_pairs)
            jsonl_path = self.aggregate_results(results)
            excel_path = self.parse_results(results)

            total_time = time.time() - start_time
            logger.info(f"===== 工作流完成，总耗时 {total_time:.2f} 秒 =====")
            return jsonl_path, excel_path

        except Exception as e:
            logger.error(f"工作流执行失败: {str(e)}", exc_info=True)
            raise


if __name__ == "__main__":
    # CLI entry point: parse arguments, build the config dict, run the workflow.
    parser = argparse.ArgumentParser(description='按大维度拆分的图片对批量分析工作流')
    parser.add_argument('--image_folder', type=str, default='images', help='图片文件夹路径')
    parser.add_argument('--output_folder', type=str, default='analysis_results', help='结果输出文件夹路径')
    parser.add_argument('--max_workers', type=int, default=None, help='并行工作进程数')
    parser.add_argument('--model', type=str, default='doubao-seed-1-6-flash-250828', help='指定调用的模型名称')
    parser.add_argument('--max_retries', type=int, default=3, help='每个维度的最大重试次数')
    parser.add_argument('--retry_delay', type=int, default=2, help='重试间隔时间（秒）')
    parser.add_argument('--dimensions', type=str, nargs='+', default=None,
                        help=f'指定要处理的大维度，可选值：{list(DIMENSIONS.keys())}，默认处理所有维度')

    cli_args = parser.parse_args()

    # Argparse destinations match the workflow's config keys one-to-one.
    config = dict(vars(cli_args))
    if config.get('dimensions') is None:
        config['dimensions'] = list(DIMENSIONS.keys())

    ImagePairAnalysisWorkflow(config).run_workflow()

