import os
import re
import json
import time
import logging
import argparse
import pandas as pd
from multiprocessing import Pool, cpu_count
from datetime import datetime
from typing import List, Dict, Tuple, Optional
import base64
from volcenginesdkarkruntime import Ark

# Logging setup: mirror every record to a log file and to the console.
_handlers = [
    logging.FileHandler("image_analysis.log"),
    logging.StreamHandler(),
]
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=_handlers,
)
logger = logging.getLogger(__name__)


def encode_image(image_path):
    """Read the file at *image_path* and return its bytes as a UTF-8 Base64 string.

    Any failure (missing file, permission error, ...) is logged and re-raised
    so the caller decides how to handle it.
    """
    try:
        with open(image_path, "rb") as fh:
            raw_bytes = fh.read()
        return base64.b64encode(raw_bytes).decode('utf-8')
    except Exception as e:
        logger.error(f"图片编码失败 {image_path}: {str(e)}")
        raise


def analyze_two_images(
        image1_path: str,
        image2_path: str,
        prompt: str,
        model: str,
        max_retries: int = 3,  # maximum number of attempts (default 3)
        retry_delay: int = 2  # seconds to sleep between attempts (default 2)
) -> Dict:
    """
    Call the Doubao API to compare two images. The response only counts as
    "expected" when it parses to a dict that exactly matches the target
    structure; otherwise the call is retried up to *max_retries* times.

    Target structure: {"布局":num,"间距/对齐":num,"元素嵌套":num,"颜色":num,
    "字体/字号":num,"行高":num,"边框/圆角":num,"阴影":num,"图标引用":num,
    "图片引用":num,"组件覆盖度":num}

    Not-expected cases: 1. response is not valid JSON; 2. key count/names
    mismatch; 3. a value is not numeric.

    Returns a result dict with at least: image1, image2, analysis, status,
    timestamp, model_used, retry_count, is_expected, failure_reason (and
    analysis_dict on success / error on failure).
    """
    # --------------------------
    # 1. Target dict structure used as the validation standard
    # --------------------------
    TARGET_KEYS = {
        "布局", "间距/对齐", "元素嵌套", "颜色", "字体/字号",
        "行高", "边框/圆角", "阴影", "图标引用", "图片引用", "组件覆盖度"
    }
    TARGET_KEY_COUNT = len(TARGET_KEYS)

    # --------------------------
    # 2. Encode both images once up front so retries do not redo the work
    # --------------------------
    try:
        img1_base64 = encode_image(image1_path)
        img2_base64 = encode_image(image2_path)
    except Exception as e:
        # Local encoding failures cannot be fixed by retrying the API call.
        logger.error(
            f"图片编码失败（无需重试）：{os.path.basename(image1_path)} & {os.path.basename(image2_path)}，错误：{str(e)}")
        return {
            "image1": image1_path,
            "image2": image2_path,
            "analysis": None,
            "error": f"图片编码失败：{str(e)}",
            "status": "failed",
            "timestamp": datetime.now().isoformat(),
            "model_used": model,
            "retry_count": 0,
            "is_expected": False,
            "failure_reason": "本地图片处理错误"
        }

    # --------------------------
    # 3. Fixed request body, reused by every retry
    # --------------------------
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img1_base64}"}},
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img2_base64}"}},
                {"type": "text",
                 "text": prompt + "\n【强制要求】返回结果必须是严格JSON格式，键必须包含且仅包含：布局、间距/对齐、元素嵌套、颜色、字体/字号、行高、边框/圆角、阴影、图标引用、图片引用、组件覆盖度，值必须是数字（0-5），无任何额外文字！"}
            ]
        }
    ]

    # Reuse one API client across retries to reduce connection overhead.
    client = Ark(
        api_key=os.environ.get("ARK_API_KEY"),
        base_url="https://ark.cn-beijing.volces.com/api/v3",
    )

    # --------------------------
    # 4. Retry loop: only structural-validation failures are retried
    # --------------------------
    raw_analysis = None  # last raw response, kept for error reporting
    for retry_idx in range(max_retries):
        current_try = retry_idx + 1
        try:
            logger.info(
                f"第 {current_try}/{max_retries} 次调用模型：{model}，分析图片对：{os.path.basename(image1_path)} & {os.path.basename(image2_path)}")

            # Call the API.
            completion = client.chat.completions.create(model=model, messages=messages)
            raw_analysis = completion.choices[0].message.content

            # --------------------------
            # 5. Validation: three steps decide whether the result is expected
            # --------------------------
            # Step 1: parse as JSON (failure -> not expected).
            # Models sometimes wrap JSON in a Markdown code fence despite the
            # prompt; strip a leading/trailing ``` fence before parsing so a
            # structurally-correct answer is not rejected. Strict JSON input
            # is unaffected.
            cleaned = raw_analysis.strip()
            if cleaned.startswith("```"):
                cleaned = re.sub(r'^```(?:json)?\s*|\s*```$', '', cleaned)
            try:
                analysis_dict = json.loads(cleaned)
            except json.JSONDecodeError as e:
                raise ValueError(f"JSON解析失败：{str(e)[:50]}...")

            # Step 2: keys must match the target exactly (count and names).
            analysis_keys = set(analysis_dict.keys())
            if len(analysis_keys) != TARGET_KEY_COUNT:
                raise ValueError(
                    f"键数量不匹配：预期{TARGET_KEY_COUNT}个，实际{len(analysis_keys)}个，实际键：{sorted(analysis_keys)}")
            if analysis_keys != TARGET_KEYS:
                missing_keys = TARGET_KEYS - analysis_keys
                extra_keys = analysis_keys - TARGET_KEYS
                error_msg = ""
                if missing_keys:
                    error_msg += f"缺少键：{sorted(missing_keys)}；"
                if extra_keys:
                    error_msg += f"多余键：{sorted(extra_keys)}"
                raise ValueError(error_msg.strip("；"))

            # Step 3: every value must be a real number. `bool` is explicitly
            # excluded — it subclasses `int`, so a plain isinstance test (and
            # the old pandas-based check) would let true/false slip through.
            non_numeric_keys = []
            for key, value in analysis_dict.items():
                if isinstance(value, bool) or not isinstance(value, (int, float)):
                    non_numeric_keys.append(f"{key}（值：{value}，类型：{type(value).__name__}）")
            if non_numeric_keys:
                raise ValueError(f"值类型不匹配（需数字）：{', '.join(non_numeric_keys)}")

            # --------------------------
            # 6. All checks passed: return the expected result
            # --------------------------
            logger.info(f"第 {current_try} 次调用成功，结果完全匹配目标字典结构")
            return {
                "image1": image1_path,
                "image2": image2_path,
                "analysis": raw_analysis,
                "analysis_dict": analysis_dict,  # parsed dict for downstream use
                "status": "success",
                "timestamp": datetime.now().isoformat(),
                "model_used": model,
                "retry_count": retry_idx,
                "is_expected": True,
                "failure_reason": None
            }

        # --------------------------
        # 7. Decide whether to retry based on the failure class
        # --------------------------
        except ValueError as e:
            # Case 1: structural mismatch (bad JSON / wrong keys / non-numeric
            # values) -> retryable.
            failure_reason = f"结果结构不满足：{str(e)}"
            if retry_idx < max_retries - 1:
                logger.warning(f"第 {current_try} 次{failure_reason}，{retry_delay}秒后重试")
                time.sleep(retry_delay)
            else:
                logger.error(f"已耗尽 {max_retries} 次重试，{failure_reason}，原始返回：{raw_analysis[:100]}...")
                return {
                    "image1": image1_path,
                    "image2": image2_path,
                    "analysis": raw_analysis,
                    "error": failure_reason,
                    "status": "failed",
                    "timestamp": datetime.now().isoformat(),
                    "model_used": model,
                    "retry_count": max_retries,
                    "is_expected": False,
                    "failure_reason": failure_reason
                }

        except Exception as e:
            # Case 2: API call error (network / key / unknown model, ...)
            # -> treated as non-retryable, return immediately.
            error_type = type(e).__name__
            failure_reason = f"API调用异常（{error_type}）：{str(e)}"
            logger.error(f"第 {current_try} 次{failure_reason}，无需重试")
            return {
                "image1": image1_path,
                "image2": image2_path,
                "analysis": None,
                "error": failure_reason,
                "status": "failed",
                "timestamp": datetime.now().isoformat(),
                "model_used": model,
                "retry_count": retry_idx,
                "is_expected": False,
                "failure_reason": failure_reason
            }

    # Defensive fallback: only reachable when max_retries <= 0 (the loop body
    # never ran). The original code implicitly returned None here, breaking
    # downstream consumers that index into the result dict.
    return {
        "image1": image1_path,
        "image2": image2_path,
        "analysis": None,
        "error": "未执行任何API调用（max_retries <= 0）",
        "status": "failed",
        "timestamp": datetime.now().isoformat(),
        "model_used": model,
        "retry_count": 0,
        "is_expected": False,
        "failure_reason": "max_retries <= 0"
    }


def find_image_pairs(folder_path: str) -> List[Tuple[str, str]]:
    """Find paired images in *folder_path* and return them in suffix order.

    Files are grouped by base name on the pattern ``name_<suffix>.<ext>``
    (separator ``_`` or ``-``; suffix a number or a single letter). Only
    groups of exactly two images form a pair; larger groups are skipped.
    """
    logger.info(f"在文件夹 {folder_path} 中查找图片对...")

    # Collect all image files in the folder.
    image_extensions = ('.png', '.jpg', '.jpeg', '.gif', '.bmp')
    images = [
        f for f in os.listdir(folder_path)
        if f.lower().endswith(image_extensions) and os.path.isfile(os.path.join(folder_path, f))
    ]

    # Regex matching names like "name_1.png", "img-2.png", "shot_a.jpg".
    pattern = re.compile(r'^(.+?)[_-](\d+|[a-z])\.(png|jpg|jpeg|gif|bmp)$', re.IGNORECASE)

    # Group candidate images by base name.
    image_groups = {}
    for image in images:
        match = pattern.match(image)
        if match:
            base_name = match.group(1)
            suffix = match.group(2)
            if base_name not in image_groups:
                image_groups[base_name] = []
            image_groups[base_name].append((suffix, image))

    # Sort each group and keep only groups of exactly two images.
    # BUGFIX: the previous key mixed int and str in the same tuple slot, so a
    # group with suffixes like "1" and "a" raised TypeError when compared.
    # Use a type tag so numeric suffixes sort numerically, before letters.
    def _suffix_key(item):
        suffix = item[0]
        if suffix.isdigit():
            return (0, int(suffix), "")
        return (1, 0, suffix.lower())

    pairs = []
    for base_name, items in image_groups.items():
        items.sort(key=_suffix_key)
        if len(items) == 2:
            img1 = os.path.join(folder_path, items[0][1])
            img2 = os.path.join(folder_path, items[1][1])
            pairs.append((img1, img2))
            logger.debug(f"找到图片对: {items[0][1]} 和 {items[1][1]}")
        elif len(items) > 2:
            logger.warning(f"基础名称 {base_name} 有 {len(items)} 张图片，无法形成唯一图片对，已跳过")

    logger.info(f"共找到 {len(pairs)} 对有效图片")
    return pairs


def process_pair(args: Tuple[Tuple[str, str], str, str]) -> Dict:
    """Worker entry point: unpack one ((img1, img2), prompt, model) item and
    delegate scoring to analyze_two_images."""
    pair, prompt, model = args
    image1, image2 = pair
    logger.info(f"开始处理图片对: {os.path.basename(image1)} 和 {os.path.basename(image2)}，使用模型: {model}")
    return analyze_two_images(image1, image2, prompt, model)


class ImagePairAnalysisWorkflow:
    """End-to-end batch workflow: discover image pairs, score them in parallel
    through the model API, then persist the results as JSONL and Excel plus a
    weighted dimension summary."""

    def __init__(self, config: Dict):
        """Initialize the workflow from a config dict.

        Recognized keys: image_folder, output_folder, max_workers,
        prompt_version, model. Missing or falsy values fall back to defaults.
        """
        self.config = config
        self.image_folder = config.get('image_folder', 'images')
        self.output_folder = config.get('output_folder', 'analysis_results')
        # BUGFIX: the CLI always supplies max_workers (value None when the flag
        # is omitted), and dict.get() only applies its default for *missing*
        # keys — so the old code silently passed None to Pool (all CPUs).
        # Treat any falsy value as "use the default" of cpu_count - 1, min 1.
        self.max_workers = config.get('max_workers') or max(1, cpu_count() - 1)
        self.prompt_version = config.get('prompt_version', 'v1')
        self.model = config.get('model', 'doubao-seed-1-6-flash-250828')  # model name to call

        # Make sure the output directory exists.
        os.makedirs(self.output_folder, exist_ok=True)

        # Timestamp embedded in result file names.
        self.timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

        # Load the prompt; fall back to a built-in default when unavailable.
        try:
            from prompts import get_prompt
            self.prompt = get_prompt(version=self.prompt_version)
        except ImportError:
            logger.warning("未找到prompts模块，使用默认提示词")
            self.prompt = "请分析这两张图片的内容差异，包括场景、物体和色彩，并给出详细对比。"
        except Exception as e:
            logger.warning(f"加载提示词失败: {str(e)}，使用默认提示词")
            self.prompt = "请分析这两张图片的内容差异，包括场景、物体和色彩，并给出详细对比。"

    def batch_upload(self) -> List[Tuple[str, str]]:
        """Collect the image pairs to process from the configured folder."""
        return find_image_pairs(self.image_folder)

    def parallel_scoring(self, image_pairs: List[Tuple[str, str]]) -> List[Dict]:
        """Score all image pairs in a process pool, passing the configured model."""
        if not image_pairs:
            logger.warning("没有图片对需要处理")
            return []

        logger.info(f"开始并行处理 {len(image_pairs)} 对图片，使用 {self.max_workers} 个工作进程，模型: {self.model}")

        start_time = time.time()

        # One work item per pair: ((img1, img2), prompt, model).
        args = [(pair, self.prompt, self.model) for pair in image_pairs]

        # Process pool for true parallelism (scoring is dominated by API calls
        # plus image encoding).
        with Pool(processes=self.max_workers) as pool:
            results = pool.map(process_pair, args)

        end_time = time.time()
        logger.info(f"并行处理完成，耗时 {end_time - start_time:.2f} 秒")

        return results

    def aggregate_results(self, results: List[Dict]) -> str:
        """Write all raw result dicts to a timestamped JSONL file and return its path."""
        if not results:
            logger.warning("没有结果需要汇总")
            return None

        logger.info("开始汇总结果...")

        jsonl_path = os.path.join(
            self.output_folder,
            f"analysis_results_{self.timestamp}.jsonl"
        )

        # JSONL: one JSON object per line.
        with open(jsonl_path, 'w', encoding='utf-8') as f:
            for result in results:
                json.dump(result, f, ensure_ascii=False)
                f.write('\n')

        logger.info(f"结果已保存到 {jsonl_path}")
        return jsonl_path

    def parse_results(self, results: List[Dict]) -> str:
        """Flatten results into an Excel sheet and compute weighted dimension stats.

        Returns the Excel file path (also writes a dimension_stats JSONL file).
        """
        if not results:
            logger.warning("没有结果需要解析")
            return None

        logger.info("开始解析结果并生成Excel...")

        # Build one flat row per result: base columns + per-dimension scores.
        excel_data = []
        for result in results:
            base_data = {
                "图片1": os.path.basename(result["image1"]),
                "图片2": os.path.basename(result["image2"]),
                "状态": result["status"],
                "错误信息": result.get("error", ""),
                "处理时间": result["timestamp"],
                "使用模型": result.get("model_used", "")  # model column
            }

            analysis_dict = {}
            if result["status"] == "success":
                # Prefer the already-validated dict produced by the analyzer;
                # re-parse the raw string only as a fallback.
                stored = result.get("analysis_dict")
                if isinstance(stored, dict):
                    analysis_dict = stored
                elif result["analysis"]:
                    try:
                        analysis_dict = json.loads(result["analysis"])
                    except json.JSONDecodeError as e:
                        logger.error(f"解析analysis失败: {str(e)}")
                        analysis_dict = {"analysis解析错误": str(e)}

            combined_data = {**base_data, **analysis_dict}
            excel_data.append(combined_data)

        # Save the flat table as Excel.
        df = pd.DataFrame(excel_data)
        excel_path = os.path.join(
            self.output_folder,
            f"analysis_summary_{self.timestamp}.xlsx"
        )
        df.to_excel(excel_path, index=False)

        logger.info(f"Excel汇总表已保存到 {excel_path}")

        # Mapping from big dimensions to the small-dimension score columns.
        # BUGFIX: the score key returned by the model is "间距/对齐" (one key);
        # the old mapping listed "间距" and "对齐" separately, so those scores
        # never matched a column and were silently dropped from the average.
        dimension_groups = {
            "Layout 布局": ["布局", "间距/对齐", "元素嵌套"],
            "Style 风格检查1": ["颜色", "字体/字号", "行高", "边框/圆角", "阴影"],
            "Style 风格检查2": ["图标引用", "图片引用"],
            "元素/组件召回": ["组件覆盖度"]
        }
        # Big-dimension weights (tune as needed; weights sum to 1.0).
        big_dim_weights = {
            "Layout 布局": 0.25,
            "Style 风格检查1": 0.2,
            "Style 风格检查2": 0.25,
            "元素/组件召回": 0.3
        }

        # Score columns = everything except the base/bookkeeping columns.
        base_cols = ["图片1", "图片2", "状态", "错误信息", "处理时间", "使用模型", "analysis解析错误"]
        small_dims = [col for col in df.columns if col not in base_cols]

        # Mean per small dimension (numeric columns only; NaNs are skipped).
        small_dim_avg = {}
        for dim in small_dims:
            if pd.api.types.is_numeric_dtype(df[dim]):
                small_dim_avg[dim] = round(df[dim].mean(), 4)
            else:
                small_dim_avg[dim] = None  # non-numeric column

        # Big-dimension mean = mean of its available small-dimension means.
        big_dim_avg = {}
        for big_dim, small_dims_list in dimension_groups.items():
            valid_avg = [
                small_dim_avg[dim]
                for dim in small_dims_list
                if dim in small_dim_avg and small_dim_avg[dim] is not None
            ]
            if valid_avg:
                big_dim_avg[big_dim] = round(sum(valid_avg) / len(valid_avg), 4)
            else:
                big_dim_avg[big_dim] = None

        # Final weighted score: sum of (big-dimension mean x weight).
        weighted_result = {}
        total_weighted = 0.0
        for big_dim in dimension_groups:
            avg = big_dim_avg[big_dim]
            weight = big_dim_weights.get(big_dim, 0.0)
            if avg is not None:
                weighted = round(avg * weight, 4)
                weighted_result[big_dim] = weighted
                total_weighted += weighted

        # Assemble the stats payload.
        output_json = {
            "timestamp": datetime.now().isoformat(),
            "model_used": self.model,  # record the model that produced the scores
            "small_dimension_averages": small_dim_avg,
            "big_dimension_weighted": weighted_result,
            "total_weighted": round(total_weighted, 4)
        }

        # Print the stats for quick inspection.
        logger.info("统计结果:")
        print(json.dumps(output_json, ensure_ascii=False, indent=2))

        # Append to the stats JSONL file (one JSON object per line).
        jsonl_path = os.path.join(
            self.output_folder,
            f"dimension_stats_{self.timestamp}.jsonl"
        )
        with open(jsonl_path, 'a', encoding='utf-8') as f:
            json.dump(output_json, f, ensure_ascii=False)
            f.write('\n')

        logger.info(f"统计结果已保存到 {jsonl_path}")

        return excel_path

    def run_workflow(self) -> Tuple[Optional[str], Optional[str]]:
        """Run the full pipeline; returns (jsonl_path, excel_path) or (None, None)."""
        start_time = time.time()
        logger.info(f"===== 开始图片对批量分析工作流（模型: {self.model}） =====")

        try:
            # 1. Discover image pairs.
            image_pairs = self.batch_upload()
            if not image_pairs:
                logger.info("没有找到图片对，工作流结束")
                return None, None

            # 2. Score the pairs in parallel.
            results = self.parallel_scoring(image_pairs)

            # 3. Dump raw results to JSONL.
            jsonl_path = self.aggregate_results(results)

            # 4. Build the Excel summary and dimension stats.
            excel_path = self.parse_results(results)

            total_time = time.time() - start_time
            logger.info(f"===== 图片对批量分析工作流完成，总耗时 {total_time:.2f} 秒 =====")
            logger.info(f"结果文件: {jsonl_path}")
            logger.info(f"Excel汇总: {excel_path}")

            return jsonl_path, excel_path

        except Exception as e:
            logger.error(f"工作流执行失败: {str(e)}", exc_info=True)
            raise


if __name__ == "__main__":
    # CLI entry point: parse the options (including the model name) and run
    # the whole workflow once.
    parser = argparse.ArgumentParser(description='图片对批量分析工作流')
    parser.add_argument('--image_folder', type=str, default='images',
                        help='图片文件夹路径')
    parser.add_argument('--output_folder', type=str, default='analysis_results',
                        help='结果输出文件夹路径')
    parser.add_argument('--max_workers', type=int, default=None,
                        help='并行工作进程数，默认使用CPU核心数-1')
    parser.add_argument('--prompt_version', type=str, default='v1',
                        help='提示词版本')
    parser.add_argument('--model', type=str,
                        default='doubao-seed-1-6-flash-250828',
                        help='指定调用的模型名称，例如 doubao-pro-4k, doubao-seed-1-6-flash-250828 等')

    cli_args = parser.parse_args()

    # Forward every CLI option into the workflow configuration.
    workflow_config = {
        'image_folder': cli_args.image_folder,
        'output_folder': cli_args.output_folder,
        'max_workers': cli_args.max_workers,
        'prompt_version': cli_args.prompt_version,
        'model': cli_args.model,
    }

    ImagePairAnalysisWorkflow(workflow_config).run_workflow()