#!/usr/bin/env python3
"""
关键字段信息抽取 - 数据预处理模块

该模块负责：
1. 处理图像数据
2. 标注数据格式转换
3. 生成Swift训练所需的数据格式
4. 数据质量检查和验证
"""

import os
import json
import yaml
from typing import Dict, List, Any, Tuple, Optional
from pathlib import Path
import pandas as pd
from PIL import Image
import base64
from io import BytesIO
import logging

# 配置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class UniversalDocumentProcessor:
    """Generic document data processor.

    Responsibilities:
    1. Resolve image paths for training samples.
    2. Convert annotation data (JSON/Excel) into the Swift conversation format.
    3. Validate, repair, and split the generated JSONL datasets.
    """

    def __init__(self, config_path: str = "config/config.yaml"):
        """Initialize the processor from a YAML configuration file.

        Args:
            config_path: Path to the YAML configuration file.
        """
        self.config = self._load_config(config_path)
        self.image_dir = Path(self.config['data']['image_dir'])

        # Backward compatibility: honor the legacy 'contract_fields' key first.
        if 'contract_fields' in self.config.get('data', {}):
            self.document_fields = self.config['data']['contract_fields']
        else:
            # Otherwise flatten every category of the generic field pool.
            common_fields = self.config['data'].get('common_fields_pool', {})
            self.document_fields = []
            for category, fields in common_fields.items():
                self.document_fields.extend(fields)
            if not self.document_fields:
                # Last resort: a minimal default field set.
                self.document_fields = ["名称", "编号", "日期", "金额", "地址"]

        logger.info(f"初始化通用文档处理器: 字段数={len(self.document_fields)}")

    def _load_config(self, config_path: str) -> Dict:
        """Load and parse the YAML configuration file."""
        with open(config_path, 'r', encoding='utf-8') as f:
            return yaml.safe_load(f)

    def _get_image_path(self, image_path: str) -> str:
        """Return a path string suitable for embedding in training data.

        Absolute paths are converted to paths relative to the current working
        directory; when that is impossible, only the file name is kept.

        Args:
            image_path: Image file path (absolute or relative).

        Returns:
            A relative path string for use in the training data.
        """
        try:
            path = Path(image_path)
            if path.is_absolute():
                try:
                    # Make the path relative to the project root (cwd).
                    relative_path = path.relative_to(Path.cwd())
                    return str(relative_path)
                except ValueError:
                    # Path lies outside cwd; fall back to the bare file name.
                    return path.name
            else:
                # Already relative: return unchanged.
                return str(path)
        except Exception as e:
            logger.warning(f"路径转换失败 {image_path}: {e}，使用原路径")
            return str(image_path)

    def create_training_prompt(self, annotation: Dict[str, str]) -> str:
        """Build the user prompt for one training sample.

        Args:
            annotation: Document annotation mapping (field name -> value,
                plus metadata keys such as 'image_name').

        Returns:
            The formatted prompt string.
        """
        # Field names actually present in the annotation (metadata excluded).
        actual_fields = [key for key in annotation.keys()
                         if key not in ['image_name', 'sample_id', 'document_type']]

        # Prefer the sample's own fields; fall back to the configured defaults.
        fields_to_extract = actual_fields if actual_fields else self.document_fields

        prompt = """请仔细分析这份文档图像，提取以下关键字段信息。如果某个字段在图像中找不到，请标记为"未找到"。

需要提取的字段：
"""

        for field in fields_to_extract:
            prompt += f"- {field}\n"

        prompt += """
请按照以下JSON格式输出结果：
{
"""
        for i, field in enumerate(fields_to_extract):
            comma = "," if i < len(fields_to_extract) - 1 else ""
            prompt += f'  "{field}": "具体内容或未找到"{comma}\n'

        prompt += "}"

        return prompt

    def create_training_response(self, annotation: Dict[str, str]) -> str:
        """Build the gold-standard assistant answer for one training sample.

        Args:
            annotation: Document annotation mapping.

        Returns:
            The answer as a JSON-formatted string.
        """
        # Field names actually present in the annotation (metadata excluded).
        actual_fields = [key for key in annotation.keys()
                         if key not in ['image_name', 'sample_id', 'document_type']]

        response = {}
        if actual_fields:
            # Samples with explicit fields: echo their annotated values.
            for field in actual_fields:
                response[field] = annotation.get(field, "未找到")
        else:
            # Samples without explicit fields: fill the default field set.
            for field in self.document_fields:
                response[field] = annotation.get(field, "未找到")

        return json.dumps(response, ensure_ascii=False, indent=2)

    def process_single_sample(self, image_path: str, annotation: Dict[str, str]) -> Optional[Dict[str, Any]]:
        """Process one training sample.

        Args:
            image_path: Image path.
            annotation: Annotation data.

        Returns:
            A Swift-format training sample, or None if no usable image path.
        """
        # Reference the image by path instead of base64 to keep files small.
        image_path_str = self._get_image_path(image_path)
        if not image_path_str:
            return None

        # Conversation format — content must always be an array of parts.
        conversation = [
            {
                "role": "user",
                "content": [
                    {"type": "image", "image": image_path_str},
                    {"type": "text", "text": self.create_training_prompt(annotation)}
                ]
            },
            {
                "role": "assistant",
                "content": [
                    {"type": "text", "text": self.create_training_response(annotation)}
                ]
            }
        ]

        return {
            "conversation": conversation,
            "image_path": str(image_path),
            "sample_id": Path(image_path).stem
        }

    def convert_annotations_to_swift_format(self,
                                          annotations_file: str,
                                          output_file: str) -> None:
        """Convert annotation data into the Swift training format.

        Args:
            annotations_file: Input annotation file path (JSON/Excel).
            output_file: Output JSONL file path.

        Raises:
            ValueError: If the annotation file extension is unsupported.
        """
        logger.info(f"开始转换标注数据: {annotations_file}")

        # Load the annotations according to the file extension.
        if annotations_file.endswith('.xlsx') or annotations_file.endswith('.xls'):
            df = pd.read_excel(annotations_file)
            annotations = df.to_dict('records')
        elif annotations_file.endswith('.json'):
            with open(annotations_file, 'r', encoding='utf-8') as f:
                annotations = json.load(f)
        else:
            raise ValueError("不支持的标注文件格式，请使用JSON或Excel格式")

        # Convert each annotation record whose image actually exists.
        processed_samples = []
        for i, annotation in enumerate(annotations):
            image_path = self.image_dir / annotation.get('image_name', f"sample_{i}.jpg")

            if not image_path.exists():
                logger.warning(f"图像文件不存在: {image_path}")
                continue

            sample = self.process_single_sample(str(image_path), annotation)
            if sample:
                processed_samples.append(sample)

        # Persist as JSONL (one sample per line).
        with open(output_file, 'w', encoding='utf-8') as f:
            for sample in processed_samples:
                f.write(json.dumps(sample, ensure_ascii=False) + '\n')

        logger.info(f"成功转换 {len(processed_samples)} 个样本到 {output_file}")

    def validate_data(self, data_file: str) -> Tuple[bool, List[str]]:
        """Validate the quality of a JSONL training-data file.

        Args:
            data_file: Data file path.

        Returns:
            (whether validation passed, list of error messages)
        """
        errors = []

        try:
            with open(data_file, 'r', encoding='utf-8') as f:
                for i, line in enumerate(f):
                    try:
                        data = json.loads(line.strip())

                        # Required top-level field.
                        if 'conversation' not in data:
                            errors.append(f"第{i+1}行: 缺少conversation字段")
                            continue

                        conversation = data['conversation']
                        if len(conversation) != 2:
                            errors.append(f"第{i+1}行: conversation应包含2轮对话")
                        if len(conversation) < 2:
                            # BUGFIX: the [0]/[1] indexing below raised
                            # IndexError on conversations with < 2 turns.
                            continue

                        # First turn must be the user message.
                        user_msg = conversation[0]
                        if user_msg.get('role') != 'user':
                            errors.append(f"第{i+1}行: 第一轮对话角色应为user")

                        # Second turn must be the assistant message.
                        assistant_msg = conversation[1]
                        if assistant_msg.get('role') != 'assistant':
                            errors.append(f"第{i+1}行: 第二轮对话角色应为assistant")

                        # Both user and assistant content must be arrays.
                        for role_idx, msg in enumerate([user_msg, assistant_msg]):
                            role_name = "user" if role_idx == 0 else "assistant"
                            content = msg.get('content', [])

                            # Ensure content is an array.
                            if not isinstance(content, list):
                                errors.append(f"第{i+1}行: {role_name}的content应为数组格式")
                                continue

                            # Check each content part.
                            for item_idx, item in enumerate(content):
                                if not isinstance(item, dict):
                                    errors.append(f"第{i+1}行: {role_name}.content[{item_idx}]应为对象")
                                    continue

                                # Required keys per part type.
                                if 'type' not in item:
                                    errors.append(f"第{i+1}行: {role_name}.content[{item_idx}]缺少type字段")

                                item_type = item.get('type')
                                if item_type == 'image' and 'image' not in item:
                                    errors.append(f"第{i+1}行: {role_name}.content[{item_idx}]的image类型缺少image字段")
                                elif item_type == 'text' and 'text' not in item:
                                    errors.append(f"第{i+1}行: {role_name}.content[{item_idx}]的text类型缺少text字段")

                        # The user message must contain both an image and text.
                        user_content = user_msg.get('content', [])
                        has_image = any(item.get('type') == 'image' for item in user_content if isinstance(item, dict))
                        has_text = any(item.get('type') == 'text' for item in user_content if isinstance(item, dict))

                        if not has_image:
                            errors.append(f"第{i+1}行: 用户消息缺少图像内容")
                        if not has_text:
                            errors.append(f"第{i+1}行: 用户消息缺少文本内容")

                        # The assistant message must contain text.
                        assistant_content = assistant_msg.get('content', [])
                        has_assistant_text = any(item.get('type') == 'text' for item in assistant_content if isinstance(item, dict))

                        if not has_assistant_text:
                            errors.append(f"第{i+1}行: 助手消息缺少文本内容")

                    except json.JSONDecodeError:
                        errors.append(f"第{i+1}行: JSON格式错误")

        except FileNotFoundError:
            errors.append(f"文件不存在: {data_file}")

        return len(errors) == 0, errors

    def split_dataset(self, input_file: str, train_ratio: float = 0.8) -> Tuple[str, str]:
        """Split a dataset into training and evaluation sets.

        Args:
            input_file: Input data file (JSONL).
            train_ratio: Fraction of samples used for training.

        Returns:
            (training-set file path, evaluation-set file path)
        """
        with open(input_file, 'r', encoding='utf-8') as f:
            lines = f.readlines()

        # Shuffle before splitting (unseeded — split is not reproducible).
        import random
        random.shuffle(lines)

        split_idx = int(len(lines) * train_ratio)
        train_lines = lines[:split_idx]
        eval_lines = lines[split_idx:]

        # Write the training split.
        train_file = input_file.replace('.jsonl', '_train.jsonl')
        with open(train_file, 'w', encoding='utf-8') as f:
            f.writelines(train_lines)

        # Write the evaluation split.
        eval_file = input_file.replace('.jsonl', '_eval.jsonl')
        with open(eval_file, 'w', encoding='utf-8') as f:
            f.writelines(eval_lines)

        logger.info(f"数据集分割完成: 训练集{len(train_lines)}样本, 验证集{len(eval_lines)}样本")

        return train_file, eval_file

    def fix_conversation_format(self, input_file: str, output_file: Optional[str] = None) -> Optional[str]:
        """Repair conversations whose content is a string instead of an array.

        Args:
            input_file: Input file path.
            output_file: Output file path; overwrite the input file if None.

        Returns:
            Path of the repaired file, or None on failure.
        """
        if output_file is None:
            output_file = input_file

        fixed_lines = []
        fixed_count = 0

        logger.info(f"开始修复数据文件: {input_file}")

        try:
            with open(input_file, 'r', encoding='utf-8') as f:
                for i, line in enumerate(f):
                    try:
                        data = json.loads(line.strip())

                        if 'conversation' in data:
                            conversation = data['conversation']
                            needs_fix = False

                            for msg in conversation:
                                content = msg.get('content')
                                # String content is wrapped into the array form.
                                if isinstance(content, str):
                                    needs_fix = True
                                    msg['content'] = [
                                        {"type": "text", "text": content}
                                    ]
                                    logger.debug(f"第{i+1}行: 修复{msg['role']}的content格式")

                            if needs_fix:
                                fixed_count += 1

                        fixed_lines.append(json.dumps(data, ensure_ascii=False))

                    except json.JSONDecodeError as e:
                        logger.error(f"第{i+1}行 JSON解析错误: {e}")
                        # Skip the malformed line.
                        continue

            # Write the repaired data back out.
            with open(output_file, 'w', encoding='utf-8') as f:
                for line in fixed_lines:
                    f.write(line + '\n')

            logger.info(f"数据修复完成: 共修复 {fixed_count} 个样本")
            logger.info(f"修复后数据保存到: {output_file}")

            return output_file

        except FileNotFoundError:
            logger.error(f"文件不存在: {input_file}")
            return None
        except Exception as e:
            logger.error(f"修复数据时发生错误: {e}")
            return None


def main():
    """Example end-to-end data-processing workflow.

    Converts raw annotations to Swift format (when present), validates and
    splits the result, then repairs and re-validates any existing training
    data file.
    """
    processor = ContractDataProcessor()

    # Step 1: convert raw annotations, if the annotation file exists.
    annotations_file = "data/annotations.json"
    output_file = "data/processed_data.jsonl"

    if os.path.exists(annotations_file):
        processor.convert_annotations_to_swift_format(annotations_file, output_file)

        is_valid, errors = processor.validate_data(output_file)
        if not is_valid:
            logger.error("数据验证失败:")
            for error in errors:
                logger.error(f"  - {error}")
        else:
            logger.info("数据验证通过")
            # Only a validated dataset gets split into train/eval.
            processor.split_dataset(output_file)

    # Step 2: repair the format of pre-existing training data, if any.
    train_data_file = "data/train_data_train.jsonl"
    if not os.path.exists(train_data_file):
        return

    logger.info(f"检测到现有训练数据文件: {train_data_file}")
    fixed_file = processor.fix_conversation_format(train_data_file)
    if not fixed_file:
        return

    # Re-validate after the repair pass.
    is_valid_after_fix, errors_after_fix = processor.validate_data(fixed_file)
    if is_valid_after_fix:
        logger.info("✅ 数据修复成功，验证通过")
    else:
        logger.warning("⚠️  数据修复后仍有问题:")
        for error in errors_after_fix[:5]:  # show only the first 5 errors
            logger.warning(f"  - {error}")


# Backward-compatibility alias: legacy callers import ContractDataProcessor.
ContractDataProcessor = UniversalDocumentProcessor


# Script entry point: run the example data-processing pipeline.
if __name__ == "__main__":
    main()