import base64
import difflib
import io
import json
import os
import re
import sys
from typing import Dict, List, Optional

import fitz  # PyMuPDF
import jsonlines
import numpy as np
import requests
import torch
from PIL import Image
from pdf2image import convert_from_path
from tqdm import tqdm
from transformers import BertTokenizer, BertModel
from transformers import ViTFeatureExtractor, ViTModel


class MultimodalFeatureExtractor:
    """Produces fixed-size embeddings from text (Chinese BERT) and images (ViT)."""

    def __init__(self):
        # Text encoder: bert-base-chinese; image encoder: ViT pretrained on ImageNet-21k.
        self.text_tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
        self.text_model = BertModel.from_pretrained('bert-base-chinese')
        self.image_feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224-in21k')
        self.image_model = ViTModel.from_pretrained('google/vit-base-patch16-224-in21k')

        # Both encoders are used for inference only, so freeze their weights.
        for encoder in (self.text_model, self.image_model):
            for param in encoder.parameters():
                param.requires_grad = False

    def extract_text_features(self, text: str) -> np.ndarray:
        """Return a 1-D mean-pooled BERT embedding of *text* (truncated to 512 tokens)."""
        encoded = self.text_tokenizer(text, return_tensors="pt", truncation=True, max_length=512, padding=True)
        with torch.no_grad():
            hidden = self.text_model(**encoded).last_hidden_state
        # Mean over the sequence dimension, then drop the batch dimension.
        return hidden.mean(dim=1).squeeze().numpy()

    def extract_image_features(self, image: Image.Image) -> np.ndarray:
        """Return a 1-D mean-pooled ViT embedding of *image*."""
        encoded = self.image_feature_extractor(images=image, return_tensors="pt")
        with torch.no_grad():
            hidden = self.image_model(**encoded).last_hidden_state
        return hidden.mean(dim=1).squeeze().numpy()


class PDFProcessor:
    """Extracts text, embedded images, and multimodal features from PDF files."""

    def __init__(self, poppler_path):
        # poppler_path is kept for pdf2image-based rendering elsewhere;
        # the extraction below uses PyMuPDF (fitz) only.
        self.poppler_path = poppler_path
        self.feature_extractor = MultimodalFeatureExtractor()

    def extract_text(self, pdf_path: str) -> Dict[int, str]:
        """Return {1-based page number: page text}; {} on failure."""
        text_dict: Dict[int, str] = {}
        try:
            # Context manager ensures the document handle is closed.
            with fitz.open(pdf_path) as doc:
                for page_num in range(len(doc)):
                    page = doc.load_page(page_num)
                    text_dict[page_num + 1] = page.get_text()
            return text_dict
        except Exception as e:
            print(f"文本提取失败（{pdf_path}）：{str(e)}")
            return {}

    def extract_images(self, pdf_path: str) -> Dict[int, List[Image.Image]]:
        """Return {1-based page number: [PIL images embedded on that page]}.

        Note: fitz's Document.extract_image() returns the decoded image as raw
        *bytes* under the "image" key. The previous implementation treated the
        value as a base64 string and appended str padding to bytes, which raised
        TypeError and silently dropped every image; the bytes are now fed to
        PIL directly.
        """
        image_dict: Dict[int, List[Image.Image]] = {}
        try:
            with fitz.open(pdf_path) as doc:
                for page_num in range(len(doc)):
                    page = doc.load_page(page_num)
                    image_list: List[Image.Image] = []
                    for img in page.get_images():
                        try:
                            xref = img[0]
                            base_image = doc.extract_image(xref)
                            image_bytes = base_image.get("image")
                            if not image_bytes:
                                continue
                            # "image" already holds raw bytes -- no base64 step.
                            image_list.append(Image.open(io.BytesIO(image_bytes)))
                        except Exception:
                            # Skip undecodable images; keep processing the page.
                            continue

                    if image_list:
                        image_dict[page_num + 1] = image_list
            return image_dict
        except Exception as e:
            print(f"图像提取失败（{pdf_path}）：{str(e)}")
            return {}

    def extract_page_features(self, pdf_path: str) -> Dict[int, Dict]:
        """Extract per-page multimodal features.

        Returns {page number: {"text": str, "text_features": ndarray | None,
        "image_features": [ndarray, ...]}}.
        """
        multimodal_features: Dict[int, Dict] = {}

        text_content = self.extract_text(pdf_path)
        image_content = self.extract_images(pdf_path)

        for page_num in text_content.keys():
            page_features = {
                "text": text_content[page_num],
                "text_features": None,
                "image_features": []
            }

            # Text embedding (skipped for empty pages).
            if text_content[page_num]:
                page_features["text_features"] = self.feature_extractor.extract_text_features(text_content[page_num])

            # Image embeddings; a failure on one image does not abort the page.
            if page_num in image_content:
                for image in image_content[page_num]:
                    try:
                        img_feature = self.feature_extractor.extract_image_features(image)
                        page_features["image_features"].append(img_feature)
                    except Exception as e:
                        print(f"图像特征提取失败（第{page_num}页）：{str(e)}")
                        continue

            multimodal_features[page_num] = page_features

        return multimodal_features


class GLM4Client:
    """Minimal HTTP client for the GLM-4-Flash chat-completion API."""

    def __init__(self, api_key: str, base_url: str = "https://open.bigmodel.cn/api/paas/v4/"):
        self.base_url = base_url
        self.api_key = api_key
        # Bearer-token auth headers reused on every request.
        self.headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }

    def chat_completion(self, messages: List[Dict], model: str = "glm-4-flash",
                        timeout: float = 60.0) -> Optional[str]:
        """Send *messages* to the chat-completion endpoint.

        Returns the assistant's reply text, or None on any error.
        *timeout* (seconds) bounds the HTTP call; without it requests.post
        can block forever on a stalled connection.
        """
        url = f"{self.base_url}chat/completions"
        payload = {
            "model": model,
            "messages": messages
        }

        try:
            response = requests.post(url, headers=self.headers, json=payload, timeout=timeout)
            response.raise_for_status()
            result = response.json()
            return result["choices"][0]["message"]["content"]
        except Exception as e:
            # Best-effort client: callers treat None as "no answer".
            print(f"Error calling GLM-4 API: {str(e)}")
            return None


class IndustrialDocQASystem:
    """End-to-end pipeline: PDF feature extraction -> GLM-4 multiple-choice QA."""

    def __init__(self, api_key: str, poppler_path=None):
        self.pdf_processor = PDFProcessor(poppler_path)
        self.glm_client = GLM4Client(api_key)
        # Reuse the processor's extractor instead of loading BERT/ViT a second
        # time (the old code constructed a duplicate MultimodalFeatureExtractor).
        self.feature_extractor = self.pdf_processor.feature_extractor

    def process_question(self, question_data: Dict, doc_folder: str = ".", max_text_length: int = 2000) -> Dict:
        """Answer one multiple-choice question; returns {"id": ..., "answer": ...}."""
        try:
            doc_name = question_data["document"]
            doc_path = os.path.join(doc_folder, doc_name)

            # Extract per-page multimodal features for the referenced PDF.
            multimodal_features = self.pdf_processor.extract_page_features(doc_path)
            if not multimodal_features:
                return {"id": question_data["id"], "answer": "无法提取文档内容"}

            # Detect page references such as "第3页" or "第3-5页" so the context
            # can be restricted to the pages the question actually mentions.
            page_refs = set()
            matches = re.findall(r"第(\d+)(?:[-~到至]?(\d+)?)页", question_data["question"])
            for match in matches:
                start = int(match[0])
                end = int(match[1]) if match[1] else start
                page_refs.update(range(start, end + 1))

            # Build the textual context plus short image descriptions.
            context_parts = []
            image_descriptions = []

            for p in sorted(multimodal_features.keys()):
                if page_refs and p not in page_refs:
                    continue

                page_data = multimodal_features[p]

                if page_data["text"]:
                    context_parts.append(f"第{p}页文本内容:\n{page_data['text']}")

                if page_data["image_features"]:
                    image_descriptions.append(f"第{p}页包含{len(page_data['image_features'])}张技术图纸")

            full_context = "\n".join(context_parts)
            if image_descriptions:
                full_context += "\n\n文档图像信息:\n" + "\n".join(image_descriptions)

            # Truncate to keep the prompt within the model's context budget.
            if len(full_context) > max_text_length:
                full_context = full_context[:max_text_length]

            # Nudge the model when the question is about figures/drawings.
            question = question_data["question"]
            if any(keyword in question for keyword in ["图片", "图像", "图示", "示意图", "结构图"]):
                full_context += "\n\n注意：问题涉及文档中的图像内容，请仔细分析技术图纸的描述。"

            # Final user prompt (kept verbatim).
            user_prompt = f"""你是工业专利文档多模态问答专家。请根据以下文档内容准确回答选择题。

文档内容如下：
{full_context}

问题：
{question_data['question']}

选项：
{chr(10).join(question_data.get('options', []))}

请严格只回答一个选项编号：A、B、C 或 D，不要输出其他内容。
"""

            answer = self.glm_client.chat_completion([
                {"role": "system",
                 "content": "你是一个工业技术文档专家，需要准确回答关于专利文档的问题，特别是结合文本和技术图纸的分析。"},
                {"role": "user", "content": user_prompt}
            ])

            return {
                "id": question_data["id"],
                "answer": self._postprocess_answer(answer, question_data)
            }

        except Exception as e:
            print(f"处理问题时出错：{str(e)}")
            return {"id": question_data["id"], "answer": "处理错误"}

    def _postprocess_answer(self, answer: str, question_data: Dict) -> str:
        """Normalize a free-form model reply to a single option letter A-D.

        Handles: exact letters, replies such as "答案：A" / "A。" (the old
        version rejected these), and replies quoting the option text.
        """
        if not answer:
            return "无法回答"

        answer = answer.strip().upper()

        # Exact single-letter reply.
        if answer in ["A", "B", "C", "D"]:
            return answer

        # Extract the first standalone A-D letter (not part of a longer token).
        letter_match = re.search(r"(?<![A-Z0-9])([ABCD])(?![A-Z0-9])", answer)
        if letter_match:
            return letter_match.group(1)

        # Fall back to matching the option text itself. Uppercase the option
        # body too, since *answer* was uppercased above (the old comparison
        # mixed cases and could never match Latin text).
        if "options" in question_data:
            choices = ["A", "B", "C", "D"]
            for idx, option_text in enumerate(question_data["options"]):
                opt_body = option_text.split(". ", 1)[-1].strip().upper()
                if opt_body and opt_body in answer:
                    return choices[idx]
                # Fuzzy match for near-verbatim quotes of the option text.
                if difflib.get_close_matches(opt_body, [answer], n=1, cutoff=0.8):
                    return choices[idx]

        return "无法判断"

    def evaluate(self, questions_file: str, output_file: str, doc_folder: str = "."):
        """Answer every question in *questions_file*, writing results as jsonl."""
        with jsonlines.open(questions_file) as reader, jsonlines.open(output_file, mode='w') as writer:
            for question in tqdm(reader, desc="Processing questions"):
                result = self.process_question(question, doc_folder)
                writer.write(result)

    def process_test_set(self, test_questions_file: str, output_file: str, doc_folder: str = "."):
        """Run the pipeline over the test set and save predictions."""
        if not os.path.exists(doc_folder):
            os.makedirs(doc_folder)

        self.evaluate(test_questions_file, output_file, doc_folder)
        print(f"测试集测评完成，结果已保存到 {output_file}")

    def prepare_training_data(self, questions_file: str, output_dir: str = "training_data", doc_folder: str = "."):
        """Build fine-tuning data from the sample set (80% train / 20% val)."""
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        with jsonlines.open(questions_file) as reader:
            questions = list(reader)

        # 80/20 split in file order.
        split_idx = int(0.8 * len(questions))
        train_questions = questions[:split_idx]
        val_questions = questions[split_idx:]

        self._generate_finetuning_data(train_questions, os.path.join(output_dir, "train.jsonl"), doc_folder)
        self._generate_finetuning_data(val_questions, os.path.join(output_dir, "val.jsonl"), doc_folder)

        print(f"训练数据已准备完成，保存在 {output_dir} 目录")

    def _generate_finetuning_data(self, questions: List[Dict], output_path: str, doc_folder: str):
        """Write instruction/input/output fine-tuning samples as jsonl."""
        with jsonlines.open(output_path, mode='w') as writer:
            for q in tqdm(questions, desc="Preparing training data"):
                doc_path = os.path.join(doc_folder, q["document"])
                multimodal_features = self.pdf_processor.extract_page_features(doc_path)

                if not multimodal_features:
                    continue

                context_parts = []
                image_descriptions = []

                for p in sorted(multimodal_features.keys()):
                    page_data = multimodal_features[p]
                    if page_data["text"]:
                        context_parts.append(f"第{p}页:\n{page_data['text']}")
                    if page_data["image_features"]:
                        image_descriptions.append(f"第{p}页包含{len(page_data['image_features'])}张技术图纸")

                # Cap context at the first 5 pages / 3 image notes to bound sample size.
                full_context = "\n".join(context_parts[:5])
                if image_descriptions:
                    full_context += "\n\n文档图像信息:\n" + "\n".join(image_descriptions[:3])

                sample = {
                    "id": q["id"],
                    "document": q["document"],
                    "instruction": "你是工业专利文档多模态问答专家，请根据文档内容和图像信息回答问题。",
                    "input": f"文档内容:\n{full_context}\n\n问题:\n{q['question']}\n\n选项:\n{chr(10).join(q.get('options', []))}",
                    "output": q.get("answer", "").strip().upper()
                }

                writer.write(sample)


def evaluate_accuracy(questions_file: str, answers_file: str, error_output_file: str = "errors.jsonl"):
    """Compare predictions against gold answers and report accuracy.

    The two jsonl files are read in lockstep, so they must list the same
    questions in the same order. Questions without an A-D gold answer are
    skipped and excluded from the accuracy denominator (the old code divided
    by the full count, silently treating skipped questions as wrong).
    Wrong cases are echoed (first 5) and saved to *error_output_file*.
    """
    correct = 0
    total = 0
    wrong_cases = []

    with jsonlines.open(questions_file) as q_reader, jsonlines.open(answers_file) as a_reader:
        for q, a in zip(q_reader, a_reader):
            total += 1
            qid = q.get("id", "?")
            gold = q.get("answer", "").strip().upper()
            pred = a.get("answer", "").strip().upper()

            if "options" in q and gold in ["A", "B", "C", "D"]:
                if pred == gold:
                    correct += 1
                else:
                    wrong_cases.append({
                        "id": qid,
                        "question": q.get("question", ""),
                        "options": q.get("options", []),
                        "gold": gold,
                        "pred": pred,
                        "document": q.get("document", "")
                    })
            else:
                print(f"跳过无标准答案的问题 ID: {qid}")

    # Accuracy over graded questions only.
    graded = correct + len(wrong_cases)
    accuracy = correct / graded if graded else 0
    print(f"\n📊 总问题数: {total}")
    print(f"✅ 正确答案数: {correct}")
    print(f"❌ 错误答案数: {len(wrong_cases)}")
    print(f"🎯 准确率: {accuracy:.2%}")

    # Show a handful of failures for quick inspection.
    print("\n🧪 示例错误题目（前5个）:")
    for item in wrong_cases[:5]:
        print(f"\n🆔 题目ID: {item['id']}")
        print(f"📄 文档: {item['document']}")
        print(f"❓ 问题: {item['question']}")
        for opt in item['options']:
            print(f"    {opt}")
        print(f"✅ 正确答案: {item['gold']}")
        print(f"❌ 模型预测: {item['pred']}")

    # Persist every wrong case for later error analysis.
    if wrong_cases:
        with jsonlines.open(error_output_file, mode='w') as writer:
            for item in wrong_cases:
                writer.write(item)
        print(f"\n📝 错误样本已保存到: {error_output_file}")


if __name__ == "__main__":
    # --- Configuration ---
    # SECURITY: prefer supplying the key via the GLM_API_KEY environment
    # variable. The literal fallback below is a credential committed to
    # source; rotate it and delete the fallback before sharing this file.
    API_KEY = os.environ.get("GLM_API_KEY",
                             "75018c382f7246b5a50241ce2f34132a.pD0X3Hz9BWGfs8Q0")
    POPPLER_PATH = r"D:\360Downloads\Release-24.08.0-0\poppler-24.08.0\Library\bin"  # set to None if poppler is on PATH
    QUESTIONS_FILE = "questions.jsonl"  # sample set (has gold answers; also used for training)
    TEST_QUESTIONS_FILE = "test_questions.jsonl"  # test set
    OUTPUT_FILE = "answers.jsonl"
    TEST_OUTPUT_FILE = "test_answers.jsonl"
    DOC_FOLDER = "documents"  # directory holding the PDF documents
    TRAINING_DATA_DIR = "training_data"

    try:
        qa_system = IndustrialDocQASystem(API_KEY, POPPLER_PATH)

        # 1. Answer the sample set and score it against the gold answers.
        print("处理样例集...")
        qa_system.evaluate(QUESTIONS_FILE, OUTPUT_FILE, DOC_FOLDER)
        evaluate_accuracy(QUESTIONS_FILE, OUTPUT_FILE)

        # 2. Build fine-tuning data from the sample set.
        print("\n准备训练数据...")
        qa_system.prepare_training_data(QUESTIONS_FILE, TRAINING_DATA_DIR, DOC_FOLDER)

        # 3. Answer the test set and score it (if gold answers exist).
        print("\n处理测试集...")
        qa_system.process_test_set(TEST_QUESTIONS_FILE, TEST_OUTPUT_FILE, DOC_FOLDER)
        evaluate_accuracy(TEST_QUESTIONS_FILE, TEST_OUTPUT_FILE)

    except KeyboardInterrupt:
        print("\n程序被用户中断")
        sys.exit(0)