# Retrieval strategy: query classifier
"""Classify the type of a user query (general knowledge vs. professional consultation)."""
import datetime
import os
import json

import numpy as np
import torch
from markdown_it.rules_block import fence
# BertTokenizer: 用于将文本转换为BERT模型的输入格式,分词器
from transformers import BertTokenizer, BertForSequenceClassification
# 模型微调的核心类
# Trainer: 用于训练模型, 如调用train()方法
# TrainingArguments: 用于配置训练参数, 如学习率, 批次大小, 训练轮数等
from transformers import Trainer, TrainingArguments

from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, recall_score, f1_score

from src.spark_edu_rag.utils import get_logger, get_project_root, config_ini


class QAClassifier:
    """End-to-end BERT query classifier.

    Workflow:
    1. Initialize model and tokenizer
    2. Prepare the dataset
    3. Configure training arguments
    4. Train the model
    5. Evaluate the model
    """
    def __init__(self, model_path: str = None):
        """
        :param model_path: directory of the fine-tuned classifier; resolved
            from the project config when None.
        """
        self.logger = get_logger(__name__)
        if model_path is None:
            # NOTE(review): get_project_root is used without being called, here
            # and below — presumably it is a string constant or module-level
            # value despite the name; confirm against utils.
            model_path = os.path.join(get_project_root,
                                      config_ini.MODEL_PATH.BERT_QUERY_CLASSIFIER_MODEL_PATH)
        self.model_path = model_path
        # Path of the pre-trained (not fine-tuned) bert-base-chinese checkpoint.
        self.bert_base_chinese_model_path = os.path.join(get_project_root,
                                                         config_ini.MODEL_PATH.BERT_BASE_CHINESE_MODEL_PATH)
        self.tokenizer = BertTokenizer.from_pretrained(self.bert_base_chinese_model_path)
        self.model = None
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.logger.info(f"当前设备环境: {self.device}")
        # Label name -> integer class id used by the classification head.
        self.label_map = {"通用知识": 0, "专业咨询": 1}
        self._load_model()
        self.logger.info(f"模型保存路径： {self.model_path}")

    def _load_model(self):
        """Load the fine-tuned model if present; otherwise load the pre-trained
        base checkpoint with a fresh classification head."""
        if os.path.exists(self.model_path):
            self.model = BertForSequenceClassification.from_pretrained(self.model_path)
            self.model.to(self.device)
            self.logger.info(f"从 {self.model_path} 加载模型")
        else:
            # BUG FIX: the fallback previously loaded from self.model_path — the
            # very path just shown not to exist. It must load the pre-trained
            # base model instead.
            self.model = BertForSequenceClassification.from_pretrained(
                self.bert_base_chinese_model_path, num_labels=len(self.label_map))
            self.model.to(self.device)
            self.logger.info(f"从预训练模型 {self.bert_base_chinese_model_path} 加载模型")

    def my_save_model(self):
        """Save the current model and tokenizer to self.model_path.

        On Windows, falls back to a manual serialization path when the regular
        save fails (e.g. due to file locking). Failures on non-Windows systems
        are logged but deliberately not re-raised (best-effort save).
        """
        try:
            # Make sure the target directory exists.
            os.makedirs(self.model_path, exist_ok=True)

            self.model.save_pretrained(self.model_path)
            self.tokenizer.save_pretrained(self.model_path)
            self.logger.info(f"当前模型已保存到 {self.model_path}")

        except Exception as e:
            self.logger.error(f"保存模型时出错: {str(e)}")
            if os.name == 'nt':  # Windows-specific fallback
                self._save_model_windows_fallback()

    def _save_model_windows_fallback(self):
        """Manual save path for Windows: write weights and config separately.

        :raises Exception: re-raises if the fallback itself fails.
        """
        try:
            from transformers import WEIGHTS_NAME, CONFIG_NAME

            # Use absolute paths to avoid surprises with the working directory.
            weights_path = os.path.abspath(os.path.join(self.model_path, WEIGHTS_NAME))
            config_path = os.path.abspath(os.path.join(self.model_path, CONFIG_NAME))

            # Save the weight file, with a safeguard to release open handles.
            try:
                import gc
                gc.collect()  # encourage release of any lingering file handles

                # Write in binary mode for best compatibility.
                with open(weights_path, 'wb') as f:
                    torch.save(self.model.state_dict(), f)
                self.logger.info(f"成功保存模型权重到 {weights_path}")
            except Exception as weights_e:
                self.logger.error(f"保存权重文件失败: {str(weights_e)}")
                # Retry with the legacy (non-zipfile) serialization format.
                torch.save(self.model.state_dict(), weights_path, _use_new_zipfile_serialization=False)

            # Save the model config.
            try:
                self.model.config.to_json_file(config_path)
                self.logger.info(f"成功保存模型配置到 {config_path}")
            except Exception as config_e:
                self.logger.error(f"保存配置文件失败: {str(config_e)}")
                # Backup config save path: dump the config dict directly.
                with open(config_path, 'w', encoding='utf-8') as f:
                    json.dump(self.model.config.to_dict(), f, indent=2, ensure_ascii=False)

            # Save the tokenizer alongside the model.
            self.tokenizer.save_pretrained(self.model_path)
            self.logger.info(f"使用备用方法保存模型到 {self.model_path}")
        except Exception as backup_e:
            self.logger.error(f"备用保存方法也失败: {str(backup_e)}")
            raise

    def my_preprocess_data(self, data: list, labels: list = None):
        """
        Tokenize input texts into model-ready tensors.

        :param data: list of input text strings.
        :param labels: matching label list; passed through unchanged (may be None).
        :return: (tokenized tensors, labels)
        """
        embeddings = self.tokenizer(data, padding=True, truncation=True,
                                    max_length=128, return_tensors="pt")
        return embeddings, labels

    def my_compute_metrics(self, eval_pred):
        """Custom evaluation metrics for the Trainer: accuracy, F1, recall.

        :param eval_pred: (logits, labels) pair supplied by the Trainer.
        :return: dict with "accuracy", "f1" and "recall".
        """
        logits, labels = eval_pred
        predictions = torch.argmax(torch.tensor(logits), dim=-1)
        label_tensor = torch.tensor(labels)
        # Accuracy: fraction of exact matches.
        accuracy = (predictions == label_tensor).float().mean().item()
        # Binary F1 and recall (sklearn default average="binary" — fine for
        # the two-class label map used here).
        f1 = f1_score(label_tensor, predictions)
        recall = recall_score(label_tensor, predictions)
        return {
            "accuracy": accuracy,
            "f1": f1,
            "recall": recall
        }

    def create_dataset(self, embeddings: dict, labels: list):
        """
        Create a PyTorch dataset from tokenized inputs.

        :param embeddings: dict of input tensors (input_ids, token_type_ids,
            attention_mask), each of shape (num_samples, seq_len).
        :param labels: label list, one per sample.
        :return: a QADataset wrapping the inputs and labels.
        """
        from torch.utils.data import Dataset

        class QADataset(Dataset):
            def __init__(self, embeddings, labels):
                super().__init__()
                self.embeddings = embeddings
                self.labels = labels

            def __len__(self):
                return len(self.labels)

            def __getitem__(self, idx):
                # Each item pairs the idx-th row of every encoding tensor with
                # its label, e.g. {"input_ids": ..., "attention_mask": ..., "labels": ...}.
                item = {key: val[idx] for key, val in self.embeddings.items()}
                item["labels"] = torch.tensor(self.labels[idx])
                return item

        return QADataset(embeddings, labels)

    def train_model(self, train_file_path: str = None):
        """
        Fine-tune BERT to distinguish query types (general vs. professional).

        Pipeline: load the JSONL dataset, preprocess, configure training
        arguments, build the Trainer, then evaluate on the held-out split.

        :param train_file_path: JSONL file with one {"query": ..., "label": ...}
            object per line; resolved from the project config when None.
        :raises FileNotFoundError: if the training file does not exist.
        """
        if train_file_path is None:
            train_file_path = os.path.join(get_project_root,
                                           config_ini.TRAIN_FILE.TRAIN_FILE_PATH)
        if not os.path.exists(train_file_path):
            self.logger.error(f"训练文件 {train_file_path} 不存在")
            raise FileNotFoundError(f"训练文件 {train_file_path} 不存在")
        with open(train_file_path, "r", encoding="utf-8") as f:
            data = [json.loads(line) for line in f]
        x = [item["query"] for item in data]
        y = [self.label_map[item["label"]] for item in data]
        # 80/20 train/test split with a fixed seed for reproducibility.
        x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
        # Tokenize both splits.
        train_embedding, train_labels = self.my_preprocess_data(x_train, y_train)
        test_embedding, test_labels = self.my_preprocess_data(x_test, y_test)

        # Wrap as PyTorch datasets.
        train_dataset = self.create_dataset(train_embedding, train_labels)
        test_dataset = self.create_dataset(test_embedding, test_labels)

        # BUG FIX: the original compared a torch.device object to the string
        # "cuda"; `.type` is the explicit, robust check.
        use_cuda = self.device.type == "cuda"
        # Training hyper-parameters.
        training_args = TrainingArguments(
            output_dir="../../resources/my_models",  # checkpoint and log directory
            num_train_epochs=3,  # number of training epochs
            per_device_train_batch_size=16,  # per-device training batch size
            per_device_eval_batch_size=16,  # per-device evaluation batch size
            warmup_steps=500,  # linear LR warm-up to avoid early instability
            weight_decay=0.01,  # L2 regularisation to curb over-fitting
            logging_dir="../../resources/my_models/logs",  # log directory
            logging_steps=10,  # how often to emit training logs
            eval_strategy="epoch",  # evaluate once per epoch
            save_strategy="epoch",  # checkpoint once per epoch
            save_total_limit=1,  # keep only the single best checkpoint
            load_best_model_at_end=True,  # reload the best checkpoint after training
            dataloader_pin_memory=use_cuda,  # pinned host memory speeds up H2D copies
            fp16=use_cuda,  # mixed precision is CUDA-only
        )
        trainer = Trainer(
            model=self.model,
            args=training_args,
            train_dataset=train_dataset,
            eval_dataset=test_dataset,
            compute_metrics=self.my_compute_metrics
        )

        # NOTE(review): the actual training run is currently disabled — this
        # method only evaluates the already-loaded model on the test split.
        # Uncomment the lines below to re-enable fine-tuning and saving.
        # start_time = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        # self.logger.info(f"{start_time}_开始模型训练, 训练参数: {training_args}")
        # trainer.train()
        # self.logger.info(f"{start_time}_模型训练完成")
        # metrics = trainer.evaluate()
        # self.logger.info(f"{start_time}_模型在测试集上的性能指标: {metrics}")
        # self.my_save_model()
        # self.logger.info(f"{start_time}_模型已保存到 {self.model_path}, 分词器已保存到 {self.model_path}")

        # Evaluate on the held-out test split.
        self.my_evaluate_model(x_test, y_test)

    def my_evaluate_model(self, texts: list, labels: list = None):
        """Evaluate the model on a test set and log a classification report.

        :param texts: list of query strings.
        :param labels: ground-truth integer labels, one per text.
        """
        test_embedding = self.tokenizer(texts, truncation=True, padding="max_length",
                                        max_length=128, return_tensors="pt")
        dataset = self.create_dataset(test_embedding, labels)

        trainer = Trainer(model=self.model)
        predictions = trainer.predict(dataset)
        self.logger.info(f"模型在测试集上的预测结果: {predictions}")
        pred_labels = np.argmax(predictions.predictions, axis=-1)
        self.logger.info(f"模型在测试集上的预测类别: {pred_labels}")
        true_labels = np.array(labels)
        self.logger.info(f"测试集的真实类别: {true_labels}")
        # BUG FIX: an f-string replacement field spanning multiple lines is a
        # SyntaxError before Python 3.12 — build the report first, then log it.
        report = classification_report(true_labels, pred_labels,
                                       target_names=list(self.label_map.keys()))
        self.logger.info(f"分类报告：{report}")

    def predict_category(self, query: str):
        """Predict the query type for a single input string.

        :param query: the user query text.
        :return: "专业咨询" or "通用知识" ("通用知识" is the fallback default).
        """
        if self.model is None:
            # BUG FIX: the original `raise "通用知识"` raises TypeError (strings
            # are not exceptions); fall back to the default label instead, as
            # the commented-out return in the original intended.
            self.logger.error("模型未训练，加载")
            return "通用知识"

        # Tokenize the input text.
        test_embedding = self.tokenizer(query, truncation=True, padding="max_length",
                                        max_length=128, return_tensors="pt")
        # Move the encodings to the model's device.
        test_embedding = {key: value.to(self.device) for key, value in test_embedding.items()}
        # Inference only — no gradient tracking needed.
        with torch.no_grad():
            outputs = self.model(**test_embedding)
            pred = torch.argmax(outputs.logits, dim=1).item()

        # Map the class id back to its label name.
        reverse_label_map = {v: k for k, v in self.label_map.items()}
        return reverse_label_map.get(pred, "通用知识")  # default: general knowledge


if __name__ == '__main__':
    # Manual smoke test: build the classifier and categorize a sample query.
    classifier = QAClassifier()
    # classifier.train_model()

    # result = classifier.predict_category("我的保险怎么理赔?")  # expected: general knowledge
    result = classifier.predict_category("AI的课程大纲是什么?")  # expected: professional consultation
    print(result)
