"""
模型训练器模块
负责BERT模型的训练、评估和保存
"""

import torch
import numpy as np
from torch.utils.data import DataLoader, Dataset
from transformers import (
    Trainer,
    TrainingArguments,
    DataCollatorWithPadding,
    EarlyStoppingCallback
)
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from typing import Dict, List, Tuple, Optional, Any
import logging
from pathlib import Path
import json
import time

logger = logging.getLogger(__name__)


def convert_to_json_safe(obj: Any) -> Any:
    """
    Recursively convert an object into JSON-serializable builtins.

    Dicts get stringified keys, tuples become lists, numpy arrays/scalars
    become Python lists/ints/floats, and anything exposing ``item()`` or
    ``tolist()`` (e.g. np.bool_, 0-dim tensors) is unwrapped. Unknown
    objects are returned unchanged.

    Args:
        obj: object to convert

    Returns:
        A JSON-safe equivalent of ``obj``
    """
    if isinstance(obj, dict):
        return {str(k): convert_to_json_safe(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [convert_to_json_safe(item) for item in obj]
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    # np.integer / np.floating are the abstract bases: they already cover
    # int64/int32/... and float64/float32/..., so no per-width checks needed
    if isinstance(obj, np.integer):
        return int(obj)
    if isinstance(obj, np.floating):
        return float(obj)
    if hasattr(obj, 'item'):  # other scalar-like objects (np.bool_, 0-dim tensors)
        return obj.item()
    if hasattr(obj, 'tolist'):  # other array-like objects
        return obj.tolist()
    return obj


class IntentDataset(Dataset):
    """Torch Dataset wrapping tokenized encodings and integer labels."""

    def __init__(self, encodings, labels):
        """
        Store the tokenized inputs and their labels.

        Args:
            encodings: mapping of field name -> per-sample tokenized values
            labels: per-sample class labels
        """
        self.encodings = encodings
        self.labels = labels

    def __getitem__(self, idx):
        """Return sample ``idx`` as a dict of tensors (including 'labels')."""
        def _as_tensor(value):
            # Already a tensor: clone/detach to avoid sharing storage;
            # otherwise build a fresh tensor from the raw value.
            if isinstance(value, torch.Tensor):
                return value.clone().detach()
            return torch.tensor(value)

        sample = {name: _as_tensor(values[idx])
                  for name, values in self.encodings.items()}

        label = self.labels[idx]
        if isinstance(label, torch.Tensor):
            sample['labels'] = label.clone().detach().long()
        else:
            sample['labels'] = torch.tensor(label, dtype=torch.long)

        return sample

    def __len__(self):
        """Number of samples in the dataset."""
        return len(self.labels)

class IntentTrainer:
    """Intent-recognition trainer: wraps the Hugging Face Trainer for
    training, evaluation, prediction and model persistence."""
    
    def __init__(self, 
                 model,
                 tokenizer,
                 config,
                 label_mappings: Dict):
        """
        Initialize the trainer.
        
        Args:
            model: BERT sequence-classification model
            tokenizer: tokenizer paired with the model
            config: training configuration, read via attribute access
                (output_dir, learning_rate, batch_size, num_epochs, ...)
            label_mappings: label mapping dict; save_model expects keys
                'intent_to_label' and 'label_to_intent'
        """
        self.model = model
        self.tokenizer = tokenizer
        self.config = config
        self.label_mappings = label_mappings
        
        # Collator that dynamically pads each batch to its longest sequence
        self.data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
        
        # The HF Trainer is created lazily in setup_trainer()
        self.trainer = None
        
        # Per-metric history, populated from trainer logs after training
        self.training_history = {
            'train_loss': [],
            'eval_loss': [],
            'eval_accuracy': [],
            'learning_rates': []
        }
    
    def compute_metrics(self, eval_pred):
        """
        Compute evaluation metrics from a (logits, labels) pair.
        
        Args:
            eval_pred: tuple of (prediction logits, true labels)
            
        Returns:
            Dict: accuracy plus weighted precision / recall / F1
        """
        predictions, labels = eval_pred
        predictions = np.argmax(predictions, axis=1)
        
        # Plain accuracy
        accuracy = accuracy_score(labels, predictions)
        
        # Weighted precision/recall/F1; zero_division=0 silences warnings
        # for classes that never appear in the predictions
        precision, recall, f1, _ = precision_recall_fscore_support(
            labels, predictions, average='weighted', zero_division=0
        )
        
        return {
            'accuracy': accuracy,
            'precision': precision,
            'recall': recall,
            'f1': f1
        }
    
    def setup_trainer(self, train_dataset, eval_dataset):
        """
        Build the Hugging Face Trainer from the stored configuration.
        
        Args:
            train_dataset: training dataset
            eval_dataset: validation dataset
        """
        # Training arguments, mostly mirrored straight from self.config
        training_args = TrainingArguments(
            output_dir=self.config.output_dir,
            learning_rate=self.config.learning_rate,
            per_device_train_batch_size=self.config.batch_size,
            per_device_eval_batch_size=self.config.batch_size,
            num_train_epochs=self.config.num_epochs,
            weight_decay=self.config.weight_decay,
            warmup_steps=self.config.warmup_steps,
            logging_steps=self.config.logging_steps,
            eval_steps=self.config.eval_steps,
            save_steps=self.config.save_steps,
            save_total_limit=self.config.save_total_limit,
            eval_strategy="steps",  # renamed from evaluation_strategy in newer transformers
            save_strategy="steps",
            load_best_model_at_end=self.config.load_best_model_at_end,
            metric_for_best_model=self.config.metric_for_best_model,
            greater_is_better=True,
            seed=self.config.seed,
            fp16=self.config.fp16,
            dataloader_pin_memory=False,
            remove_unused_columns=False,
            report_to=None,  # disable wandb/tensorboard reporting
        )
        
        # Create the trainer; early-stops after 3 evals without improvement
        self.trainer = Trainer(
            model=self.model,
            args=training_args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            processing_class=self.tokenizer,
            data_collator=self.data_collator,
            compute_metrics=self.compute_metrics,
            callbacks=[EarlyStoppingCallback(early_stopping_patience=3)]
        )
        
        logger.info("Trainer设置完成")
    
    def train(self, train_encodings, train_labels, eval_encodings, eval_labels):
        """
        Train the model.
        
        Args:
            train_encodings: tokenized training inputs
            train_labels: training labels
            eval_encodings: tokenized validation inputs
            eval_labels: validation labels
            
        Returns:
            The Trainer's train() result
        
        Raises:
            Re-raises any exception from the underlying Trainer.train()
        """
        logger.info("开始模型训练...")
        
        # Wrap the raw encodings/labels in torch Datasets
        train_dataset = IntentDataset(train_encodings, train_labels)
        eval_dataset = IntentDataset(eval_encodings, eval_labels)
        
        logger.info(f"训练集大小: {len(train_dataset)}")
        logger.info(f"验证集大小: {len(eval_dataset)}")
        
        # Build the HF Trainer
        self.setup_trainer(train_dataset, eval_dataset)
        
        # Run training, timing the whole pass
        start_time = time.time()
        
        try:
            train_result = self.trainer.train()
            
            # Log wall-clock training time
            training_time = time.time() - start_time
            logger.info(f"训练完成，用时: {training_time:.2f}秒")
            
            # Pull loss/accuracy/lr history out of the trainer logs
            self._save_training_history()
            
            return train_result
            
        except Exception as e:
            logger.error(f"训练过程中发生错误: {e}")
            raise
    
    def evaluate(self, eval_encodings, eval_labels):
        """
        Evaluate the model on a labeled dataset.
        
        Requires train() (or setup_trainer()) to have been called first,
        since self.trainer is None until then.
        
        Args:
            eval_encodings: tokenized evaluation inputs
            eval_labels: evaluation labels
            
        Returns:
            The Trainer's evaluate() metrics dict
        """
        logger.info("开始模型评估...")
        
        # Wrap the inputs in a Dataset
        eval_dataset = IntentDataset(eval_encodings, eval_labels)
        
        # Delegate to the HF Trainer
        eval_result = self.trainer.evaluate(eval_dataset)
        
        logger.info(f"评估完成，准确率: {eval_result.get('eval_accuracy', 0):.4f}")
        
        return eval_result
    
    def predict(self, test_encodings):
        """
        Run inference on unlabeled inputs.
        
        Args:
            test_encodings: tokenized test inputs
            
        Returns:
            The Trainer's predict() output (logits, label_ids, metrics)
        """
        logger.info("开始预测...")
        
        # Dummy zero labels: the Dataset requires them but they are unused
        test_labels = [0] * len(test_encodings['input_ids'])
        test_dataset = IntentDataset(test_encodings, test_labels)
        
        # Delegate to the HF Trainer
        predictions = self.trainer.predict(test_dataset)
        
        return predictions
    
    def save_model(self, save_path: str):
        """
        Save model, tokenizer, label mappings, config and training history.
        
        Args:
            save_path: target directory (created if missing)
        """
        logger.info(f"保存模型到: {save_path}")
        
        # Create the output directory
        save_dir = Path(save_path)
        save_dir.mkdir(parents=True, exist_ok=True)
        
        # Save model and tokenizer - this normally writes config.json too
        self.trainer.save_model(save_path)
        self.tokenizer.save_pretrained(save_path)
        
        # Save the model config explicitly to guarantee config.json exists
        try:
            self.model.config.save_pretrained(save_path)
            logger.info("模型配置已保存")
        except Exception as e:
            logger.warning(f"保存模型配置时出现警告: {e}")
            # Fallback: hand-write a minimal config.json from model attributes
            # (defaults target bert-base-chinese: vocab 21128, hidden 768)
            config_dict = {
                "model_type": getattr(self.model.config, 'model_type', 'bert'),
                "num_labels": getattr(self.model.config, 'num_labels', len(self.label_mappings.get('intent_to_label', {}))),
                "id2label": {str(k): v for k, v in self.label_mappings.get('label_to_intent', {}).items()},
                "label2id": self.label_mappings.get('intent_to_label', {}),
                "architectures": ["BertForSequenceClassification"],
                "hidden_size": getattr(self.model.config, 'hidden_size', 768),
                "vocab_size": getattr(self.model.config, 'vocab_size', 21128),
                "max_position_embeddings": getattr(self.model.config, 'max_position_embeddings', 512),
                "type_vocab_size": getattr(self.model.config, 'type_vocab_size', 2),
                "hidden_dropout_prob": getattr(self.model.config, 'hidden_dropout_prob', 0.1),
                "attention_probs_dropout_prob": getattr(self.model.config, 'attention_probs_dropout_prob', 0.1),
            }
            
            config_path = save_dir / "config.json"
            with open(config_path, 'w', encoding='utf-8') as f:
                json.dump(config_dict, f, ensure_ascii=False, indent=2)
            logger.info("手动创建了config.json文件")
        
        # Save the label mappings (converted to JSON-safe builtins)
        mappings_path = save_dir / "label_mappings.json"
        with open(mappings_path, 'w', encoding='utf-8') as f:
            json_safe_mappings = convert_to_json_safe(self.label_mappings)
            json.dump(json_safe_mappings, f, ensure_ascii=False, indent=2)
        
        # Save the training configuration
        config_path = save_dir / "training_config.json"
        with open(config_path, 'w', encoding='utf-8') as f:
            # Convert the config object to a dict before dumping.
            # NOTE(review): asdict() requires self.config to be a dataclass
            # instance and raises TypeError otherwise - confirm the config type
            from dataclasses import asdict
            config_dict = asdict(self.config)
            json_safe_config = convert_to_json_safe(config_dict)
            json.dump(json_safe_config, f, ensure_ascii=False, indent=2)
        
        # Save the recorded training history
        history_path = save_dir / "training_history.json"
        with open(history_path, 'w', encoding='utf-8') as f:
            json_safe_history = convert_to_json_safe(self.training_history)
            json.dump(json_safe_history, f, ensure_ascii=False, indent=2)
        
        logger.info("模型保存完成")
        
        # Sanity-check that the key artifacts were actually written
        required_files = ["config.json", "model.safetensors", "tokenizer.json", "label_mappings.json"]
        missing_files = []
        for file_name in required_files:
            if not (save_dir / file_name).exists():
                missing_files.append(file_name)
        
        if missing_files:
            logger.warning(f"缺少以下文件: {missing_files}")
        else:
            logger.info("所有必需的模型文件都已保存")
    
    def _save_training_history(self):
        """Copy loss/accuracy/learning-rate entries from the trainer's
        log history into self.training_history (appends; never clears)."""
        if self.trainer.state.log_history:
            for log in self.trainer.state.log_history:
                if 'train_loss' in log:
                    self.training_history['train_loss'].append(log['train_loss'])
                if 'eval_loss' in log:
                    self.training_history['eval_loss'].append(log['eval_loss'])
                if 'eval_accuracy' in log:
                    self.training_history['eval_accuracy'].append(log['eval_accuracy'])
                if 'learning_rate' in log:
                    self.training_history['learning_rates'].append(log['learning_rate'])
    
    def get_training_summary(self) -> Dict:
        """
        Summarize the training run.
        
        Returns:
            Summary dict (best checkpoint/metric, step counts, accuracies);
            empty dict if no trainer/state is available
        """
        if not self.trainer or not self.trainer.state:
            return {}
        
        summary = {
            'best_model_checkpoint': self.trainer.state.best_model_checkpoint,
            'best_metric': self.trainer.state.best_metric,
            'global_step': self.trainer.state.global_step,
            'num_train_epochs': self.trainer.state.num_train_epochs,
            # NOTE(review): stock TrainerState has no 'training_time' attribute,
            # so this likely always falls back to 0 - confirm
            'total_training_time': getattr(self.trainer.state, 'training_time', 0),
        }
        
        # Append final/best eval accuracy when any was recorded
        if self.training_history['eval_accuracy']:
            summary['final_accuracy'] = self.training_history['eval_accuracy'][-1]
            summary['best_accuracy'] = max(self.training_history['eval_accuracy'])
        
        return summary


def create_trainer(model, tokenizer, config, label_mappings):
    """
    Factory for IntentTrainer instances.

    Args:
        model: BERT model
        tokenizer: tokenizer paired with the model
        config: training configuration
        label_mappings: label mapping dict

    Returns:
        A freshly constructed IntentTrainer
    """
    return IntentTrainer(model, tokenizer, config, label_mappings)


class TrainingPipeline:
    """End-to-end pipeline: train a model, evaluate it, persist the result."""

    def __init__(self, config):
        """
        Store the training configuration used by every run.

        Args:
            config: training configuration
        """
        self.config = config

    def run_training(self,
                    model,
                    tokenizer,
                    train_data,
                    eval_data,
                    test_data=None,
                    label_mappings=None):
        """
        Run the complete training pipeline.

        Args:
            model: BERT model
            tokenizer: tokenizer
            train_data: (encodings, labels) for training
            eval_data: (encodings, labels) for validation
            test_data: optional (encodings, labels) for testing
            label_mappings: label mapping dict

        Returns:
            Dict with train/eval/test results, the training summary and
            the path the final model was saved to
        """
        logger.info("开始训练流水线...")

        # Build the trainer around the provided model/tokenizer/config
        trainer = create_trainer(model, tokenizer, self.config, label_mappings)

        # Train on the train split, validating against the eval split
        train_result = trainer.train(*train_data, *eval_data)

        # Standalone evaluation on the validation split
        eval_result = trainer.evaluate(*eval_data)

        # Optional held-out test evaluation
        test_result = trainer.evaluate(*test_data) if test_data is not None else None

        # Persist the final model under the configured output directory
        final_model_path = f"{self.config.output_dir}/final_model"
        trainer.save_model(final_model_path)

        results = {
            'train_result': train_result,
            'eval_result': eval_result,
            'test_result': test_result,
            'training_summary': trainer.get_training_summary(),
            'model_path': final_model_path
        }

        logger.info("训练流水线完成")
        return results


if __name__ == "__main__":
    # Smoke test: build a small config and print every setting
    from bert_model import ModelConfig

    config = ModelConfig(
        model_name='bert-base-chinese',
        num_labels=8,
        batch_size=8,
        num_epochs=1,
        output_dir='./test_output'
    )

    print("训练配置:")
    for name, setting in config.to_dict().items():
        print(f"  {name}: {setting}")

    print("\n训练器模块测试完成!")