"""RNN时序模型训练（Hugging Face Trainer）"""
"""RNN时序模型训练（Hugging Face Trainer）"""
import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoModel, Trainer, TrainingArguments
from transformers import EarlyStoppingCallback, IntervalStrategy
from typing import Dict, Any, List, Tuple
import evaluate
import numpy as np
from src.utils.config_loader import config_loader
from src.utils.logger import get_logger
from src.model_development.sentiment_analysis.sentiment_dataset import SentimentDataset, sentiment_collate_fn

logger = get_logger(__name__)

class RNNModel(nn.Module):
    """基于预训练模型和RNN的情感分析模型"""
    
    def __init__(self,
                 model_name: str = config_loader['model']['rnn']['model_name'],
                 hidden_size: int = config_loader['model']['rnn']['hidden_size'],
                 num_layers: int = config_loader['model']['rnn']['num_layers'],
                 dropout: float = config_loader['model']['rnn']['dropout'],
                 num_classes: int = len(config_loader['model']['sentiment_labels'])):
        """
        初始化RNN模型
        
        Args:
            model_name: 预训练模型名称
            hidden_size: RNN隐藏层大小
            num_layers: RNN层数
            dropout: Dropout概率
            num_classes: 类别数量
        """
        super(RNNModel, self).__init__()
        self.bert = AutoModel.from_pretrained(model_name)
        self.rnn = nn.LSTM(
            input_size=self.bert.config.hidden_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            bidirectional=True,
            dropout=dropout if num_layers > 1 else 0,
            batch_first=True
        )
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(hidden_size * 2, num_classes)  # 双向RNN，所以乘以2
        
    def forward(self, input_ids, attention_mask=None, token_type_ids=None, entity_mask=None):
        """前向传播"""
        # 获取BERT的输出
        outputs = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids
        )
        
        # 获取最后一层的隐藏状态
        last_hidden_state = outputs.last_hidden_state  # [batch_size, seq_len, hidden_size]
        
        # 应用实体掩码（如果提供）
        if entity_mask is not None:
            # 将3D实体掩码转换为2D权重
            entity_weights = entity_mask.sum(dim=1)  # [batch_size, seq_len]
            entity_weights = torch.clamp(entity_weights, max=1.0)  # 归一化
            # 应用权重
            last_hidden_state = last_hidden_state * entity_weights.unsqueeze(-1)
        
        # RNN处理
        rnn_output, _ = self.rnn(last_hidden_state)
        
        # 获取最后一个时间步的输出
        last_output = rnn_output[:, -1, :]
        
        # 分类层
        logits = self.fc(self.dropout(last_output))
        
        return logits

class RNNModelTrainer:
    """Train / evaluate / serve the RNNModel via the Hugging Face Trainer API."""

    def __init__(self, model_dir: str = config_loader['project_paths']['model_saves']):
        """Initialize the trainer: tokenizer, model, and the F1 metric.

        Args:
            model_dir: Directory for checkpoints, logs, and the final model.
        """
        self.model_dir = model_dir
        self.tokenizer = AutoTokenizer.from_pretrained(config_loader['model']['rnn']['model_name'])
        self.model = RNNModel()
        self.metric = evaluate.load('f1')

    def compute_metrics(self, eval_pred: Tuple[np.ndarray, np.ndarray]) -> Dict[str, float]:
        """Compute macro-F1 from Trainer's (logits, labels) evaluation tuple."""
        logits, labels = eval_pred
        predictions = np.argmax(logits, axis=-1)
        return self.metric.compute(predictions=predictions, references=labels, average='macro')

    def train(self, train_data_path: str, val_data_path: str):
        """Fine-tune the model on a train split with per-epoch validation.

        Args:
            train_data_path: Path to the training data file.
            val_data_path: Path to the validation data file.

        Side effects: writes checkpoints/logs under model_dir and saves the
        best model + tokenizer to ``{model_dir}/rnn_model``.
        """
        max_length = config_loader['model']['rnn']['max_length']
        train_dataset = SentimentDataset(
            data_path=train_data_path,
            tokenizer=self.tokenizer,
            max_length=max_length
        )
        val_dataset = SentimentDataset(
            data_path=val_data_path,
            tokenizer=self.tokenizer,
            max_length=max_length
        )

        rnn_cfg = config_loader['model']['rnn']
        training_args = TrainingArguments(
            output_dir=self.model_dir,
            num_train_epochs=rnn_cfg['epochs'],
            per_device_train_batch_size=rnn_cfg['batch_size'],
            per_device_eval_batch_size=rnn_cfg['batch_size'],
            warmup_steps=rnn_cfg['warmup_steps'],
            weight_decay=rnn_cfg['weight_decay'],
            logging_dir=f"{self.model_dir}/logs",
            logging_strategy=IntervalStrategy.EPOCH,
            evaluation_strategy=IntervalStrategy.EPOCH,
            save_strategy=IntervalStrategy.EPOCH,
            save_total_limit=3,
            # Restore the checkpoint with the best macro-F1 when training ends.
            load_best_model_at_end=True,
            metric_for_best_model='f1',
            greater_is_better=True,
            fp16=torch.cuda.is_available(),
            gradient_accumulation_steps=rnn_cfg['gradient_accumulation_steps']
        )

        trainer = Trainer(
            model=self.model,
            args=training_args,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            data_collator=sentiment_collate_fn,
            compute_metrics=self.compute_metrics,
            # Stop if validation F1 fails to improve for 3 consecutive epochs.
            callbacks=[EarlyStoppingCallback(early_stopping_patience=3)]
        )

        logger.info("开始训练RNN模型...")
        trainer.train()

        logger.info("训练完成，保存模型...")
        trainer.save_model(f"{self.model_dir}/rnn_model")
        self.tokenizer.save_pretrained(f"{self.model_dir}/rnn_model")

    def evaluate(self, test_data_path: str) -> Dict[str, float]:
        """Evaluate the current model on a test split.

        Args:
            test_data_path: Path to the test data file.

        Returns:
            Trainer's evaluation metrics dict (includes the macro-F1).
        """
        test_dataset = SentimentDataset(
            data_path=test_data_path,
            tokenizer=self.tokenizer,
            max_length=config_loader['model']['rnn']['max_length']
        )

        # A bare Trainer (default TrainingArguments) is sufficient for eval-only use.
        trainer = Trainer(
            model=self.model,
            data_collator=sentiment_collate_fn,
            compute_metrics=self.compute_metrics
        )

        logger.info("评估模型性能...")
        results = trainer.evaluate(test_dataset)

        logger.info(f"评估结果: {results}")
        return results

    def load_model(self, model_path: str):
        """Load a previously trained checkpoint.

        BUGFIX: RNNModel is a plain nn.Module, not a PreTrainedModel, so it has
        no ``from_pretrained``. Restore the state dict that Trainer.save_model
        wrote for a non-PreTrainedModel instead.

        Args:
            model_path: Directory produced by train() (contains pytorch_model.bin
                and the tokenizer files).
        """
        logger.info(f"加载模型: {model_path}")
        state_dict = torch.load(f"{model_path}/pytorch_model.bin", map_location='cpu')
        self.model.load_state_dict(state_dict)
        self.tokenizer = AutoTokenizer.from_pretrained(model_path)

    def predict(self, text: str, entities: List[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Predict the sentiment of a single text.

        Args:
            text: Input text.
            entities: Optional entity annotations used to build an entity mask.

        Returns:
            Dict with the predicted label, its confidence, and the full
            per-label probability distribution.
        """
        self.model.eval()

        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # BUGFIX: the model must live on the same device as the inputs;
        # previously only the inputs were moved, crashing on GPU machines.
        self.model.to(device)

        max_length = config_loader['model']['rnn']['max_length']
        encoding = self.tokenizer(
            text,
            max_length=max_length,
            padding='max_length',
            truncation=True,
            return_tensors='pt'
        )

        # BUGFIX: an all-zeros entity mask multiplies every encoder hidden state
        # by 0 inside RNNModel.forward, destroying the input. Only build a mask
        # when entities are actually supplied; otherwise pass None.
        entity_mask = None
        if entities:
            entity_mask = torch.zeros(
                (1, len(config_loader['model']['entity_types']), max_length)
            )
            # TODO: populate the mask from `entities` (reuse the logic in
            # SentimentDataset instead of this placeholder).
            entity_mask = entity_mask.to(device)

        encoding = {k: v.to(device) for k, v in encoding.items()}

        with torch.no_grad():
            logits = self.model(**encoding, entity_mask=entity_mask)
            probabilities = torch.softmax(logits, dim=-1)
            predicted_class = torch.argmax(probabilities, dim=-1).item()

        sentiment_labels = config_loader['model']['sentiment_labels']
        return {
            'sentiment': sentiment_labels[predicted_class],
            'confidence': probabilities[0][predicted_class].item(),
            'probabilities': {label: probabilities[0][i].item() for i, label in enumerate(sentiment_labels)}
        }