"""
基于DeepSeek-7B的情感分析模型
利用大语言模型的强大语义理解能力进行实体级情感分析
"""
import json
import os
from typing import Dict, List, Any, Optional

import matplotlib.pyplot as plt
import numpy as np
import torch
from sklearn.metrics import precision_recall_fscore_support
from torch.optim import AdamW
from torch.utils.data import DataLoader, Dataset
from transformers import AutoTokenizer, AutoModelForCausalLM, get_linear_schedule_with_warmup

from fin_senti_entity_platform.model_development.incremental_pretrain.base_sentiment_analyzer import BaseSentimentAnalyzer, \
    SentimentAnalyzerFactory
from fin_senti_entity_platform.utils import SENTIMENT_LABELS
from fin_senti_entity_platform.utils.logger import Logger
from fin_senti_entity_platform.utils.config_loader import ConfigLoader

# Reverse mapping (label id -> label name), used to decode model predictions.
INVERSE_SENTIMENT_LABELS = {v: k for k, v in SENTIMENT_LABELS.items()}

class DeepSeekSentimentAnalyzer(BaseSentimentAnalyzer):
    """Entity-level sentiment analyzer built on the DeepSeek-7B causal LM.

    The base LM is kept frozen; a small linear classification head is
    attached on top of its last hidden states and is the only part that is
    trained.  Because DeepSeek is a decoder-only (causal) model, sequences
    are pooled at the last non-padding token — the only position whose
    hidden state has attended to the entire prompt.
    """

    def __init__(self, config: Dict[str, Any]):
        """
        Initialize the DeepSeek-7B sentiment analyzer.

        Args:
            config: Model configuration parameters. Recognized keys:
                model_name, max_seq_length, batch_size, learning_rate,
                num_epochs, warmup_ratio, weight_decay.
        """
        super().__init__(config)
        self.logger = Logger().get_logger(__name__)

        # Select compute device.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.logger.info(f"使用设备: {self.device}")

        # Hyper-parameters with defaults.
        self.model_name = config.get('model_name', 'deepseek-ai/deepseek-llm-7b-base')
        self.max_seq_length = config.get('max_seq_length', 512)
        self.batch_size = config.get('batch_size', 8)
        self.learning_rate = config.get('learning_rate', 5e-6)
        self.num_epochs = config.get('num_epochs', 2)
        self.warmup_ratio = config.get('warmup_ratio', 0.1)
        self.weight_decay = config.get('weight_decay', 0.01)

        # Load base model and tokenizer (fp16 on GPU to fit 7B weights).
        self.logger.info(f"正在加载模型: {self.model_name}")
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_name,
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
            device_map="auto" if torch.cuda.is_available() else None
        )

        # The LM itself is never fine-tuned: freeze it so backward() does not
        # build (and store) gradients for 7B parameters.
        for param in self.model.parameters():
            param.requires_grad_(False)

        # Attach the trainable classification head.  Created after the freeze
        # so its parameters keep requires_grad=True.
        self.num_labels = len(SENTIMENT_LABELS)
        self.model.classifier = torch.nn.Linear(
            self.model.config.hidden_size,
            self.num_labels
        ).to(self.device)

        # Causal LMs often ship without a pad token; fall back to EOS.
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token_id = self.tokenizer.eos_token_id

    def _forward_logits(self, input_ids: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
        """
        Run the frozen LM and project pooled hidden states to sentiment logits.

        Args:
            input_ids: (batch, seq_len) token ids.
            attention_mask: (batch, seq_len) mask, 1 for real tokens.

        Returns:
            torch.Tensor: (batch, num_labels) classification logits.
        """
        # The LM is frozen, so its forward pass never needs a graph.
        with torch.no_grad():
            outputs = self.model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                output_hidden_states=True
            )
        last_hidden_state = outputs.hidden_states[-1]

        # With causal attention the first token only sees itself; pool the
        # last non-padding token, which has attended to the whole prompt
        # (same strategy as HF's *ForSequenceClassification causal models).
        last_token_idx = (attention_mask.sum(dim=1).clamp(min=1) - 1).long()
        batch_idx = torch.arange(last_hidden_state.size(0), device=last_hidden_state.device)
        pooled = last_hidden_state[batch_idx, last_token_idx]

        # The LM may run in fp16 while the head is fp32: align dtypes before
        # the linear projection to avoid a runtime dtype-mismatch error.
        pooled = pooled.to(self.model.classifier.weight.dtype)
        return self.model.classifier(pooled)

    def load_model(self, model_path: str) -> bool:
        """
        Load a model previously written by save_model().

        Args:
            model_path: Directory containing the checkpoint.

        Returns:
            bool: True on success, False otherwise.
        """
        try:
            if not os.path.exists(model_path):
                self.logger.error(f"模型路径不存在: {model_path}")
                return False

            # Load base LM weights.
            self.model = AutoModelForCausalLM.from_pretrained(
                model_path,
                torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                device_map="auto" if torch.cuda.is_available() else None
            )
            # Keep the reloaded LM frozen, matching __init__.
            for param in self.model.parameters():
                param.requires_grad_(False)

            # Load tokenizer and restore the pad-token fallback.
            self.tokenizer = AutoTokenizer.from_pretrained(model_path)
            if self.tokenizer.pad_token_id is None:
                self.tokenizer.pad_token_id = self.tokenizer.eos_token_id

            # from_pretrained() discards the custom classification head, so it
            # must be re-created and, when available, restored from the file
            # save_model() wrote alongside the checkpoint.
            self.model.classifier = torch.nn.Linear(
                self.model.config.hidden_size,
                self.num_labels
            ).to(self.device)
            classifier_file = os.path.join(model_path, 'classifier.pt')
            if os.path.exists(classifier_file):
                self.model.classifier.load_state_dict(
                    torch.load(classifier_file, map_location=self.device)
                )
            else:
                self.logger.warning(f"未找到分类头权重文件: {classifier_file}")

            self.logger.info(f"成功加载模型: {model_path}")
            return True

        except Exception as e:
            self.logger.error(f"加载模型失败: {str(e)}")
            return False

    def save_model(self, model_path: str) -> bool:
        """
        Save model, tokenizer, classification head and analyzer config.

        Args:
            model_path: Target directory (created if missing).

        Returns:
            bool: True on success, False otherwise.
        """
        try:
            # Ensure the target directory exists.
            os.makedirs(model_path, exist_ok=True)

            # Save base LM and tokenizer in HF format.
            self.model.save_pretrained(model_path)
            self.tokenizer.save_pretrained(model_path)

            # The head is not part of the HF checkpoint format; save its
            # state dict separately so load_model() can restore it.
            torch.save(
                self.model.classifier.state_dict(),
                os.path.join(model_path, 'classifier.pt')
            )

            # Use a distinct filename: save_pretrained() already wrote the HF
            # 'config.json'; overwriting it would break a later from_pretrained().
            with open(os.path.join(model_path, 'analyzer_config.json'), 'w', encoding='utf-8') as f:
                json.dump(self.config, f, ensure_ascii=False, indent=2)

            self.logger.info(f"成功保存模型到: {model_path}")
            return True

        except Exception as e:
            self.logger.error(f"保存模型失败: {str(e)}")
            return False

    def train(self, train_data: List[Dict[str, Any]], val_data: Optional[List[Dict[str, Any]]] = None) -> Dict[str, float]:
        """
        Train the classification head (the LM stays frozen).

        Args:
            train_data: Training samples; each item is expected to carry a
                'text' and an 'entities' list (see convert_to_features).
            val_data: Optional validation samples in the same format.

        Returns:
            Dict[str, float]: Final metrics (loss / precision / recall / f1).

        Raises:
            Exception: Re-raised after logging on any training failure.
        """
        try:
            # Build the training dataloader.
            self.logger.info("开始转换训练数据特征")
            train_features = self.convert_to_features(train_data)
            train_dataset = SentimentDataset(train_features)
            train_dataloader = DataLoader(
                train_dataset,
                batch_size=self.batch_size,
                shuffle=True,
                collate_fn=self._collate_fn
            )

            # Optional validation dataloader.
            val_dataloader = None
            if val_data:
                self.logger.info("开始转换验证数据特征")
                val_features = self.convert_to_features(val_data)
                val_dataset = SentimentDataset(val_features)
                val_dataloader = DataLoader(
                    val_dataset,
                    batch_size=self.batch_size,
                    shuffle=False,
                    collate_fn=self._collate_fn
                )

            # Optimizer / schedule over the head parameters only.
            optimizer = AdamW(
                self.model.classifier.parameters(),
                lr=self.learning_rate,
                weight_decay=self.weight_decay
            )

            total_steps = len(train_dataloader) * self.num_epochs
            warmup_steps = int(total_steps * self.warmup_ratio)
            scheduler = get_linear_schedule_with_warmup(
                optimizer,
                num_warmup_steps=warmup_steps,
                num_training_steps=total_steps
            )

            # Loss hoisted out of the loop; histories for the curves plot.
            loss_fn = torch.nn.CrossEntropyLoss()
            train_loss_history: List[float] = []
            val_loss_history: List[float] = []
            val_f1_history: List[float] = []
            avg_train_loss = 0.0  # defined even when num_epochs == 0

            self.logger.info(f"开始训练模型，共 {self.num_epochs} 个epoch")
            for epoch in range(self.num_epochs):
                self.model.train()
                total_loss = 0.0

                for step, batch in enumerate(train_dataloader):
                    input_ids = batch['input_ids'].to(self.device)
                    attention_mask = batch['attention_mask'].to(self.device)
                    labels = batch['labels'].to(self.device)

                    # Forward (LM under no_grad) + head classification.
                    logits = self._forward_logits(input_ids, attention_mask)
                    loss = loss_fn(logits, labels)

                    # Backward + clipped update of the head only.
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(self.model.classifier.parameters(), 1.0)
                    optimizer.step()
                    scheduler.step()
                    optimizer.zero_grad()

                    total_loss += loss.item()

                    # Periodic progress logging.
                    if (step + 1) % 50 == 0:
                        self.logger.info(f"Epoch {epoch+1}/{self.num_epochs}, Step {step+1}/{len(train_dataloader)}, Loss: {loss.item():.4f}")

                avg_train_loss = total_loss / len(train_dataloader)
                train_loss_history.append(avg_train_loss)
                self.logger.info(f"Epoch {epoch+1}/{self.num_epochs}, 平均训练损失: {avg_train_loss:.4f}")

                # Per-epoch validation.
                if val_dataloader:
                    val_metrics = self._evaluate_dataloader(val_dataloader)
                    val_loss_history.append(val_metrics['loss'])
                    val_f1_history.append(val_metrics['f1'])
                    self.logger.info(f"Epoch {epoch+1}/{self.num_epochs}, 验证损失: {val_metrics['loss']:.4f}, 验证F1: {val_metrics['f1']:.4f}")

            self._plot_training_curves(train_loss_history, val_loss_history, val_f1_history)

            # Final metrics come from the validation set when available.
            if val_dataloader:
                final_metrics = self._evaluate_dataloader(val_dataloader)
            else:
                final_metrics = {'f1': 0.0, 'precision': 0.0, 'recall': 0.0, 'loss': avg_train_loss}

            self.logger.info(f"训练完成，最终指标: {final_metrics}")
            return final_metrics

        except Exception as e:
            self.logger.error(f"训练模型失败: {str(e)}")
            raise

    def predict(self, text: str, entities: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Predict the sentiment of each entity mentioned in a text.

        Args:
            text: Input text.
            entities: Entity dicts (at least 'text'; 'type' is used in the prompt).

        Returns:
            List[Dict[str, Any]]: Copies of the input entities extended with
            'sentiment', 'sentiment_score' and 'sentiment_distribution'.
            Returns [] on failure.
        """
        try:
            self.model.eval()
            results = []

            for entity in entities:
                prompt = self._build_prompt(text, entity)

                # Single-sequence inference: no padding needed — the pooled
                # position is the last real token either way.
                inputs = self.tokenizer(
                    prompt,
                    return_tensors='pt',
                    max_length=self.max_seq_length,
                    truncation=True
                )

                input_ids = inputs['input_ids'].to(self.device)
                attention_mask = inputs['attention_mask'].to(self.device)

                with torch.no_grad():
                    logits = self._forward_logits(input_ids, attention_mask)
                    # Softmax in fp32 for numerical stability under fp16.
                    probabilities = torch.nn.functional.softmax(logits.float(), dim=-1).cpu().numpy()[0]

                predicted_label_id = int(np.argmax(probabilities))
                predicted_label = INVERSE_SENTIMENT_LABELS.get(predicted_label_id, 'neutral')

                # Copy the entity so the caller's input is not mutated.
                entity_with_sentiment = entity.copy()
                entity_with_sentiment['sentiment'] = predicted_label
                entity_with_sentiment['sentiment_score'] = float(probabilities[predicted_label_id])
                entity_with_sentiment['sentiment_distribution'] = {
                    label: float(probabilities[SENTIMENT_LABELS[label]])
                    for label in SENTIMENT_LABELS
                }

                results.append(entity_with_sentiment)

            return results

        except Exception as e:
            self.logger.error(f"预测情感失败: {str(e)}")
            return []

    def evaluate(self, test_data: List[Dict[str, Any]]) -> Dict[str, float]:
        """
        Evaluate model performance on a labeled test set.

        Args:
            test_data: Test samples in the same format as train_data.

        Returns:
            Dict[str, float]: loss / precision / recall / f1.

        Raises:
            Exception: Re-raised after logging on failure.
        """
        try:
            self.logger.info("开始转换测试数据特征")
            test_features = self.convert_to_features(test_data)
            test_dataset = SentimentDataset(test_features)
            test_dataloader = DataLoader(
                test_dataset,
                batch_size=self.batch_size,
                shuffle=False,
                collate_fn=self._collate_fn
            )

            metrics = self._evaluate_dataloader(test_dataloader)
            self.logger.info(f"测试评估完成，指标: {metrics}")
            return metrics

        except Exception as e:
            self.logger.error(f"评估模型失败: {str(e)}")
            raise

    def convert_to_features(self, data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Convert raw samples into per-entity model input features.

        One feature record is produced per (text, entity) pair.

        Args:
            data: Samples with a 'text' and an optional 'entities' list;
                each entity may carry a 'sentiment' label (default 'neutral').

        Returns:
            List[Dict[str, Any]]: Dicts with 'input_ids', 'attention_mask'
            (fixed length max_seq_length) and integer 'labels'.
        """
        features = []

        for item in data:
            text = item['text']
            entities = item.get('entities', [])

            for entity in entities:
                prompt = self._build_prompt(text, entity)

                # Pad to a fixed length so _collate_fn can stack the lists
                # into rectangular tensors.
                inputs = self.tokenizer(
                    prompt,
                    return_tensors='pt',
                    max_length=self.max_seq_length,
                    truncation=True,
                    padding='max_length'
                )

                # Map the (possibly missing) sentiment label to its id.
                sentiment_label = entity.get('sentiment', 'neutral')
                label_id = SENTIMENT_LABELS.get(sentiment_label, SENTIMENT_LABELS['neutral'])

                features.append({
                    'input_ids': inputs['input_ids'].squeeze().tolist(),
                    'attention_mask': inputs['attention_mask'].squeeze().tolist(),
                    'labels': label_id
                })

        return features

    def _collate_fn(self, batch):
        """Stack a list of feature dicts into batched tensors for DataLoader."""
        return {
            'input_ids': torch.tensor([item['input_ids'] for item in batch]),
            'attention_mask': torch.tensor([item['attention_mask'] for item in batch]),
            'labels': torch.tensor([item['labels'] for item in batch])
        }

    def _evaluate_dataloader(self, dataloader: DataLoader) -> Dict[str, float]:
        """
        Compute loss and macro precision/recall/F1 over a dataloader.

        Args:
            dataloader: Batches produced with _collate_fn.

        Returns:
            Dict[str, float]: 'loss', 'precision', 'recall', 'f1'.
        """
        self.model.eval()
        total_loss = 0.0
        all_predictions = []
        all_labels = []
        loss_fn = torch.nn.CrossEntropyLoss()

        # Guard against an empty dataloader (would divide by zero below).
        if len(dataloader) == 0:
            return {'loss': 0.0, 'precision': 0.0, 'recall': 0.0, 'f1': 0.0}

        with torch.no_grad():
            for batch in dataloader:
                input_ids = batch['input_ids'].to(self.device)
                attention_mask = batch['attention_mask'].to(self.device)
                labels = batch['labels'].to(self.device)

                logits = self._forward_logits(input_ids, attention_mask)
                loss = loss_fn(logits, labels)
                total_loss += loss.item()

                # Collect predictions and gold labels for sklearn metrics.
                predictions = torch.argmax(logits, dim=-1).cpu().numpy()
                all_predictions.extend(predictions)
                all_labels.extend(labels.cpu().numpy())

        precision, recall, f1, _ = precision_recall_fscore_support(
            all_labels, all_predictions, average='macro', zero_division=0
        )

        avg_loss = total_loss / len(dataloader)

        return {
            'loss': avg_loss,
            'precision': precision,
            'recall': recall,
            'f1': f1
        }

    def _build_prompt(self, text: str, entity: Dict[str, Any]) -> str:
        """
        Build the sentiment-analysis prompt for one entity.

        Args:
            text: Input text.
            entity: Entity info ('text' and 'type' keys are used).

        Returns:
            str: Prompt fed to the tokenizer.
        """
        entity_text = entity.get('text', '')
        entity_type = entity.get('type', '')

        prompt = (
            f"以下是一段金融文本：\n"
            f"{text}\n\n"
            f"请分析其中实体'{entity_text}'({entity_type})的情感倾向，"
            f"仅返回'positive'、'negative'或'neutral'，不要添加任何额外说明。"
        )

        return prompt

    def _plot_training_curves(self, train_loss: List[float], val_loss: List[float], val_f1: List[float]):
        """Plot loss/F1 curves and save them under ./reports/figures/."""
        try:
            fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))

            # Left panel: training (and optional validation) loss.
            ax1.plot(train_loss, label='训练损失')
            if val_loss:
                ax1.plot(val_loss, label='验证损失')
            ax1.set_title('损失曲线')
            ax1.set_xlabel('Epoch')
            ax1.set_ylabel('损失')
            ax1.legend()

            # Right panel: validation F1.
            if val_f1:
                ax2.plot(val_f1, label='验证F1')
            ax2.set_title('F1分数曲线')
            ax2.set_xlabel('Epoch')
            ax2.set_ylabel('F1分数')
            ax2.legend()

            os.makedirs('./reports/figures', exist_ok=True)
            plt.savefig('./reports/figures/sentiment_analysis_training_curves.png')
            self.logger.info('训练曲线已保存到 ./reports/figures/sentiment_analysis_training_curves.png')

            # Close the figure to free matplotlib resources.
            plt.close()

        except Exception as e:
            self.logger.error(f"绘制训练曲线失败: {str(e)}")


class SentimentDataset(Dataset):
    """Map-style dataset over pre-computed sentiment-classification features.

    Each element is a feature dict ('input_ids', 'attention_mask', 'labels')
    that the analyzer's collate function stacks into batched tensors.
    """

    def __init__(self, features: List[Dict[str, Any]]):
        """Keep a reference to the already-encoded feature dicts."""
        self.features = features

    def __len__(self) -> int:
        """Number of feature records."""
        return len(self.features)

    def __getitem__(self, idx) -> Dict[str, Any]:
        """Return the feature dict stored at position ``idx``."""
        return self.features[idx]


# Register this analyzer under the 'deepseek' key so it can be instantiated
# through SentimentAnalyzerFactory at import time.
SentimentAnalyzerFactory.register_analyzer('deepseek', DeepSeekSentimentAnalyzer)