"""
基于DeepSeek-7B的情感分析模型实现
利用大语言模型进行实体级情感分析
"""
import os
import torch
from typing import Dict, List, Any, Optional
from transformers import AutoTokenizer, AutoModelForCausalLM, get_linear_schedule_with_warmup
from torch.optim import AdamW
from torch import nn
from torch.utils.data import DataLoader, Dataset
from sklearn.metrics import classification_report, f1_score
from src.model_development.sentiment_analysis.base_sentiment_model import BaseSentimentModel, SentimentModelFactory
from src.utils.logger import Logger
from src.utils.config_loader import ConfigLoader
from src.utils.constants import SENTIMENT_LABELS


class SentimentDataset(Dataset):
    """Torch dataset turning entity-level sentiment samples into prompts.

    Each sample's text and first entity are rendered into a Chinese
    instruction prompt, tokenized to a fixed length, and paired with the
    entity's sentiment class id plus a token-level 0/1 mask marking which
    tokens belong to the entity.
    """

    def __init__(self, data: List[Dict[str, Any]], tokenizer, sentiment_to_id: Dict[str, int], max_seq_length: int):
        """
        Args:
            data: Samples shaped like
                ``[{"text": str, "entities": [{"start": int, "end": int, "sentiment": str, ...}]}]``.
            tokenizer: Hugging Face tokenizer supporting ``encode_plus`` with
                ``return_offsets_mapping`` (i.e. a fast tokenizer).
            sentiment_to_id: Mapping from sentiment label to class id.
            max_seq_length: Fixed token length used for padding/truncation.
        """
        self.data = data
        self.tokenizer = tokenizer
        self.sentiment_to_id = sentiment_to_id
        self.max_seq_length = max_seq_length

    def __len__(self) -> int:
        return len(self.data)

    def __getitem__(self, idx: int) -> Dict[str, Any]:
        """Return one tokenized training example for the sample at *idx*."""
        item = self.data[idx]
        text = item['text']
        entity = item['entities'][0]  # training assumes one entity per sample

        # Build the instruction prompt containing the target entity.
        prompt = self._create_prompt(text, entity)

        # Tokenize to a fixed length, keeping character offsets per token.
        inputs = self.tokenizer.encode_plus(
            prompt,
            add_special_tokens=True,
            max_length=self.max_seq_length,
            padding='max_length',
            truncation=True,
            return_tensors='pt',
            return_offsets_mapping=True
        )

        # Unknown/missing sentiments fall back to class 0.
        label = self.sentiment_to_id.get(entity.get('sentiment', 'neutral'), 0)

        # FIX: entity offsets are relative to `text`, but the tokenizer saw
        # the full prompt (instruction prefix + text). Shift the span by the
        # text's position inside the prompt so the mask lands on the entity
        # instead of on the prefix.
        text_offset = max(prompt.rfind(text), 0)
        entity_mask = self._create_entity_mask(text, entity, inputs['offset_mapping'][0], text_offset)

        return {
            'input_ids': inputs['input_ids'].squeeze(),
            'attention_mask': inputs['attention_mask'].squeeze(),
            'labels': torch.tensor(label, dtype=torch.long),
            'entity_masks': torch.tensor(entity_mask, dtype=torch.long)
        }

    def _create_prompt(self, text: str, entity: Dict[str, Any]) -> str:
        """Render the Chinese sentiment-analysis prompt for one entity."""
        entity_text = entity.get('text', '')
        start = entity.get('start', 0)
        end = entity.get('end', 0)

        # Recover the entity's surface form from its span when not given.
        if not entity_text and 'start' in entity and 'end' in entity:
            entity_text = text[start:end]

        return f"分析下面文本中关于'{entity_text}'的情感倾向，是积极、消极还是中性？\n文本：{text}"

    def _create_entity_mask(self, text: str, entity: Dict[str, Any], offset_mapping, text_offset: int = 0) -> List[int]:
        """Build a 0/1 mask over tokens whose span overlaps the entity.

        Args:
            text: Original text (kept for interface compatibility).
            entity: Entity carrying character ``start``/``end`` offsets into ``text``.
            offset_mapping: Per-token ``(char_start, char_end)`` offsets into
                the tokenized string.
            text_offset: Position of ``text`` inside the tokenized string;
                the entity span is shifted by this amount before matching.

        Returns:
            List[int]: Mask of length ``max_seq_length``.
        """
        start = entity.get('start', 0) + text_offset
        end = entity.get('end', 0) + text_offset
        entity_mask = [0] * self.max_seq_length

        if offset_mapping is not None:
            for i, (offset_start, offset_end) in enumerate(offset_mapping):
                if i >= self.max_seq_length:
                    break
                # (0, 0) offsets mark special/padding tokens; otherwise flag
                # any token whose character span overlaps [start, end).
                if offset_end > 0 and offset_start < end and offset_end > start:
                    entity_mask[i] = 1

        return entity_mask


def sentiment_collate_fn(batch: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
    """Collate per-sample tensors into batched tensors for a DataLoader.

    Stacks ``input_ids``, ``attention_mask``, ``labels`` and
    ``entity_masks`` from each sample along a new batch dimension.
    """
    keys = ('input_ids', 'attention_mask', 'labels', 'entity_masks')
    return {key: torch.stack([sample[key] for sample in batch]) for key in keys}


class DeepSeekSentimentModel(BaseSentimentModel):
    """Entity-level sentiment analysis model built on DeepSeek-7B.

    A causal-LM backbone encodes a Chinese prompt naming the target entity;
    the hidden states at the entity's token positions are mean-pooled and
    passed through a linear classifier over ``SENTIMENT_LABELS``.
    """

    def __init__(self, config: Dict[str, Any]):
        """Initialize the DeepSeek-7B sentiment model.

        Args:
            config: Model configuration. Recognized keys: ``model_name``,
                ``max_seq_length``, ``batch_size``, ``learning_rate``,
                ``epochs``, ``quantization_4bit``, ``model_save_dir``.
        """
        super().__init__(config)
        self.logger = Logger().get_logger(__name__)

        # Prefer GPU when available.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.logger.info(f"使用设备: {self.device}")

        # Hyper-parameters with defaults.
        self.model_name = config.get('model_name', 'deepseek-ai/deepseek-llm-7b-base')
        self.max_seq_length = config.get('max_seq_length', 512)
        self.batch_size = config.get('batch_size', 8)
        self.learning_rate = config.get('learning_rate', 2e-5)
        self.epochs = config.get('epochs', 3)

        # Bidirectional label <-> class-id mapping.
        self.sentiment_to_id = {label: i for i, label in enumerate(SENTIMENT_LABELS)}
        self.id_to_sentiment = {i: label for label, i in self.sentiment_to_id.items()}

        # Load backbone, tokenizer and classification head.
        self._init_model()

    def _init_model(self):
        """Load the pretrained DeepSeek backbone, tokenizer and classifier head."""
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_name,
            load_in_4bit=bool(self.config.get('quantization_4bit', False)),
            device_map='auto'
        )

        # Causal LMs often ship without a pad token; reuse EOS for padding.
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

        # Linear head mapping the pooled hidden state to sentiment logits.
        self.classifier = nn.Linear(self.model.config.hidden_size, len(SENTIMENT_LABELS))

        self.model = self.model.to(self.device)
        self.classifier = self.classifier.to(self.device)

        # DataParallel is incompatible with bitsandbytes 4-bit models.
        if torch.cuda.device_count() > 1 and not hasattr(self.model, 'is_loaded_in_4bit'):
            self.model = nn.DataParallel(self.model)

    def _pool_entity_states(self, last_hidden_state: torch.Tensor, entity_masks: Optional[torch.Tensor]) -> torch.Tensor:
        """Mean-pool hidden states over each sample's entity tokens.

        Args:
            last_hidden_state: Backbone output, shape (batch, seq, hidden).
            entity_masks: 0/1 mask of shape (batch, seq), or ``None``.

        Returns:
            torch.Tensor: Pooled states of shape (batch, hidden). Samples
            with an empty mask (and the no-mask case) fall back to the
            first token's hidden state.
        """
        if entity_masks is None:
            return last_hidden_state[:, 0, :]

        pooled = []
        for i in range(last_hidden_state.size(0)):
            mask = entity_masks[i].bool()
            if mask.sum() > 0:
                pooled.append(last_hidden_state[i][mask].mean(dim=0))
            else:
                pooled.append(last_hidden_state[i][0])
        return torch.stack(pooled)

    def train(self, train_data: List[Dict[str, Any]], val_data: Optional[List[Dict[str, Any]]] = None) -> Dict[str, float]:
        """Fine-tune the backbone and classifier on entity-level sentiment data.

        Args:
            train_data: Training samples shaped like
                ``[{"text": str, "entities": [{"start": int, "end": int, "type": str, "sentiment": str}]}]``.
            val_data: Optional validation samples in the same format; when
                given, the best checkpoint (by weighted F1) is saved each epoch.

        Returns:
            Dict[str, float]: Validation metrics when ``val_data`` is given,
            otherwise ``{'loss': <last epoch average loss>}``.

        Raises:
            Exception: Re-raises any training failure after logging it.
        """
        try:
            self.logger.info(f"开始训练DeepSeek情感分析模型，训练数据量: {len(train_data)}")

            train_dataset = SentimentDataset(train_data, self.tokenizer, self.sentiment_to_id, self.max_seq_length)
            train_loader = DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True, collate_fn=sentiment_collate_fn)

            # Backbone and classification head are optimized jointly.
            trainable_params = list(self.model.parameters()) + list(self.classifier.parameters())
            optimizer = AdamW(trainable_params, lr=self.learning_rate, eps=1e-8)

            total_steps = len(train_loader) * self.epochs
            scheduler = get_linear_schedule_with_warmup(
                optimizer,
                num_warmup_steps=0,
                num_training_steps=total_steps
            )

            criterion = nn.CrossEntropyLoss()

            best_val_f1 = 0.0
            # FIX: defined before the loop so epochs == 0 cannot raise NameError
            # at the final return.
            avg_loss = 0.0

            for epoch in range(self.epochs):
                self.logger.info(f"Epoch {epoch + 1}/{self.epochs}")

                self.model.train()
                self.classifier.train()

                total_loss = 0

                for step, batch in enumerate(train_loader):
                    input_ids = batch['input_ids'].to(self.device)
                    attention_mask = batch['attention_mask'].to(self.device)
                    labels = batch['labels'].to(self.device)
                    entity_masks = batch['entity_masks'].to(self.device) if 'entity_masks' in batch else None

                    outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=True)
                    last_hidden_state = outputs.hidden_states[-1]

                    # Mean-pool entity-token states (first token as fallback).
                    entity_hidden_states = self._pool_entity_states(last_hidden_state, entity_masks)

                    logits = self.classifier(entity_hidden_states)
                    loss = criterion(logits, labels)
                    total_loss += loss.item()

                    loss.backward()
                    # FIX: clip the classifier's gradients too, not only the backbone's.
                    torch.nn.utils.clip_grad_norm_(trainable_params, 1.0)
                    optimizer.step()
                    scheduler.step()
                    optimizer.zero_grad()

                    if (step + 1) % 10 == 0:
                        self.logger.info(f"Step {step + 1}/{len(train_loader)}, Loss: {loss.item():.4f}")

                # Average loss over the epoch (guard against an empty loader).
                avg_loss = total_loss / max(len(train_loader), 1)
                self.logger.info(f"Epoch {epoch + 1} 平均损失: {avg_loss:.4f}")

                if val_data:
                    val_metrics = self.evaluate(val_data)
                    self.logger.info(f"验证集指标: {val_metrics}")

                    # Checkpoint whenever validation F1 improves.
                    if val_metrics['f1_score'] > best_val_f1:
                        best_val_f1 = val_metrics['f1_score']
                        model_save_dir = self.config.get('model_save_dir', './model_saves/sentiment_model')
                        self.save(model_save_dir)
                        self.logger.info(f"保存最佳模型，验证集F1分数: {best_val_f1:.4f}")

            self.is_trained = True

            if val_data:
                return self.evaluate(val_data)
            return {'loss': avg_loss}

        except Exception as e:
            self.logger.error(f"模型训练失败: {str(e)}")
            raise

    def predict(self, text: str, entities: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Predict the sentiment of each entity in *text*.

        Args:
            text: Input text.
            entities: Entities with ``start``/``end`` character offsets
                (and optionally ``text``).

        Returns:
            List[Dict[str, Any]]: Copies of the input entities with
            ``sentiment``, ``sentiment_prob`` and ``sentiment_distribution``
            added.
        """
        if not self.is_trained:
            self.logger.warning("模型尚未训练，请先训练模型或加载预训练模型")

        results = []

        # Inference mode for both backbone and head.
        self.model.eval()
        self.classifier.eval()

        for entity in entities:
            prompt = self._create_prompt(text, entity)

            # FIX: request offset mapping — it was never returned before, so
            # the entity mask was always empty and pooling silently degraded
            # to first-token pooling.
            inputs = self.tokenizer.encode_plus(
                prompt,
                add_special_tokens=True,
                max_length=self.max_seq_length,
                padding='max_length',
                truncation=True,
                return_tensors='pt',
                return_offsets_mapping=True
            )

            input_ids = inputs['input_ids'].to(self.device)
            attention_mask = inputs['attention_mask'].to(self.device)

            # FIX: entity offsets are relative to `text`, but the tokenizer
            # saw the full prompt; shift by the text's position in the prompt.
            # Also index [0] — the mapping is batched as (1, seq, 2).
            text_offset = max(prompt.rfind(text), 0)
            entity_mask = self._create_entity_mask(text, entity, inputs['offset_mapping'][0], text_offset)
            entity_mask = torch.tensor([entity_mask], device=self.device)

            with torch.no_grad():
                outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=True)
                last_hidden_state = outputs.hidden_states[-1]

                # Pool entity-token states and classify.
                pooled = self._pool_entity_states(last_hidden_state, entity_mask)
                logits = self.classifier(pooled)

                predicted_label_id = torch.argmax(logits, dim=1).item()
                predicted_sentiment = self.id_to_sentiment[predicted_label_id]

                # Full class-probability distribution for downstream use.
                probabilities = torch.softmax(logits, dim=1).squeeze().tolist()

                result_entity = entity.copy()
                result_entity['sentiment'] = predicted_sentiment
                result_entity['sentiment_prob'] = probabilities[predicted_label_id]
                result_entity['sentiment_distribution'] = {label: probabilities[i] for label, i in self.sentiment_to_id.items()}

                results.append(result_entity)

        return results

    def batch_predict(self, texts: List[str], entities_list: List[List[Dict[str, Any]]]) -> List[List[Dict[str, Any]]]:
        """Predict entity sentiments for many texts.

        Args:
            texts: Input texts.
            entities_list: One entity list per text, aligned with ``texts``.

        Returns:
            List[List[Dict[str, Any]]]: Per-text lists of annotated entities.
        """
        results = []

        # Delegate to predict() text by text.
        for text, entities in zip(texts, entities_list):
            results.append(self.predict(text, entities))

        return results

    def evaluate(self, test_data: List[Dict[str, Any]]) -> Dict[str, float]:
        """Evaluate the model on labeled test data.

        Args:
            test_data: Samples in the same format as ``train``; only entities
                carrying a ``sentiment`` key contribute to the metrics.

        Returns:
            Dict[str, float]: ``f1_score``, ``accuracy``, ``precision`` and
            ``recall`` (weighted averages).
        """
        self.logger.info(f"开始评估模型，测试数据量: {len(test_data)}")

        true_labels = []
        predicted_labels = []

        for item in test_data:
            text = item['text']
            entities = item['entities']

            predictions = self.predict(text, entities)

            # Only score entities that carry a gold sentiment.
            for entity, pred_entity in zip(entities, predictions):
                if 'sentiment' in entity:
                    true_labels.append(self.sentiment_to_id.get(entity['sentiment'], 0))
                    predicted_labels.append(self.sentiment_to_id.get(pred_entity['sentiment'], 0))

        # FIX: guard against an empty label set — sklearn raises on it.
        if not true_labels:
            metrics = {'f1_score': 0.0, 'accuracy': 0.0, 'precision': 0.0, 'recall': 0.0}
            self.logger.info(f"模型评估完成: {metrics}")
            return metrics

        # FIX: pass the full label set explicitly so classification_report
        # does not fail on a target_names length mismatch when the test set
        # is missing a class; zero_division avoids warnings for such classes.
        label_ids = list(range(len(SENTIMENT_LABELS)))
        f1 = f1_score(true_labels, predicted_labels, labels=label_ids, average='weighted', zero_division=0)
        report = classification_report(
            true_labels, predicted_labels,
            labels=label_ids, target_names=SENTIMENT_LABELS,
            output_dict=True, zero_division=0
        )

        # Computed directly: report['accuracy'] is not always present.
        accuracy = sum(t == p for t, p in zip(true_labels, predicted_labels)) / len(true_labels)

        metrics = {
            'f1_score': f1,
            'accuracy': accuracy,
            'precision': report['weighted avg']['precision'],
            'recall': report['weighted avg']['recall']
        }

        self.logger.info(f"模型评估完成: {metrics}")
        return metrics

    def save(self, path: str) -> bool:
        """Persist tokenizer, classifier weights and config to *path*.

        Args:
            path: Directory to save into (created if missing).

        Returns:
            bool: True on success, False on failure (logged, not raised).
        """
        try:
            import json

            os.makedirs(path, exist_ok=True)

            self.tokenizer.save_pretrained(path)

            # Only the classifier head is saved; the backbone is re-loaded
            # from the hub by name on load().
            torch.save(self.classifier.state_dict(), os.path.join(path, 'classifier.pt'))

            config_path = os.path.join(path, 'config.json')
            with open(config_path, 'w', encoding='utf-8') as f:
                json.dump({
                    'model_name': self.model_name,
                    'max_seq_length': self.max_seq_length,
                    'sentiment_to_id': self.sentiment_to_id
                }, f, ensure_ascii=False, indent=2)

            self.logger.info(f"模型已成功保存到: {path}")
            return True
        except Exception as e:
            self.logger.error(f"模型保存失败: {str(e)}")
            return False

    def load(self, path: str) -> bool:
        """Restore config, backbone and classifier weights from *path*.

        Args:
            path: Directory previously written by ``save``.

        Returns:
            bool: True on success, False on failure (logged, not raised).
        """
        try:
            import json

            # Restore saved config (falls back to current values if absent).
            config_path = os.path.join(path, 'config.json')
            if os.path.exists(config_path):
                with open(config_path, 'r', encoding='utf-8') as f:
                    config = json.load(f)
                    self.model_name = config.get('model_name', self.model_name)
                    self.max_seq_length = config.get('max_seq_length', self.max_seq_length)
                    if 'sentiment_to_id' in config:
                        self.sentiment_to_id = config['sentiment_to_id']
                        self.id_to_sentiment = {i: label for label, i in self.sentiment_to_id.items()}

            # Re-create backbone/tokenizer/classifier from the (possibly
            # updated) model name.
            self._init_model()

            # Restore the fine-tuned classifier head if present.
            classifier_path = os.path.join(path, 'classifier.pt')
            if os.path.exists(classifier_path):
                self.classifier.load_state_dict(torch.load(classifier_path, map_location=self.device))

            self.is_trained = True

            self.logger.info(f"模型已成功加载从: {path}")
            return True
        except Exception as e:
            self.logger.error(f"模型加载失败: {str(e)}")
            return False

    def _create_prompt(self, text: str, entity: Dict[str, Any]) -> str:
        """Render the Chinese sentiment-analysis prompt for one entity.

        Args:
            text: Input text.
            entity: Entity info (``text`` and/or ``start``/``end`` offsets).

        Returns:
            str: Prompt asking for the entity's sentiment in *text*.
        """
        entity_text = entity.get('text', '')
        start = entity.get('start', 0)
        end = entity.get('end', 0)

        # Recover the entity's surface form from its span when not given.
        if not entity_text and start < len(text) and end <= len(text):
            entity_text = text[start:end]

        prompt = f"分析下面文本中关于'{entity_text}'的情感倾向，是积极、消极还是中性？\n文本：{text}"
        return prompt

    def _create_entity_mask(self, text: str, entity: Dict[str, Any], offset_mapping: Optional[torch.Tensor] = None, text_offset: int = 0) -> List[int]:
        """Build a 0/1 token mask marking tokens overlapping the entity span.

        Args:
            text: Input text (kept for interface compatibility).
            entity: Entity with character ``start``/``end`` offsets into ``text``.
            offset_mapping: Per-token ``(char_start, char_end)`` offsets into
                the tokenized string; a zero mask is returned when absent.
            text_offset: Position of ``text`` inside the tokenized string;
                the entity span is shifted by this amount before matching.

        Returns:
            List[int]: Mask of length ``max_seq_length``.
        """
        start = entity.get('start', 0) + text_offset
        end = entity.get('end', 0) + text_offset
        entity_mask = [0] * self.max_seq_length

        # Without offsets we cannot locate the entity; return an empty mask
        # so callers fall back to first-token pooling.
        if offset_mapping is None or not hasattr(offset_mapping, '__len__'):
            return entity_mask

        for i, (offset_start, offset_end) in enumerate(offset_mapping):
            if i >= self.max_seq_length:
                break
            # (0, 0) offsets mark special/padding tokens; otherwise flag any
            # token whose character span overlaps [start, end).
            if offset_end > 0 and offset_start < end and offset_end > start:
                entity_mask[i] = 1

        return entity_mask

    def quantize_model(self, quantization_type: str = '4bit') -> bool:
        """Quantize the backbone in place.

        Args:
            quantization_type: Only ``'4bit'`` is supported.

        Returns:
            bool: True on success, False for unsupported/already-quantized
            models or on failure (logged, not raised).
        """
        try:
            if quantization_type == '4bit' and not hasattr(self.model, 'is_loaded_in_4bit'):
                from transformers import BitsAndBytesConfig

                # Keep the fine-tuned head across the backbone reload.
                classifier_weights = self.classifier.state_dict()

                # Reload the backbone with NF4 double quantization.
                quantization_config = BitsAndBytesConfig(
                    load_in_4bit=True,
                    bnb_4bit_quant_type="nf4",
                    bnb_4bit_use_double_quant=True,
                    bnb_4bit_compute_dtype=torch.bfloat16
                )

                self.logger.info("开始对模型进行4bit量化...")
                self.model = AutoModelForCausalLM.from_pretrained(
                    self.model_name,
                    quantization_config=quantization_config,
                    device_map='auto'
                )

                # Restore the classifier head.
                self.classifier.load_state_dict(classifier_weights)
                self.logger.info("模型量化完成")
                return True
            else:
                self.logger.warning(f"不支持的量化类型: {quantization_type} 或模型已经量化")
                return False
        except Exception as e:
            self.logger.error(f"模型量化失败: {str(e)}")
            return False

    def optimize_inference(self, use_cache: bool = True) -> None:
        """Prepare the model for faster inference.

        Args:
            use_cache: Whether to enable the backbone's KV cache.
        """
        try:
            self.model.config.use_cache = use_cache

            # Switch both backbone and head to eval mode.
            self.model.eval()
            self.classifier.eval()

            self.logger.info(f"模型推理优化完成，使用KV缓存: {use_cache}")
        except Exception as e:
            self.logger.error(f"模型推理优化失败: {str(e)}")

# Register the model with the factory so it can be constructed by name.
SentimentModelFactory.register_model('deepseek_sentiment', DeepSeekSentimentModel)