"""
基于BERT-CRF的实体识别模型
利用预训练语言模型BERT提取特征，结合CRF进行序列标注
"""
import json
import os
from typing import Dict, List, Any, Optional

import matplotlib.pyplot as plt
import torch
from sklearn.metrics import precision_recall_fscore_support
from torch.optim import AdamW
from torch.utils.data import DataLoader, Dataset
from transformers import BertTokenizer, BertModel, get_linear_schedule_with_warmup

from fin_senti_entity_platform.model_development.entity_recognition.base_entity_recognizer import BaseEntityRecognizer, EntityRecognizerFactory
from fin_senti_entity_platform.utils.config_loader import ConfigLoader
from fin_senti_entity_platform.utils.constants import ENTITY_TYPES, ENTITY_LABELS, INVERSE_ENTITY_LABELS
from fin_senti_entity_platform.utils.logger import Logger


class BertCrfEntityRecognizer(BaseEntityRecognizer):
    """BERT-CRF based entity recognizer.

    Wraps a pretrained BERT encoder plus a CRF head
    (``BertCrfForEntityRecognition``) and provides train / predict /
    evaluate / save / load operations on top of it.
    """

    def __init__(self, config: Dict[str, Any]):
        """Initialize the recognizer from a configuration dict.

        Args:
            config: Model configuration; recognized keys are
                ``bert_model_name``, ``num_labels``, ``max_seq_length``,
                ``batch_size``, ``learning_rate``, ``num_epochs``,
                ``warmup_ratio`` and ``weight_decay``.
        """
        super().__init__(config)
        self.logger = Logger().get_logger(__name__)

        # Prefer GPU when available.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.logger.info(f"使用设备: {self.device}")

        # Hyper-parameters with defaults suitable for Chinese financial text.
        self.bert_model_name = config.get('bert_model_name', 'bert-base-chinese')
        # NOTE(review): ENTITY_LABELS (label name -> id mapping) must exist in
        # utils.constants; the file originally imported only ENTITY_TYPES.
        self.num_labels = config.get('num_labels', len(ENTITY_LABELS))
        self.max_seq_length = config.get('max_seq_length', 128)
        self.batch_size = config.get('batch_size', 32)
        self.learning_rate = config.get('learning_rate', 2e-5)
        self.num_epochs = config.get('num_epochs', 3)
        self.warmup_ratio = config.get('warmup_ratio', 0.1)
        self.weight_decay = config.get('weight_decay', 0.01)

        # Tokenizer and BERT-CRF model.
        self.tokenizer = BertTokenizer.from_pretrained(self.bert_model_name)
        self.model = BertCrfForEntityRecognition(
            bert_model_name=self.bert_model_name,
            num_labels=self.num_labels
        ).to(self.device)

        # Expose the CRF transition matrix for inspection/debugging.
        self.crf_transitions = self.model.crf_transitions

    def load_model(self, model_path: str) -> bool:
        """Load model weights and tokenizer from ``model_path``.

        Args:
            model_path: Directory containing ``model.bin`` and tokenizer files.

        Returns:
            bool: True on success, False otherwise (errors are logged).
        """
        try:
            if not os.path.exists(model_path):
                self.logger.error(f"模型路径不存在: {model_path}")
                return False

            # Restore weights onto the active device.
            self.model.load_state_dict(torch.load(
                os.path.join(model_path, 'model.bin'),
                map_location=self.device
            ))

            # Restore the tokenizer saved alongside the weights.
            self.tokenizer = BertTokenizer.from_pretrained(model_path)

            self.logger.info(f"成功加载模型: {model_path}")
            return True

        except Exception as e:
            self.logger.error(f"加载模型失败: {str(e)}")
            return False

    def save_model(self, model_path: str) -> bool:
        """Persist model weights, tokenizer and config to ``model_path``.

        Args:
            model_path: Target directory (created if missing).

        Returns:
            bool: True on success, False otherwise (errors are logged).
        """
        try:
            os.makedirs(model_path, exist_ok=True)

            # Weights.
            torch.save(self.model.state_dict(), os.path.join(model_path, 'model.bin'))

            # Tokenizer (vocabulary + tokenizer config).
            self.tokenizer.save_pretrained(model_path)

            # Training configuration, for reproducibility.
            with open(os.path.join(model_path, 'config.json'), 'w', encoding='utf-8') as f:
                json.dump(self.config, f, ensure_ascii=False, indent=2)

            self.logger.info(f"成功保存模型到: {model_path}")
            return True

        except Exception as e:
            self.logger.error(f"保存模型失败: {str(e)}")
            return False

    def train(self, train_data: List[Dict[str, Any]], val_data: Optional[List[Dict[str, Any]]] = None) -> Dict[str, float]:
        """Train the model.

        Args:
            train_data: Training examples (each with ``text`` and ``entities``).
            val_data: Optional validation examples, evaluated after each epoch.

        Returns:
            Dict[str, float]: Final metrics (``loss``, ``precision``,
            ``recall``, ``f1``).

        Raises:
            Exception: Re-raised after logging if training fails.
        """
        try:
            # Build the training dataloader.
            self.logger.info("开始转换训练数据特征")
            train_features = self.convert_to_features(train_data)
            train_dataset = EntityDataset(train_features)
            train_dataloader = DataLoader(
                train_dataset,
                batch_size=self.batch_size,
                shuffle=True,
                collate_fn=self._collate_fn
            )

            # Optionally build the validation dataloader.
            val_dataloader = None
            if val_data:
                self.logger.info("开始转换验证数据特征")
                val_features = self.convert_to_features(val_data)
                val_dataset = EntityDataset(val_features)
                val_dataloader = DataLoader(
                    val_dataset,
                    batch_size=self.batch_size,
                    shuffle=False,
                    collate_fn=self._collate_fn
                )

            # Optimizer + linear warmup/decay schedule.
            optimizer = AdamW(
                self.model.parameters(),
                lr=self.learning_rate,
                weight_decay=self.weight_decay
            )

            total_steps = len(train_dataloader) * self.num_epochs
            warmup_steps = int(total_steps * self.warmup_ratio)
            scheduler = get_linear_schedule_with_warmup(
                optimizer,
                num_warmup_steps=warmup_steps,
                num_training_steps=total_steps
            )

            # Training history for the plotted curves.
            train_loss_history = []
            val_loss_history = []
            val_f1_history = []
            # BUG FIX: initialize so the fallback metrics below never hit an
            # unbound name when the epoch loop does not execute.
            avg_train_loss = 0.0

            self.logger.info(f"开始训练模型，共 {self.num_epochs} 个epoch")
            for epoch in range(self.num_epochs):
                self.model.train()
                total_loss = 0

                for step, batch in enumerate(train_dataloader):
                    input_ids = batch['input_ids'].to(self.device)
                    attention_mask = batch['attention_mask'].to(self.device)
                    token_type_ids = batch['token_type_ids'].to(self.device)
                    labels = batch['labels'].to(self.device)

                    # Forward pass returns the CRF negative log-likelihood.
                    loss = self.model(input_ids, attention_mask, token_type_ids, labels)

                    # Backward pass with gradient clipping for stability.
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
                    optimizer.step()
                    scheduler.step()
                    optimizer.zero_grad()

                    total_loss += loss.item()

                    # Periodic progress log.
                    if (step + 1) % 100 == 0:
                        self.logger.info(f"Epoch {epoch+1}/{self.num_epochs}, Step {step+1}/{len(train_dataloader)}, Loss: {loss.item():.4f}")

                avg_train_loss = total_loss / len(train_dataloader)
                train_loss_history.append(avg_train_loss)
                self.logger.info(f"Epoch {epoch+1}/{self.num_epochs}, 平均训练损失: {avg_train_loss:.4f}")

                # Per-epoch validation.
                if val_dataloader:
                    val_metrics = self._evaluate_dataloader(val_dataloader)
                    val_loss_history.append(val_metrics['loss'])
                    val_f1_history.append(val_metrics['f1'])
                    self.logger.info(f"Epoch {epoch+1}/{self.num_epochs}, 验证损失: {val_metrics['loss']:.4f}, 验证F1: {val_metrics['f1']:.4f}")

            self._plot_training_curves(train_loss_history, val_loss_history, val_f1_history)

            # Report the final validation metrics when available, otherwise
            # fall back to the last training loss.
            if val_dataloader:
                final_metrics = self._evaluate_dataloader(val_dataloader)
            else:
                final_metrics = {'f1': 0.0, 'precision': 0.0, 'recall': 0.0, 'loss': avg_train_loss}

            self.logger.info(f"训练完成，最终指标: {final_metrics}")
            return final_metrics

        except Exception as e:
            self.logger.error(f"训练模型失败: {str(e)}")
            raise

    def predict(self, text: str) -> List[Dict[str, Any]]:
        """Predict entities in ``text``.

        Args:
            text: Input text.

        Returns:
            List[Dict[str, Any]]: Entities with ``type``, ``start_pos``,
            ``end_pos`` and ``text``; empty list on failure.
        """
        try:
            self.model.eval()

            tokens = self.tokenizer.tokenize(text)
            # BUG FIX: cap the sequence length (leave room for [CLS]/[SEP]);
            # the original code would feed arbitrarily long inputs to BERT.
            max_tokens = self.max_seq_length - 2
            if len(tokens) > max_tokens:
                tokens = tokens[:max_tokens]

            # Build model inputs with special tokens.
            input_ids = self.tokenizer.convert_tokens_to_ids(['[CLS]'] + tokens + ['[SEP]'])
            attention_mask = [1] * len(input_ids)
            token_type_ids = [0] * len(input_ids)

            # Add the batch dimension and move to device.
            input_ids = torch.tensor([input_ids]).to(self.device)
            attention_mask = torch.tensor([attention_mask]).to(self.device)
            token_type_ids = torch.tensor([token_type_ids]).to(self.device)

            with torch.no_grad():
                logits = self.model(input_ids, attention_mask, token_type_ids)

            # Viterbi decoding of the label sequence.
            predictions = self.model.decode(logits, attention_mask)

            # Drop the [CLS]/[SEP] predictions before extracting entities.
            entities = self._extract_entities(tokens, predictions[0][1:-1])

            return entities

        except Exception as e:
            self.logger.error(f"预测实体失败: {str(e)}")
            return []

    def evaluate(self, test_data: List[Dict[str, Any]]) -> Dict[str, float]:
        """Evaluate the model on held-out data.

        Args:
            test_data: Test examples in the same format as training data.

        Returns:
            Dict[str, float]: ``loss``, ``precision``, ``recall``, ``f1``.

        Raises:
            Exception: Re-raised after logging if evaluation fails.
        """
        try:
            self.logger.info("开始转换测试数据特征")
            test_features = self.convert_to_features(test_data)
            test_dataset = EntityDataset(test_features)
            test_dataloader = DataLoader(
                test_dataset,
                batch_size=self.batch_size,
                shuffle=False,
                collate_fn=self._collate_fn
            )

            metrics = self._evaluate_dataloader(test_dataloader)
            self.logger.info(f"测试评估完成，指标: {metrics}")
            return metrics

        except Exception as e:
            self.logger.error(f"评估模型失败: {str(e)}")
            raise

    def convert_to_features(self, data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Convert raw examples into padded model input features.

        Tokenization is done character by character so that entity character
        offsets can be mapped onto token positions.

        Args:
            data: Examples with ``text`` and optional ``entities`` (each
                entity has ``type``, ``start_pos``, ``end_pos``, ``text``).

        Returns:
            List[Dict[str, Any]]: Feature dicts with ``input_ids``,
            ``attention_mask``, ``token_type_ids`` and ``labels``, each of
            length ``max_seq_length``.
        """
        features = []

        for item in data:
            text = item['text']
            entities = item.get('entities', [])

            tokens = []
            token_to_char = []  # character offset of each emitted token

            # Tokenize per character; characters the tokenizer cannot map
            # (e.g. some whitespace) are silently skipped.
            for i, char in enumerate(text):
                token = self.tokenizer.tokenize(char)
                if token:
                    tokens.append(token[0])
                    token_to_char.append(i)

            # BIO labels, default "O".
            labels = ['O'] * len(tokens)

            for entity in entities:
                entity_type = entity['type']
                start_pos = entity['start_pos']
                end_pos = entity['end_pos']
                entity_text = entity['text']

                # Map character span -> token span.
                token_start = None
                token_end = None

                for i, char_pos in enumerate(token_to_char):
                    if char_pos == start_pos:
                        token_start = i
                    if char_pos == end_pos - 1:
                        token_end = i
                        break

                if token_start is not None and token_end is not None:
                    labels[token_start] = f'B-{entity_type}'
                    for i in range(token_start + 1, token_end + 1):
                        labels[i] = f'I-{entity_type}'

            # Map tokens/labels to ids, wrapping with [CLS]/[SEP] (both
            # labeled "O").
            input_ids = self.tokenizer.convert_tokens_to_ids(['[CLS]'] + tokens + ['[SEP]'])
            label_ids = [ENTITY_LABELS['O']] + [ENTITY_LABELS.get(l, ENTITY_LABELS['O']) for l in labels] + [ENTITY_LABELS['O']]

            # Truncate or pad to max_seq_length.
            if len(input_ids) > self.max_seq_length:
                input_ids = input_ids[:self.max_seq_length]
                label_ids = label_ids[:self.max_seq_length]
                # BUG FIX: padding_length was undefined on this branch,
                # crashing the attention-mask computation below.
                padding_length = 0
            else:
                padding_length = self.max_seq_length - len(input_ids)
                input_ids += [0] * padding_length  # [PAD] token id
                label_ids += [ENTITY_LABELS['O']] * padding_length

            # 1 for real tokens, 0 for padding.
            attention_mask = [1] * (self.max_seq_length - padding_length) + [0] * padding_length
            token_type_ids = [0] * self.max_seq_length

            features.append({
                'input_ids': input_ids,
                'attention_mask': attention_mask,
                'token_type_ids': token_type_ids,
                'labels': label_ids
            })

        return features

    def _collate_fn(self, batch):
        """Stack a list of feature dicts into batched tensors."""
        return {
            'input_ids': torch.tensor([item['input_ids'] for item in batch]),
            'attention_mask': torch.tensor([item['attention_mask'] for item in batch]),
            'token_type_ids': torch.tensor([item['token_type_ids'] for item in batch]),
            'labels': torch.tensor([item['labels'] for item in batch])
        }

    def _evaluate_dataloader(self, dataloader: DataLoader) -> Dict[str, float]:
        """Compute loss and token-level macro P/R/F1 over a dataloader."""
        self.model.eval()
        total_loss = 0
        all_predictions = []
        all_labels = []

        with torch.no_grad():
            for batch in dataloader:
                input_ids = batch['input_ids'].to(self.device)
                attention_mask = batch['attention_mask'].to(self.device)
                token_type_ids = batch['token_type_ids'].to(self.device)
                labels = batch['labels'].to(self.device)

                # BUG FIX / PERF: run BERT once per batch; the original did
                # two full forward passes (one for loss, one for logits).
                logits = self.model(input_ids, attention_mask, token_type_ids)
                loss = self.model._crf_loss(logits, labels, attention_mask)
                total_loss += loss.item()

                predictions = self.model.decode(logits, attention_mask)

                # Collect predictions/labels, ignoring padded positions.
                for i in range(len(predictions)):
                    mask = attention_mask[i].bool()
                    # BUG FIX: decode() returns plain Python lists, which
                    # cannot be indexed with a boolean tensor — convert first.
                    pred = torch.tensor(predictions[i], device=labels.device)[mask]
                    true_label = labels[i][mask]

                    all_predictions.extend(pred.cpu().numpy())
                    all_labels.extend(true_label.cpu().numpy())

        # Token-level macro-averaged metrics over label ids.
        precision, recall, f1, _ = precision_recall_fscore_support(
            all_labels, all_predictions, average='macro', zero_division=0
        )

        avg_loss = total_loss / len(dataloader)

        return {
            'loss': avg_loss,
            'precision': precision,
            'recall': recall,
            'f1': f1
        }

    def _extract_entities(self, tokens: List[str], predictions: List[int]) -> List[Dict[str, Any]]:
        """Turn a BIO label-id sequence into a list of entity dicts.

        Args:
            tokens: Tokens (without special tokens).
            predictions: Predicted label ids, aligned with ``tokens``.

        Returns:
            List[Dict[str, Any]]: Entities with ``type``, character-level
            ``start_pos``/``end_pos`` and surface ``text``. Character offsets
            assume tokens concatenate without separators (true for
            per-character Chinese tokens; '##' subword markers are stripped).
        """
        entities = []
        current_entity = None

        for i, (token, pred_id) in enumerate(zip(tokens, predictions)):
            label = INVERSE_ENTITY_LABELS.get(pred_id, 'O')

            if label.startswith('B-'):
                # Close any open entity and start a new one.
                if current_entity:
                    entities.append(current_entity)

                entity_type = label[2:]
                current_entity = {
                    'type': entity_type,
                    'start_pos': i,  # token index; converted to chars below
                    'end_pos': i + 1,
                    'text': token
                }

            elif label.startswith('I-') and current_entity:
                entity_type = label[2:]
                if entity_type == current_entity['type']:
                    # Extend the current entity.
                    current_entity['end_pos'] = i + 1
                    current_entity['text'] += token.replace('##', '')  # merge subwords
                else:
                    # BUG FIX: a type-mismatched I- tag previously left the
                    # entity open, letting a later matching I- extend it
                    # across the gap. Close it instead.
                    entities.append(current_entity)
                    current_entity = None

            else:
                # "O": close any open entity.
                if current_entity:
                    entities.append(current_entity)
                    current_entity = None

        # Flush a trailing entity.
        if current_entity:
            entities.append(current_entity)

        # Build token -> character-offset table.
        char_pos = 0
        token_to_char = []

        for token in tokens:
            token_to_char.append(char_pos)
            token_text = token.replace('##', '')  # drop subword marker
            char_pos += len(token_text)

        # Convert token spans to character spans.
        for entity in entities:
            entity['start_pos'] = token_to_char[entity['start_pos']]
            if entity['end_pos'] < len(token_to_char):
                entity['end_pos'] = token_to_char[entity['end_pos']]
            else:
                entity['end_pos'] = char_pos

        return entities

    def _plot_training_curves(self, train_loss: List[float], val_loss: List[float], val_f1: List[float]):
        """Plot loss/F1 curves and save them under ./reports/figures."""
        try:
            fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))

            # Loss curves.
            ax1.plot(train_loss, label='训练损失')
            if val_loss:
                ax1.plot(val_loss, label='验证损失')
            ax1.set_title('损失曲线')
            ax1.set_xlabel('Epoch')
            ax1.set_ylabel('损失')
            ax1.legend()

            # Validation F1 curve.
            if val_f1:
                ax2.plot(val_f1, label='验证F1')
            ax2.set_title('F1分数曲线')
            ax2.set_xlabel('Epoch')
            ax2.set_ylabel('F1分数')
            ax2.legend()

            os.makedirs('./reports/figures', exist_ok=True)
            plt.savefig('./reports/figures/entity_recognition_training_curves.png')
            self.logger.info('训练曲线已保存到 ./reports/figures/entity_recognition_training_curves.png')

            plt.close()

        except Exception as e:
            self.logger.error(f"绘制训练曲线失败: {str(e)}")


class BertCrfForEntityRecognition(torch.nn.Module):
    """BERT encoder + linear classifier + hand-rolled CRF layer."""

    def __init__(self, bert_model_name: str, num_labels: int):
        """Build the model.

        Args:
            bert_model_name: HuggingFace model name for the BERT backbone.
            num_labels: Number of BIO labels.
        """
        super().__init__()
        self.bert = BertModel.from_pretrained(bert_model_name)
        self.dropout = torch.nn.Dropout(0.1)
        self.classifier = torch.nn.Linear(self.bert.config.hidden_size, num_labels)

        # CRF transition scores: crf_transitions[i, j] is the score of
        # moving from label i to label j.
        self.crf_transitions = torch.nn.Parameter(
            torch.randn(num_labels, num_labels)
        )

    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor,
        token_type_ids: torch.Tensor,
        labels: Optional[torch.Tensor] = None
    ):
        """Forward pass.

        Args:
            input_ids: Token ids, shape [batch, seq_len].
            attention_mask: 1 for real tokens, 0 for padding.
            token_type_ids: Segment ids.
            labels: Optional gold label ids, shape [batch, seq_len].

        Returns:
            CRF negative log-likelihood (scalar tensor) when ``labels`` is
            given; otherwise emission logits [batch, seq_len, num_labels].
        """
        outputs = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids
        )

        sequence_output = outputs.last_hidden_state

        # Emission scores per token.
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        if labels is not None:
            loss = self._crf_loss(logits, labels, attention_mask)
            return loss

        return logits

    def _crf_loss(self, logits: torch.Tensor, labels: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
        """CRF negative log-likelihood.

        Args:
            logits: Emission scores [batch, seq_len, num_labels].
            labels: Gold label ids [batch, seq_len].
            mask: Attention mask [batch, seq_len].

        Returns:
            torch.Tensor: Mean negative log-likelihood over the batch.
        """
        # Reshape to [seq_len, batch, ...] for time-major recursion.
        logits = logits.transpose(0, 1)
        labels = labels.transpose(0, 1)
        mask = mask.transpose(0, 1).float()

        seq_len, batch_size = labels.shape

        # Score of the gold path: first emission, then masked
        # emission + transition terms. (Position 0 is assumed unmasked —
        # it is always [CLS] in this pipeline.)
        score = torch.zeros(batch_size, device=logits.device)
        score += logits[0, torch.arange(batch_size), labels[0]]

        for i in range(1, seq_len):
            mask_i = mask[i]
            prev_labels = labels[i - 1]
            curr_labels = labels[i]

            emit_score = logits[i, torch.arange(batch_size), curr_labels]
            trans_score = self.crf_transitions[prev_labels, curr_labels]

            # Padded positions contribute nothing.
            score += mask_i * (emit_score + trans_score)

        # Log-partition over all paths via the forward algorithm.
        log_norm = self._forward_algorithm(logits, mask)

        return -torch.mean(score - log_norm)

    def _forward_algorithm(self, logits: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
        """Forward algorithm: log-sum-exp over all label paths.

        Args:
            logits: Emission scores [seq_len, batch, num_labels] (time-major).
            mask: Mask [seq_len, batch] (time-major).

        Returns:
            torch.Tensor: Log partition function, shape [batch].
        """
        seq_len, batch_size, num_labels = logits.shape

        # alpha[b, j]: log-sum of all path scores ending in label j.
        alpha = logits[0]

        for i in range(1, seq_len):
            # All transitions: [batch, prev_label, curr_label].
            broadcast = alpha.unsqueeze(2) + self.crf_transitions.unsqueeze(0)
            new_alpha = torch.logsumexp(broadcast, dim=1) + logits[i]

            # BUG FIX: the original referenced an uninitialized `alpha_prev`
            # here, crashing on the first iteration. Carry the previous
            # alpha through for padded positions instead.
            mask_i = mask[i].unsqueeze(1)
            alpha = mask_i * new_alpha + (1 - mask_i) * alpha

        return torch.logsumexp(alpha, dim=1)  # [batch]

    def decode(self, logits: torch.Tensor, mask: torch.Tensor) -> List[List[int]]:
        """Viterbi decoding of the most likely label sequence.

        Args:
            logits: Emission scores [batch, seq_len, num_labels].
            mask: Attention mask [batch, seq_len].

        Returns:
            List[List[int]]: Best label path per batch element, padded with
            label id 0 to ``seq_len``.
        """
        # Time-major layout for the recursion.
        logits = logits.transpose(0, 1)
        mask = mask.transpose(0, 1).float()

        seq_len, batch_size, num_labels = logits.shape

        # scores[b, j]: best path score ending in label j so far.
        scores = logits[0]
        paths = []  # backpointers per step

        for i in range(1, seq_len):
            next_scores = scores.unsqueeze(2) + self.crf_transitions.unsqueeze(0)  # [batch, prev, curr]

            max_scores, max_indices = torch.max(next_scores, dim=1)  # [batch, num_labels]

            # BUG FIX: freeze scores on padded positions so the final argmax
            # reflects the last *real* token of each sequence; the original
            # accumulated padding emissions into `scores`.
            mask_i = mask[i].unsqueeze(1)
            scores = mask_i * (max_scores + logits[i]) + (1 - mask_i) * scores

            paths.append(max_indices)

        # Backtrack per batch element.
        predictions = []
        for b in range(batch_size):
            # Index of the last real token.
            seq_end = mask[:, b].sum().long() - 1
            last_label = scores[b].argmax().item()

            pred = [last_label]

            # paths[i] holds backpointers for the transition into step i+1.
            for i in range(seq_end - 1, -1, -1):
                last_label = paths[i][b][last_label].item()
                pred.append(last_label)

            pred.reverse()

            # Pad the tail with label 0 ("O") back to seq_len.
            while len(pred) < seq_len:
                pred.append(0)

            predictions.append(pred)

        return predictions


class EntityDataset(Dataset):
    """Torch dataset over pre-computed entity-recognition features."""

    def __init__(self, features: List[Dict[str, Any]]):
        """Keep a reference to the feature dicts.

        Args:
            features: Per-example feature dictionaries (as produced by
                ``convert_to_features``).
        """
        self.features = features

    def __len__(self) -> int:
        """Number of examples."""
        return len(self.features)

    def __getitem__(self, idx) -> Dict[str, Any]:
        """Feature dict for example ``idx``."""
        return self.features[idx]


# Register the BERT-CRF recognizer with the factory under key 'bert_crf'.
# NOTE(review): the method is named `register_storage`, which reads oddly for
# an entity-recognizer factory — confirm against EntityRecognizerFactory's API.
EntityRecognizerFactory.register_storage('bert_crf', BertCrfEntityRecognizer)