"""
BERT-CRF实体识别模型实现
结合BERT和CRF进行高精度实体识别
"""
import os
from typing import Dict, List, Any, Optional

import torch
from torch import nn
from torch.optim import AdamW
from torch.utils.data import DataLoader, Dataset
from sklearn.metrics import classification_report, f1_score
from transformers import BertModel, BertTokenizer, BertTokenizerFast, get_linear_schedule_with_warmup

from fin_senti_entity_platform.model_development.entity_recognition.base_ner_model import BaseNERModel, NERModelFactory
from fin_senti_entity_platform.utils.logger import Logger
from fin_senti_entity_platform.utils.config_loader import ConfigLoader
from fin_senti_entity_platform.utils.constants import ENTITY_TYPES


class BertCRFNER(BaseNERModel):
    """BERT-CRF named-entity recognition model.

    A linear layer on top of a pretrained BERT encoder produces per-token
    emission scores, and a CRF layer scores tag transitions so that decoded
    label sequences respect BIO constraints.
    """

    def __init__(self, config: Dict[str, Any]):
        """
        Initialize the BERT-CRF NER model.

        Args:
            config: Model configuration. Recognized keys: bert_model_name,
                num_labels, max_seq_length, batch_size, learning_rate,
                epochs, model_save_dir.
        """
        super().__init__(config)
        self.logger = Logger().get_logger(__name__)

        # Prefer GPU when one is available.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.logger.info(f"使用设备: {self.device}")

        # Hyper-parameters (defaults target Chinese-language BERT).
        self.bert_model_name = config.get('bert_model_name', 'bert-base-chinese')
        self.num_labels = config.get('num_labels', len(ENTITY_TYPES))
        self.max_seq_length = config.get('max_seq_length', 256)
        self.batch_size = config.get('batch_size', 32)
        self.learning_rate = config.get('learning_rate', 2e-5)
        self.epochs = config.get('epochs', 3)

        # Label <-> id mappings. NOTE(review): ENTITY_TYPES is assumed to
        # contain BIO-style tags ('O', 'B-XXX', 'I-XXX') — the span decoding
        # below depends on that; confirm against utils.constants.
        self.label_to_id = {label: i for i, label in enumerate(ENTITY_TYPES)}
        self.id_to_label = {i: label for label, i in self.label_to_id.items()}

        # Build tokenizer, encoder, classifier head and CRF layer.
        self._init_model()

    def _init_model(self):
        """Create the tokenizer, BERT encoder, classifier head and CRF layer."""
        # A *fast* tokenizer is required here: NERDataset requests
        # return_offsets_mapping from encode_plus, which the slow
        # Python-based BertTokenizer does not support (it raises
        # NotImplementedError).
        self.tokenizer = BertTokenizerFast.from_pretrained(self.bert_model_name)
        self.bert_model = BertModel.from_pretrained(self.bert_model_name)

        # Linear head mapping BERT hidden states to per-label emission scores.
        self.classifier = nn.Linear(self.bert_model.config.hidden_size, self.num_labels)

        # CRF layer over the emission scores.
        self.crf = CRF(self.num_labels, batch_first=True)

        # Move all sub-modules to the selected device.
        self.bert_model = self.bert_model.to(self.device)
        self.classifier = self.classifier.to(self.device)
        self.crf = self.crf.to(self.device)

    def train(self, train_data: List[Dict[str, Any]], val_data: Optional[List[Dict[str, Any]]] = None) -> Dict[str, float]:
        """
        Train the BERT-CRF NER model.

        Args:
            train_data: Training samples shaped like
                [{"text": str, "entities": [{"start": int, "end": int, "type": str}]}].
            val_data: Optional validation samples in the same format.

        Returns:
            Dict[str, float]: Final validation metrics when val_data is
            given, otherwise {'loss': average loss of the last epoch}.

        Raises:
            Exception: Re-raises any error encountered during training.
        """
        try:
            self.logger.info(f"开始训练BERT-CRF实体识别模型，训练数据量: {len(train_data)}")

            # Dataset and loader.
            train_dataset = NERDataset(train_data, self.tokenizer, self.label_to_id, self.max_seq_length)
            train_loader = DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True, collate_fn=collate_fn)

            # All trainable parameters: encoder, classifier head and CRF.
            trainable_params = (
                list(self.bert_model.parameters())
                + list(self.classifier.parameters())
                + list(self.crf.parameters())
            )
            optimizer = AdamW(trainable_params, lr=self.learning_rate, eps=1e-8)

            total_steps = len(train_loader) * self.epochs
            scheduler = get_linear_schedule_with_warmup(
                optimizer,
                num_warmup_steps=0,
                num_training_steps=total_steps
            )

            best_val_f1 = 0.0

            for epoch in range(self.epochs):
                self.logger.info(f"Epoch {epoch + 1}/{self.epochs}")

                # Switch every sub-module into training mode.
                self.bert_model.train()
                self.classifier.train()
                self.crf.train()

                total_loss = 0

                for step, batch in enumerate(train_loader):
                    # Move batch tensors to the device.
                    input_ids = batch['input_ids'].to(self.device)
                    attention_mask = batch['attention_mask'].to(self.device)
                    labels = batch['labels'].to(self.device)

                    # Emission scores from BERT + linear head.
                    outputs = self.bert_model(input_ids=input_ids, attention_mask=attention_mask)
                    sequence_output = outputs[0]
                    logits = self.classifier(sequence_output)

                    # CRF.forward already returns the (positive) mean negative
                    # log-likelihood, so it is the loss directly. Negating it
                    # again — as the previous code did — would *maximize* the
                    # NLL. A bool mask is required by torch.where inside CRF.
                    loss = self.crf(logits, labels, mask=attention_mask.bool())
                    total_loss += loss.item()

                    # Backward pass and optimization step.
                    loss.backward()
                    # Clip gradients of everything being optimized, not just BERT.
                    torch.nn.utils.clip_grad_norm_(trainable_params, 1.0)
                    optimizer.step()
                    scheduler.step()
                    optimizer.zero_grad()

                    # Periodic progress logging.
                    if (step + 1) % 10 == 0:
                        self.logger.info(f"Step {step + 1}/{len(train_loader)}, Loss: {loss.item():.4f}")

                avg_loss = total_loss / len(train_loader)
                self.logger.info(f"Epoch {epoch + 1} 平均损失: {avg_loss:.4f}")

                # Evaluate on the validation set and checkpoint the best model.
                if val_data:
                    val_metrics = self.evaluate(val_data)
                    self.logger.info(f"验证集指标: {val_metrics}")

                    if val_metrics['f1_score'] > best_val_f1:
                        best_val_f1 = val_metrics['f1_score']
                        model_save_dir = self.config.get('model_save_dir', './model_saves/ner_model')
                        self.save(model_save_dir)
                        self.logger.info(f"保存最佳模型，验证集F1分数: {best_val_f1:.4f}")

            # Mark the model as trained.
            self.is_trained = True

            # Report final metrics.
            if val_data:
                return self.evaluate(val_data)
            return {'loss': avg_loss}

        except Exception as e:
            self.logger.error(f"模型训练失败: {str(e)}")
            raise

    def predict(self, text: str) -> List[Dict[str, Any]]:
        """
        Predict the entities contained in a single text.

        Args:
            text: Input text.

        Returns:
            List[Dict[str, Any]]: Entities with 'text', 'type', 'start', 'end'.
        """
        if not self.is_trained:
            self.logger.warning("模型尚未训练，请先训练模型或加载预训练模型")

        # Encode the text into fixed-length model inputs.
        inputs = self.tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            max_length=self.max_seq_length,
            padding='max_length',
            truncation=True,
            return_tensors='pt'
        )

        input_ids = inputs['input_ids'].to(self.device)
        attention_mask = inputs['attention_mask'].to(self.device)

        # Inference mode for every sub-module.
        self.bert_model.eval()
        self.classifier.eval()
        self.crf.eval()

        with torch.no_grad():
            outputs = self.bert_model(input_ids=input_ids, attention_mask=attention_mask)
            sequence_output = outputs[0]
            logits = self.classifier(sequence_output)

            # Viterbi decode; torch.where inside the CRF requires a bool mask.
            predicted_labels = self.crf.decode(logits, mask=attention_mask.bool())[0]

        # Turn the tag-id sequence into entity spans on the original text.
        return self._convert_labels_to_entities(text, input_ids[0].tolist(), predicted_labels)

    def batch_predict(self, texts: List[str]) -> List[List[Dict[str, Any]]]:
        """
        Predict entities for a list of texts in mini-batches.

        Args:
            texts: Input texts.

        Returns:
            List[List[Dict[str, Any]]]: One entity list per input text.
        """
        results = []

        # Inference mode for every sub-module (set once, not per batch).
        self.bert_model.eval()
        self.classifier.eval()
        self.crf.eval()

        for i in range(0, len(texts), self.batch_size):
            batch_texts = texts[i:i + self.batch_size]

            # Encode the whole mini-batch at once.
            inputs = self.tokenizer.batch_encode_plus(
                batch_texts,
                add_special_tokens=True,
                max_length=self.max_seq_length,
                padding='max_length',
                truncation=True,
                return_tensors='pt'
            )

            input_ids = inputs['input_ids'].to(self.device)
            attention_mask = inputs['attention_mask'].to(self.device)

            with torch.no_grad():
                outputs = self.bert_model(input_ids=input_ids, attention_mask=attention_mask)
                sequence_output = outputs[0]
                logits = self.classifier(sequence_output)

                # Viterbi decode for the whole batch (bool mask for torch.where).
                predicted_labels = self.crf.decode(logits, mask=attention_mask.bool())

            # Convert each sequence's tags into entity spans.
            for j, text in enumerate(batch_texts):
                entities = self._convert_labels_to_entities(
                    text,
                    input_ids[j].tolist(),
                    predicted_labels[j]
                )
                results.append(entities)

        return results

    def _convert_labels_to_entities(self, text: str, input_ids: List[int], predicted_labels: List[int]) -> List[Dict[str, Any]]:
        """
        Convert a predicted tag-id sequence into entity spans on `text`.

        Args:
            text: Original input text.
            input_ids: Token ids fed to the model (used to recover tokens).
            predicted_labels: Predicted tag ids, one per token.

        Returns:
            List[Dict[str, Any]]: Entities with 'text', 'type', 'start', 'end'.
        """
        entities = []
        current_entity = None

        tokens = self.tokenizer.convert_ids_to_tokens(input_ids)

        # First pass: group consecutive B-/I- tags into token-level entities.
        for i, (token, label_id) in enumerate(zip(tokens, predicted_labels)):
            # Skip special tokens.
            if token in ['[CLS]', '[SEP]', '[PAD]']:
                continue

            label = self.id_to_label[label_id]

            if label.startswith('B-'):
                # Flush any entity still in progress, then open a new one.
                if current_entity:
                    entities.append(current_entity)

                entity_type = label[2:]
                current_entity = {
                    'text': token.replace('##', ''),  # strip WordPiece marker
                    'type': entity_type,
                    'start': i,
                    'end': i + 1
                }

            elif label.startswith('I-') and current_entity:
                entity_type = label[2:]
                # Extend only when the type matches the open entity.
                if entity_type == current_entity['type']:
                    current_entity['text'] += token.replace('##', '')
                    current_entity['end'] = i + 1

            else:
                # 'O' (or an orphan 'I-'): close any open entity.
                if current_entity:
                    entities.append(current_entity)
                    current_entity = None

        # Flush the last open entity.
        if current_entity:
            entities.append(current_entity)

        # Second pass: map token indices back to character offsets.
        # NOTE(review): this assumes the concatenated token texts reproduce
        # the original string exactly (no normalization, no dropped chars,
        # no whitespace) — plausible for Chinese text, but verify for inputs
        # containing spaces or [UNK] tokens.
        char_pos = 0
        token_to_char = []

        for token in tokens:
            if token in ['[CLS]', '[SEP]', '[PAD]']:
                token_to_char.append(-1)  # special tokens have no char position
            else:
                token_text = token.replace('##', '')
                token_to_char.append(char_pos)
                char_pos += len(token_text)

        # Rewrite each entity's token span as a character span.
        for entity in entities:
            start = entity['start']
            end = entity['end']

            # Skip over special-token positions at either edge.
            while start < len(token_to_char) and token_to_char[start] == -1:
                start += 1

            while end > 0 and token_to_char[end - 1] == -1:
                end -= 1

            if start < end:
                entity['start'] = token_to_char[start]
                entity['end'] = token_to_char[end - 1] + len(entity['text'])

                # Re-read the span from the original text when in range.
                if entity['start'] < len(text) and entity['end'] <= len(text):
                    entity['text'] = text[entity['start']:entity['end']]

        # Drop entities whose mapped positions fall outside the text.
        entities = [e for e in entities if 'start' in e and e['start'] >= 0 and e['end'] <= len(text)]

        return entities

    def save(self, path: str) -> bool:
        """
        Save model weights, label mappings and tokenizer to a directory.

        Args:
            path: Target directory (created if missing).

        Returns:
            bool: True on success, False on failure.
        """
        try:
            os.makedirs(path, exist_ok=True)

            # One checkpoint file holds all sub-module weights plus metadata.
            torch.save({
                'bert_model_state_dict': self.bert_model.state_dict(),
                'classifier_state_dict': self.classifier.state_dict(),
                'crf_state_dict': self.crf.state_dict(),
                'label_to_id': self.label_to_id,
                'id_to_label': self.id_to_label,
                'config': self.config
            }, os.path.join(path, 'model.pt'))

            # The tokenizer is stored alongside the checkpoint.
            self.tokenizer.save_pretrained(path)

            self.logger.info(f"模型成功保存到: {path}")
            return True

        except Exception as e:
            self.logger.error(f"模型保存失败: {str(e)}")
            return False

    def load(self, path: str) -> bool:
        """
        Load model weights, label mappings and tokenizer from a directory.

        Args:
            path: Directory previously written by save().

        Returns:
            bool: True on success, False on failure.
        """
        try:
            checkpoint = torch.load(os.path.join(path, 'model.pt'), map_location=self.device)

            # Rebuild the sub-modules, then restore their weights.
            self.bert_model = BertModel.from_pretrained(self.bert_model_name)
            self.bert_model.load_state_dict(checkpoint['bert_model_state_dict'])

            self.classifier = nn.Linear(self.bert_model.config.hidden_size, self.num_labels)
            self.classifier.load_state_dict(checkpoint['classifier_state_dict'])

            self.crf = CRF(self.num_labels, batch_first=True)
            self.crf.load_state_dict(checkpoint['crf_state_dict'])

            # Restore the label mappings.
            self.label_to_id = checkpoint['label_to_id']
            self.id_to_label = checkpoint['id_to_label']

            # Fast tokenizer — required by NERDataset's offset mapping.
            self.tokenizer = BertTokenizerFast.from_pretrained(path)

            # Move all sub-modules to the selected device.
            self.bert_model = self.bert_model.to(self.device)
            self.classifier = self.classifier.to(self.device)
            self.crf = self.crf.to(self.device)

            # A loaded model is ready for inference.
            self.is_trained = True

            self.logger.info(f"模型成功加载自: {path}")
            return True

        except Exception as e:
            self.logger.error(f"模型加载失败: {str(e)}")
            return False

    @staticmethod
    def _spans_to_char_bio(length: int, spans: List[Dict[str, Any]]) -> List[str]:
        """Paint entity spans onto a character-level BIO label list of `length`."""
        labels = ['O'] * length
        for entity in spans:
            start, end, entity_type = entity['start'], entity['end'], entity['type']
            # Ignore spans that fall outside the text.
            if 0 <= start < end <= length:
                labels[start] = f'B-{entity_type}'
                for j in range(start + 1, end):
                    labels[j] = f'I-{entity_type}'
        return labels

    def evaluate(self, test_data: List[Dict[str, Any]]) -> Dict[str, float]:
        """
        Evaluate the model on character-level BIO labels.

        Args:
            test_data: Samples in the same format as train().

        Returns:
            Dict[str, float]: f1_score (weighted), f1_micro, f1_macro,
            precision and recall.

        Raises:
            Exception: Re-raises any error during evaluation.
        """
        try:
            self.logger.info(f"开始评估模型，测试数据量: {len(test_data)}")

            # Predict every test text.
            texts = [item['text'] for item in test_data]
            all_predictions = self.batch_predict(texts)

            true_labels = []
            pred_labels = []

            # Compare gold and predicted spans as char-level BIO sequences.
            for item, predictions in zip(test_data, all_predictions):
                text = item['text']
                true_labels.extend(self._spans_to_char_bio(len(text), item.get('entities', [])))
                pred_labels.extend(self._spans_to_char_bio(len(text), predictions))

            # Aggregate metrics over all characters.
            report = classification_report(true_labels, pred_labels, output_dict=True)
            f1_micro = f1_score(true_labels, pred_labels, average='micro')
            f1_macro = f1_score(true_labels, pred_labels, average='macro')

            metrics = {
                'f1_score': report['weighted avg']['f1-score'],
                'f1_micro': f1_micro,
                'f1_macro': f1_macro,
                'precision': report['weighted avg']['precision'],
                'recall': report['weighted avg']['recall']
            }

            self.logger.info(f"模型评估完成，指标: {metrics}")
            return metrics

        except Exception as e:
            self.logger.error(f"模型评估失败: {str(e)}")
            raise


# CRF layer implementation
class CRF(nn.Module):
    """Linear-chain conditional random field.

    `forward` returns the batch-mean *negative* log-likelihood (a positive
    quantity suitable for minimization); `decode` runs Viterbi decoding and
    returns the best-scoring tag sequence for each batch element.
    """

    def __init__(self, num_tags: int, batch_first: bool = False):
        """
        Initialize the CRF layer.

        Args:
            num_tags: Size of the tag set.
            batch_first: If True, inputs are (batch, seq, ...);
                otherwise (seq, batch, ...).
        """
        super().__init__()

        self.num_tags = num_tags
        self.batch_first = batch_first

        # transitions[i][j] is the score of moving from tag i to tag j.
        self.transitions = nn.Parameter(torch.randn(num_tags, num_tags))

        # Scores for beginning / ending a sequence with each tag.
        self.start_transitions = nn.Parameter(torch.randn(num_tags))
        self.end_transitions = nn.Parameter(torch.randn(num_tags))

        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize all transition parameters."""
        nn.init.xavier_normal_(self.transitions)
        nn.init.normal_(self.start_transitions)
        nn.init.normal_(self.end_transitions)

    def forward(self, emissions: torch.Tensor, tags: torch.Tensor, mask: Optional[torch.ByteTensor] = None) -> torch.Tensor:
        """
        Compute the negative log-likelihood of `tags` under the CRF.

        Args:
            emissions: Emission scores, (batch, seq, num_tags) when
                batch_first, else (seq, batch, num_tags).
            tags: Gold tag ids matching the first two dims of emissions.
            mask: Validity mask, same shape as tags; bool or uint8.

        Returns:
            torch.Tensor: Scalar mean negative log-likelihood (>= 0).
        """
        if mask is None:
            mask = torch.ones_like(tags, dtype=torch.bool, device=tags.device)
        # torch.where below requires a boolean condition tensor; coerce so
        # that uint8 masks (e.g. attention_mask.byte()) still work.
        mask = mask.bool()

        if self.batch_first:
            emissions = emissions.transpose(0, 1)
            tags = tags.transpose(0, 1)
            mask = mask.transpose(0, 1)

        # Score of the gold path.
        numerator = self._compute_path_score(emissions, tags, mask)

        # Log partition function over all possible paths.
        denominator = self._compute_partition_function(emissions, mask)

        # NLL = log Z - score(gold path), averaged over the batch.
        return -(numerator - denominator).mean()

    def decode(self, emissions: torch.Tensor, mask: Optional[torch.ByteTensor] = None) -> List[List[int]]:
        """
        Find the best tag sequence for each batch element via Viterbi.

        Args:
            emissions: Emission scores, (batch, seq, num_tags) when
                batch_first, else (seq, batch, num_tags).
            mask: Validity mask over the first two dims; bool or uint8.

        Returns:
            List[List[int]]: Decoded tag ids. In batch_first mode each list
            is trimmed to its sequence's valid (unpadded) length.
        """
        if mask is None:
            mask = torch.ones(emissions.shape[:2], dtype=torch.bool, device=emissions.device)
        mask = mask.bool()  # torch.where needs a boolean condition

        if self.batch_first:
            emissions = emissions.transpose(0, 1)
            mask = mask.transpose(0, 1)

        seq_length, batch_size = emissions.shape[:2]

        # viterbi_scores[t, b, j]: best score of any path ending in tag j at t.
        viterbi_scores = torch.zeros(seq_length, batch_size, self.num_tags, device=emissions.device)
        # backpointers[t, b, j]: predecessor tag of that best path.
        backpointers = torch.zeros(seq_length, batch_size, self.num_tags, dtype=torch.long, device=emissions.device)

        # First step: start-transition plus first emission.
        viterbi_scores[0] = self.start_transitions + emissions[0]

        for i in range(1, seq_length):
            # Score of arriving at each tag j from each previous tag k.
            candidate = viterbi_scores[i - 1].unsqueeze(2) + self.transitions.unsqueeze(0)

            best_prev_score, best_prev_tag = candidate.max(dim=1)

            viterbi_scores[i] = best_prev_score + emissions[i]
            backpointers[i] = best_prev_tag

            # Padded steps carry the previous scores forward unchanged.
            step_mask = mask[i].unsqueeze(1).expand_as(viterbi_scores[i])
            viterbi_scores[i] = torch.where(step_mask, viterbi_scores[i], viterbi_scores[i - 1])
            backpointers[i] = torch.where(step_mask, backpointers[i], torch.arange(self.num_tags, device=emissions.device).unsqueeze(0))

        # Index of the last valid step of each sequence.
        seq_ends = mask.sum(dim=0) - 1

        predictions = []

        for b in range(batch_size):
            end_idx = seq_ends[b].item()

            # Add the end-transition scores at the last valid step.
            final_scores = viterbi_scores[end_idx, b] + self.end_transitions
            best_tag = final_scores.argmax().item()

            # Follow the backpointers from the end to the start.
            path = [best_tag]
            for i in range(end_idx, 0, -1):
                best_tag = backpointers[i, b, best_tag].item()
                path.append(best_tag)
            path.reverse()

            # Pad with tag 0 so every path has length seq_length.
            while len(path) < seq_length:
                path.append(0)

            predictions.append(path)

        if self.batch_first:
            # Trim each path to its valid length. The mask was transposed to
            # (seq, batch) above, so the valid length of sequence b is the
            # column sum seq_ends[b] + 1 — indexing mask[b] here (as the
            # previous code did) summed over *time step* b instead.
            predictions = [p[:seq_ends[b].item() + 1] for b, p in enumerate(predictions)]

        return predictions

    def _compute_path_score(self, emissions: torch.Tensor, tags: torch.Tensor, mask: torch.ByteTensor) -> torch.Tensor:
        """
        Score the given tag sequences (internal seq-first layout).

        Args:
            emissions: Emission scores, (seq, batch, num_tags).
            tags: Tag ids, (seq, batch).
            mask: Boolean validity mask, (seq, batch).

        Returns:
            torch.Tensor: Per-sequence path score, shape (batch,).
        """
        seq_length, batch_size = emissions.shape[:2]

        # Start-transition plus first emission score.
        score = self.start_transitions[tags[0]]
        score += emissions[0].gather(1, tags[0].unsqueeze(1)).squeeze(1)

        # Accumulate transition + emission scores for the remaining steps.
        for i in range(1, seq_length):
            mask_i = mask[i]
            if mask_i.sum() == 0:
                continue  # every sequence is already past its last step

            transition_score = self.transitions[tags[i - 1], tags[i]]
            emission_score = emissions[i].gather(1, tags[i].unsqueeze(1)).squeeze(1)

            # Masked (padded) positions contribute nothing.
            score += (transition_score + emission_score) * mask_i

        # Add end-transition scores at each sequence's last valid step.
        seq_ends = mask.sum(dim=0) - 1
        for b in range(batch_size):
            last_tag = tags[seq_ends[b], b]
            score[b] += self.end_transitions[last_tag]

        return score

    def _compute_partition_function(self, emissions: torch.Tensor, mask: torch.ByteTensor) -> torch.Tensor:
        """
        Log partition function via the forward algorithm (seq-first layout).

        Args:
            emissions: Emission scores, (seq, batch, num_tags).
            mask: Boolean validity mask, (seq, batch).

        Returns:
            torch.Tensor: log Z per sequence, shape (batch,).
        """
        seq_length, batch_size = emissions.shape[:2]

        # forward[b, j]: log-sum score of all paths ending in tag j so far.
        forward = self.start_transitions + emissions[0]

        for i in range(1, seq_length):
            broadcast_forward = forward.unsqueeze(2)
            broadcast_transitions = self.transitions.unsqueeze(0)
            broadcast_emissions = emissions[i].unsqueeze(1)

            # scores[b, k, j]: path mass ending at tag k, transition k -> j,
            # plus the emission of tag j at step i.
            scores = broadcast_forward + broadcast_transitions + broadcast_emissions

            new_forward = torch.logsumexp(scores, dim=1)

            # Padded steps leave the forward variables untouched, so after
            # the loop `forward` already holds each sequence's last valid state.
            forward = torch.where(mask[i].unsqueeze(1), new_forward, forward)

        # Add end-transition scores and reduce over the final tag.
        final_forward = forward + self.end_transitions

        return torch.logsumexp(final_forward, dim=1)


# Dataset and data-loading helpers
class NERDataset(Dataset):
    """Token-classification dataset that aligns character-level entity
    spans to token-level BIO label ids via tokenizer offset mappings."""
    
    def __init__(self, data: List[Dict[str, Any]], tokenizer, label_to_id: Dict[str, int], max_seq_length: int):
        """
        Initialize the NER dataset.
        
        Args:
            data: Samples shaped like
                [{"text": str, "entities": [{"start": int, "end": int, "type": str}]}].
            tokenizer: HuggingFace tokenizer. NOTE(review): __getitem__ asks
                encode_plus for return_offsets_mapping, which only *fast*
                (Rust-backed) tokenizers support — slow BertTokenizer raises
                NotImplementedError. Confirm a fast tokenizer is passed in.
            label_to_id: Mapping from BIO label string to integer id.
            max_seq_length: Fixed sequence length to pad/truncate to.
        """
        self.data = data
        self.tokenizer = tokenizer
        self.label_to_id = label_to_id
        self.max_seq_length = max_seq_length
    
    def __len__(self):
        """Return the number of samples."""
        return len(self.data)
    
    def __getitem__(self, idx: int) -> Dict[str, Any]:
        """
        Build model inputs and token-level labels for one sample.
        
        Args:
            idx: Sample index.
        
        Returns:
            Dict[str, Any]: 'input_ids', 'attention_mask' and token-level
            'labels' — plain Python lists; collate_fn tensorizes them.
        """
        item = self.data[idx]
        text = item['text']
        entities = item.get('entities', [])
        
        # Character-level BIO labels, defaulting to 'O'.
        labels = ['O'] * len(text)
        
        # Paint each entity span onto the character labels.
        for entity in entities:
            start = entity['start']
            end = entity['end']
            entity_type = entity['type']
            
            # Silently skip spans that fall outside the text.
            if 0 <= start < end <= len(text):
                # Entity start gets the B- tag...
                labels[start] = f'B-{entity_type}'
                # ...and interior characters get I- tags.
                for i in range(start + 1, end):
                    labels[i] = f'I-{entity_type}'
                    
        # Encode the text; offsets are needed to align char labels to tokens.
        encoding = self.tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            max_length=self.max_seq_length,
            padding='max_length',
            truncation=True,
            return_token_type_ids=False,
            return_attention_mask=True,
            return_offsets_mapping=True
        )
        
        # Project character-level labels onto tokens via the offset mapping.
        token_labels = []
        offset_mapping = encoding['offset_mapping']
        
        # Label for the leading [CLS] token.
        token_labels.append(self.label_to_id.get('O', 0))
        
        # Remaining tokens; [1:-1] drops [CLS] and the final token only —
        # [SEP]/padding tokens in between have (0, 0) offsets and fall into
        # the start == end branch below.
        prev_end = -1
        for start, end in offset_mapping[1:-1]:  # skip [CLS] and the last token
            if start == end:  # special/padding token, no source characters
                token_labels.append(self.label_to_id.get('O', 0))
            else:
                # Use the label of the token's first character; WordPiece
                # continuation pieces thereby inherit the span's label.
                token_label = labels[start]
                
                # Check whether the token is really the start of an entity.
                if token_label.startswith('B-'):
                    # A gap before this token means a fresh entity start.
                    if start > prev_end:
                        token_labels.append(self.label_to_id.get(token_label, 0))
                    else:
                        # NOTE(review): adjacency (start == prev_end) demotes
                        # the B- to I-. For contiguous text with no spaces
                        # (typical Chinese), nearly every token is adjacent to
                        # the previous one, so genuine B- starts may also be
                        # demoted — verify this is the intended alignment rule.
                        entity_type = token_label[2:]
                        token_labels.append(self.label_to_id.get(f'I-{entity_type}', 0))
                else:
                    token_labels.append(self.label_to_id.get(token_label, 0))
                    
                prev_end = end
                
        # Label for the trailing [SEP] token.
        token_labels.append(self.label_to_id.get('O', 0))
        
        # Pad the label sequence up to the fixed length...
        while len(token_labels) < self.max_seq_length:
            token_labels.append(self.label_to_id.get('O', 0))
            
        # ...and truncate in case it overshot.
        token_labels = token_labels[:self.max_seq_length]
        
        return {
            'input_ids': encoding['input_ids'],
            'attention_mask': encoding['attention_mask'],
            'labels': token_labels
        }


def collate_fn(batch: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
    """
    Collate per-sample fields into batched tensors for the DataLoader.

    Args:
        batch: List of samples as produced by NERDataset.__getitem__,
            each carrying list-valued 'input_ids', 'attention_mask'
            and 'labels'.

    Returns:
        Dict[str, torch.Tensor]: The same three fields, each stacked into
        a single (batch_size, seq_length) tensor.
    """
    stacked: Dict[str, torch.Tensor] = {}
    for field in ('input_ids', 'attention_mask', 'labels'):
        stacked[field] = torch.tensor([sample[field] for sample in batch])
    return stacked


# Register the BERT-CRF implementation with the NER model factory under
# the key 'bert_crf'.
NERModelFactory.register_model('bert_crf', BertCRFNER)