"""实体识别数据集（标注格式转换）"""
"""实体识别数据集（标注格式转换）"""
import json
import torch
from torch.utils.data import Dataset
from typing import List, Dict, Any
from transformers import AutoTokenizer
from fin_senti_entity_platform.utils.constants import ENTITY_TYPES
from fin_senti_entity_platform.utils.logger import get_logger

logger = get_logger(__name__)

class EntityDataset(Dataset):
    """Entity-recognition dataset that converts span annotations to BIO labels.

    Each sample in the JSON file is expected to be a dict with a ``text``
    string and an ``entities`` list of ``{"type", "start", "end"}`` spans
    (character offsets, ``end`` exclusive — presumably; verify against the
    annotation tool that produced the data).
    """

    def __init__(self,
                 data_path: str,
                 tokenizer: AutoTokenizer,
                 max_length: int = 512,
                 labels: List[str] = None):
        """
        Initialize the entity-recognition dataset.

        Args:
            data_path: Path to the JSON data file.
            tokenizer: Tokenizer used to encode text. Must be a "fast"
                tokenizer, since alignment relies on ``return_offsets_mapping``.
            max_length: Maximum sequence length (inputs are padded/truncated
                to exactly this length).
            labels: Label list. If omitted, a default BIO label set is built
                from ``ENTITY_TYPES``. (Previously ``label_to_id`` was left
                as ``None`` in that case and ``__getitem__`` crashed.)
        """
        self.tokenizer = tokenizer
        self.max_length = max_length
        if labels is None:
            # Build the canonical BIO label set from the configured entity
            # types, as the docstring promises.
            labels = ['O']
            for entity_type in ENTITY_TYPES:
                labels.append(f'B-{entity_type}')
                labels.append(f'I-{entity_type}')
        self.labels = labels
        self.label_to_id = {label: i for i, label in enumerate(self.labels)}
        self.data = self._load_data(data_path)

    def _load_data(self, data_path: str) -> List[Dict[str, Any]]:
        """Load the JSON dataset; log and re-raise on failure."""
        try:
            with open(data_path, 'r', encoding='utf-8') as f:
                data = json.load(f)
            logger.info(f"成功加载实体识别数据集，共 {len(data)} 条样本")
            return data
        except Exception as e:
            logger.error(f"加载数据集失败: {e}")
            raise

    def _convert_entities_to_bio(self, text: str, entities: List[Dict[str, Any]]) -> List[str]:
        """Convert an entity span list to per-character BIO labels.

        Entities with an unknown type are skipped; span offsets are clamped
        into ``[0, len(text)]``. Later entities overwrite earlier ones on
        overlapping characters.
        """
        # Every character defaults to "outside".
        bio_labels = ['O'] * len(text)

        for entity in entities:
            entity_type = entity.get('type', '')
            if entity_type not in ENTITY_TYPES:
                continue

            start_pos = entity.get('start', 0)
            end_pos = entity.get('end', len(text))

            # Clamp offsets so malformed annotations cannot index out of
            # range (or negatively).
            start_pos = max(0, start_pos)
            end_pos = min(len(text), end_pos)

            if start_pos >= end_pos:
                continue

            # B- on the first character, I- on the rest of the span.
            bio_labels[start_pos] = f'B-{entity_type}'
            for i in range(start_pos + 1, end_pos):
                bio_labels[i] = f'I-{entity_type}'

        return bio_labels

    def _align_labels_with_tokens(self, text: str, bio_labels: List[str], token_ids: List[int]) -> List[int]:
        """Align character-level BIO labels to token-level label ids.

        Strategy: each token takes the label of its first character. Special
        tokens (offset ``(0, 0)``) and padding positions get ``-100`` so they
        are ignored by the loss.

        Args:
            text: Original sample text.
            bio_labels: Per-character BIO labels for ``text``.
            token_ids: Unused; kept for signature compatibility (alignment is
                driven by the tokenizer's offset mapping instead).

        Returns:
            A label-id list of exactly ``self.max_length`` entries.
        """
        # Re-tokenize to obtain character offsets for each token.
        token_offsets = self.tokenizer(text, return_offsets_mapping=True, max_length=self.max_length, truncation=True)
        offsets = token_offsets['offset_mapping']

        aligned_labels = [self.label_to_id['O']] * len(offsets)

        for i, (start, end) in enumerate(offsets):
            # Special tokens (CLS/SEP/...) report a (0, 0) offset — exclude
            # them from loss computation.
            if start == 0 and end == 0:
                aligned_labels[i] = -100
                continue

            # Take the label of the token's first character.
            if start < len(bio_labels):
                token_label = bio_labels[start]
                if token_label in self.label_to_id:
                    aligned_labels[i] = self.label_to_id[token_label]
                else:
                    aligned_labels[i] = self.label_to_id['O']

        # Pad/truncate to max_length. Padding positions use -100 (not the
        # 'O' id) so padded tokens do not contribute to the loss; the
        # previous 'O' padding biased training toward the 'O' class.
        if len(aligned_labels) < self.max_length:
            aligned_labels += [-100] * (self.max_length - len(aligned_labels))
        else:
            aligned_labels = aligned_labels[:self.max_length]

        return aligned_labels

    def __len__(self) -> int:
        """Return the number of samples."""
        return len(self.data)

    def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
        """Return one encoded sample with token-level labels.

        Returns:
            Dict with ``input_ids``, ``attention_mask``, ``token_type_ids``
            (zeros if the tokenizer does not produce them) and ``labels``,
            all of length ``max_length``.
        """
        item = self.data[idx]
        text = item.get('text', '')
        entities = item.get('entities', [])

        # Character-level BIO labels for the raw text.
        bio_labels = self._convert_entities_to_bio(text, entities)

        encoding = self.tokenizer(
            text,
            max_length=self.max_length,
            padding='max_length',
            truncation=True,
            return_tensors='pt'
        )

        # Project character labels onto the token sequence.
        token_ids = encoding['input_ids'][0].tolist()
        aligned_labels = self._align_labels_with_tokens(text, bio_labels, token_ids)

        return {
            'input_ids': encoding['input_ids'].squeeze(),
            'attention_mask': encoding['attention_mask'].squeeze(),
            'token_type_ids': encoding['token_type_ids'].squeeze() if 'token_type_ids' in encoding else torch.zeros_like(encoding['input_ids']).squeeze(),
            'labels': torch.tensor(aligned_labels, dtype=torch.long)
        }


def entity_collate_fn(batch: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
    """
    Collate entity-recognition samples into batched tensors.

    Args:
        batch: List of per-sample tensor dicts.

    Returns:
        Dict with stacked ``input_ids``, ``attention_mask`` and ``labels``,
        plus ``token_type_ids`` when the samples provide it (some model
        families do not use it).
    """
    # Stack the always-present fields along a new batch dimension.
    collated = {
        field: torch.stack([sample[field] for sample in batch])
        for field in ('input_ids', 'attention_mask', 'labels')
    }

    # token_type_ids is optional — include it only when present.
    if 'token_type_ids' in batch[0]:
        collated['token_type_ids'] = torch.stack(
            [sample['token_type_ids'] for sample in batch]
        )

    return collated


def convert_json_to_bio_format(data_path: str, output_path: str):
    """
    Convert span-annotated JSON data to a character-level BIO text file.

    Output format: one ``<char> <label>`` pair per line, sentences separated
    by a blank line.

    Args:
        data_path: Input JSON file path.
        output_path: Output BIO-format file path.

    Raises:
        Exception: Re-raised after logging when loading, converting, or
            writing fails.
    """
    try:
        with open(data_path, 'r', encoding='utf-8') as f:
            data = json.load(f)

        bio_lines = []
        for item in data:
            text = item.get('text', '')
            entities = item.get('entities', [])

            bio_labels = ['O'] * len(text)
            for entity in entities:
                entity_type = entity.get('type', '')
                # Clamp offsets into [0, len(text)]: previously a negative
                # ``start`` slipped past the guard and wrote the B- tag via
                # negative indexing at the wrong end of the string. This
                # matches the clamping EntityDataset applies.
                start = max(0, entity.get('start', 0))
                end = min(len(text), entity.get('end', len(text)))

                if start < end:
                    bio_labels[start] = f'B-{entity_type}'
                    for i in range(start + 1, end):
                        bio_labels[i] = f'I-{entity_type}'

            for char, label in zip(text, bio_labels):
                bio_lines.append(f"{char} {label}")
            bio_lines.append('')  # blank line between sentences

        with open(output_path, 'w', encoding='utf-8') as f:
            f.write('\n'.join(bio_lines))

        logger.info(f"成功将 {data_path} 转换为BIO格式并保存到 {output_path}")
    except Exception as e:
        logger.error(f"格式转换失败: {e}")
        raise