"""情感分析数据集（实体-情感标注）"""
"""情感分析数据集（实体-情感标注）"""
import json
import torch
from torch.utils.data import Dataset
from typing import List, Dict, Any
from transformers import AutoTokenizer
from fin_senti_entity_platform.utils.constants import ENTITY_TYPES, SENTIMENT_LABELS
from fin_senti_entity_platform.utils.logger import get_logger

logger = get_logger(__name__)

class SentimentDataset(Dataset):
    """Dataset of entity-level sentiment annotations.

    Each raw sample is expected to be a dict with keys ``text`` (str),
    ``entities`` (list of dicts with ``type``/``start``/``end`` character
    offsets) and ``sentiment`` (label string) — inferred from the keys read
    in ``__getitem__``; confirm against the data-generation side.
    """

    def __init__(self,
                 data_path: str,
                 tokenizer: AutoTokenizer,
                 max_length: int = 512,
                 entity_types: List[str] = None,
                 sentiment_labels: List[str] = None):
        """
        Initialize the sentiment dataset.

        Args:
            data_path: Path to a JSON file containing a list of samples.
            tokenizer: HuggingFace tokenizer used to encode the text.
            max_length: Maximum token sequence length (pad/truncate target).
            entity_types: Entity type names; falls back to ENTITY_TYPES.
            sentiment_labels: Sentiment label names; falls back to SENTIMENT_LABELS.
        """
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.entity_types = entity_types or ENTITY_TYPES
        self.sentiment_labels = sentiment_labels or SENTIMENT_LABELS
        # Label -> class index; index order follows sentiment_labels.
        self.sentiment_to_id = {label: i for i, label in enumerate(self.sentiment_labels)}
        self.data = self._load_data(data_path)

    def _load_data(self, data_path: str) -> List[Dict[str, Any]]:
        """Load the JSON sample list from *data_path*; log and re-raise on failure."""
        try:
            with open(data_path, 'r', encoding='utf-8') as f:
                data = json.load(f)
            logger.info(f"成功加载数据集，共 {len(data)} 条样本")
            return data
        except Exception as e:
            logger.error(f"加载数据集失败: {e}")
            raise

    def _process_entity_mask(self, text: str, entities: List[Dict[str, Any]], token_ids: List[int]) -> torch.Tensor:
        """Build a 0/1 mask marking each entity's token span, per entity type.

        Args:
            text: Raw sample text.
            entities: Entity dicts with 'type' and character 'start'/'end'.
            token_ids: Encoded input ids for the sample (currently unused;
                kept for interface stability).

        Returns:
            Float tensor of shape (len(self.entity_types), self.max_length).
        """
        entity_mask = torch.zeros((len(self.entity_types), self.max_length))
        # NOTE(review): tokenize() output may not line up index-for-index with
        # the ids produced by tokenizer.__call__ in __getitem__ (which inserts
        # special tokens such as [CLS]), so mask positions can be shifted by
        # the number of leading special tokens — confirm for the tokenizer in
        # use, or switch to return_offsets_mapping for exact alignment.
        tokens = self.tokenizer.tokenize(text, add_special_tokens=True)

        for entity in entities:
            entity_type = entity.get('type')
            if entity_type not in self.entity_types:
                # Skip annotation types this dataset is not configured to track.
                continue

            entity_type_idx = self.entity_types.index(entity_type)
            start_pos = entity.get('start', 0)
            end_pos = entity.get('end', len(text))

            # Locate the entity's token span by naive sub-sequence matching.
            # This finds the FIRST occurrence only, so repeated mentions of
            # the same surface form may be mislabeled.
            entity_tokens = self.tokenizer.tokenize(text[start_pos:end_pos])
            entity_start_idx = None
            for i in range(len(tokens) - len(entity_tokens) + 1):
                if tokens[i:i + len(entity_tokens)] == entity_tokens:
                    entity_start_idx = i
                    break

            if entity_start_idx is not None:
                # Clamp to max_length. The slice end is exclusive, so the
                # bound is max_length itself; the previous `max_length - 1`
                # wrongly dropped the final maskable position.
                entity_end_idx = min(entity_start_idx + len(entity_tokens), self.max_length)
                entity_mask[entity_type_idx, entity_start_idx:entity_end_idx] = 1

        return entity_mask

    def __len__(self) -> int:
        """Return the number of samples."""
        return len(self.data)

    def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
        """Encode sample *idx* into model-ready tensors.

        Returns:
            Dict with 'input_ids', 'attention_mask', 'token_type_ids'
            (zeros when the tokenizer does not emit them), 'entity_mask'
            and scalar 'sentiment_label'.
        """
        item = self.data[idx]
        text = item.get('text', '')
        entities = item.get('entities', [])
        sentiment = item.get('sentiment', '中性')

        # Tokenize with fixed-length padding/truncation to max_length.
        encoding = self.tokenizer(
            text,
            max_length=self.max_length,
            padding='max_length',
            truncation=True,
            return_tensors='pt'
        )

        # Per-entity-type token span mask, (num_entity_types, max_length).
        entity_mask = self._process_entity_mask(text, entities, encoding['input_ids'][0].tolist())

        # Fall back to index 0 for unknown labels (presumably the neutral
        # class — TODO confirm the ordering of SENTIMENT_LABELS).
        sentiment_id = self.sentiment_to_id.get(sentiment, 0)

        # squeeze(0) drops only the batch dim added by return_tensors='pt';
        # a bare squeeze() would also strip any other size-1 dim.
        return {
            'input_ids': encoding['input_ids'].squeeze(0),
            'attention_mask': encoding['attention_mask'].squeeze(0),
            'token_type_ids': encoding['token_type_ids'].squeeze(0) if 'token_type_ids' in encoding else torch.zeros_like(encoding['input_ids']).squeeze(0),
            'entity_mask': entity_mask,
            'sentiment_label': torch.tensor(sentiment_id, dtype=torch.long)
        }


def sentiment_collate_fn(batch: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
    """
    Collate per-sample dicts from SentimentDataset into batched tensors.

    Args:
        batch: List of sample dicts, each holding 'input_ids',
            'attention_mask', 'entity_mask', 'sentiment_label' and
            optionally 'token_type_ids'.

    Returns:
        Dict of stacked tensors keyed 'input_ids', 'attention_mask',
        'entity_mask', 'sentiment_labels', plus 'token_type_ids' only
        when the samples carry it.
    """
    def _stacked(key: str) -> torch.Tensor:
        # Stack the same field across all samples along a new batch dim.
        return torch.stack([sample[key] for sample in batch])

    collated = {
        'input_ids': _stacked('input_ids'),
        'attention_mask': _stacked('attention_mask'),
        'entity_mask': _stacked('entity_mask'),
        'sentiment_labels': _stacked('sentiment_label'),
    }

    # Some tokenizers/models do not use token_type_ids; include them
    # only when the first sample provides the field.
    if 'token_type_ids' in batch[0]:
        collated['token_type_ids'] = _stacked('token_type_ids')

    return collated