import torch
from torch.utils.data import Dataset
from text_preprocessing import preprocess_text, combine_text_features
from data_loading import load_syndrome_mapping


class TCMBertDataset(Dataset):
    """BERT dataset for TCM (Traditional Chinese Medicine) syndrome
    differentiation.

    Each sample combines tokenized free text with an integer syndrome
    label and, optionally, a float vector of structured TCM features
    (columns prefixed ``symptom_``, ``tongue_`` or ``pulse_``).
    """

    def __init__(self, df, tokenizer, max_length=128,
                 use_structured_features=True,
                 syndrome_mapping_path='syndrome.json'):
        """
        Args:
            df: DataFrame with TCM syndrome-differentiation data. Must
                contain a 'syndrome' column; a 'combined_text' column is
                derived (from the raw text fields) if absent.
            tokenizer: BERT tokenizer — anything exposing ``encode_plus``.
            max_length: maximum token length for the encoded text.
            use_structured_features: whether to expose structured TCM
                feature vectors in each item.
            syndrome_mapping_path: path to the syndrome->id mapping file.
                Defaults to 'syndrome.json' (previously hard-coded).
        """
        super().__init__()
        # Derive the preprocessed combined-text column if missing.
        # Work on a copy so the caller's DataFrame is never mutated.
        if 'combined_text' not in df.columns:
            df = df.copy()
            df['combined_text'] = df.apply(combine_text_features, axis=1)
            df['combined_text'] = df['combined_text'].apply(preprocess_text)

        self.df = df
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.use_structured_features = use_structured_features

        # Build the syndrome -> integer-id label list.
        # NOTE(review): syndromes absent from the mapping become NaN here
        # and will fail at __getitem__ time — verify mapping covers data.
        _, self.syndrome_to_id, _ = load_syndrome_mapping(syndrome_mapping_path)
        self.labels = self.df['syndrome'].map(self.syndrome_to_id).tolist()

        # Collect structured TCM feature columns (str.startswith accepts
        # a tuple of prefixes — one call instead of a chained `or`).
        prefixes = ('symptom_', 'tongue_', 'pulse_')
        self.structured_feature_names = [
            col for col in df.columns if col.startswith(prefixes)
        ]
        if use_structured_features and self.structured_feature_names:
            self.structured_features = df[self.structured_feature_names].values
        else:
            self.structured_features = None

    def __len__(self):
        """Return the number of samples in the dataset."""
        return len(self.df)

    def __getitem__(self, idx):
        """Return one sample as a dict of tensors.

        Keys: 'input_ids', 'attention_mask', 'label', and
        'structured_features' when structured features are enabled.
        """
        text = self.df['combined_text'].iloc[idx]
        label = self.labels[idx]

        # Encode to fixed length: padded/truncated to max_length with
        # special tokens, returned as (1, max_length) tensors.
        encoding = self.tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            max_length=self.max_length,
            padding='max_length',
            truncation=True,
            return_attention_mask=True,
            return_tensors='pt'
        )

        # Flatten the batch dimension added by return_tensors='pt'.
        item = {
            'input_ids': encoding['input_ids'].flatten(),
            'attention_mask': encoding['attention_mask'].flatten(),
            'label': torch.tensor(label, dtype=torch.long)
        }

        # Attach the structured feature vector for this sample.
        if self.use_structured_features and self.structured_features is not None:
            item['structured_features'] = torch.tensor(
                self.structured_features[idx],
                dtype=torch.float32
            )

        return item


# Example usage
def _demo():
    """Smoke-test: build the training dataset and inspect one sample."""
    from data_loading import load_data
    from transformers import BertTokenizer

    # Load the training split and a Chinese BERT tokenizer.
    train_df = load_data('train.json')
    tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')

    # Construct the dataset with structured features enabled.
    dataset = TCMBertDataset(train_df, tokenizer, use_structured_features=True)
    print(f"数据集大小: {len(dataset)}")

    # Look at the first sample's tensors.
    sample = dataset[0]
    print(f"输入ID形状: {sample['input_ids'].shape}")
    print(f"标签: {sample['label']}")
    if 'structured_features' in sample:
        print(f"结构化特征形状: {sample['structured_features'].shape}")


if __name__ == "__main__":
    _demo()