from torch.utils.data import Dataset
import os 
import torch 
from transformers import BertTokenizerFast

class keywordsDataset(Dataset):
    """Dataset of (text, keyword, label) triples.

    Samples are stored as three consecutive lines per sample in
    ``<flag>.txt`` under ``data_dir``: the text, the keyword string, and an
    integer class label.

    Each item yields:
        input_ids      -- encoded text, shape (max_length,)
        attention_mask -- bool mask, True on real tokens
        keyword_label  -- per-token labels: 1 where the token is part of a
                          keyword occurrence, 0 elsewhere, -100 on
                          special/padding tokens (ignored by the loss)
        label          -- the sample's integer class label
    """

    def __init__(self, data_dir, tokenizer, flag='train', max_length=512) -> None:
        '''
        data_dir: directory containing the data files
        tokenizer: a fast tokenizer (must support encode_plus / tokenize /
                   convert_tokens_to_ids and word_ids() on the encoding)
        flag: one of 'train', 'test'
        max_length: maximum sequence length per sentence
        '''
        assert flag in ['train', 'test'], "flag参数设置错误！"

        self.tokenizer = tokenizer
        self.datas = []  # unused; retained for backward compatibility
        self.max_length = max_length
        self.flag = flag
        self.text = []
        self.keywords = []
        self.lable = []  # (sic) attribute name kept for backward compatibility
        # Build the path to the split's data file.
        self.data_dir = os.path.join(data_dir, flag + '.txt')

        # Samples come in groups of three lines: text, keyword, label.
        with open(self.data_dir, 'r', encoding='utf-8-sig') as fp:
            data = fp.readlines()
            for i in range(0, len(data), 3):
                # Guard against a trailing incomplete group (e.g. a final
                # blank line), which previously raised IndexError.
                if i + 2 >= len(data):
                    break
                self.text.append(data[i].strip())
                self.keywords.append(data[i + 1].strip())
                self.lable.append(int(data[i + 2].strip()))

    def __len__(self):
        return len(self.text)

    def __getitem__(self, idx):
        # Encode the text, padding/truncating to max_length.
        encoded_input = self.tokenizer.encode_plus(
            self.text[idx], max_length=self.max_length, padding="max_length",
            truncation=True, return_tensors="pt"
        )
        input_ids = encoded_input['input_ids'].squeeze(0)

        # Token ids of this sample's keyword.
        # (bug fix: was self.keywords[0] — every sample used the first keyword)
        kwidx = torch.tensor(
            self.tokenizer.convert_tokens_to_ids(
                self.tokenizer.tokenize(self.keywords[idx])
            )
        )

        # Mark every token position covered by an occurrence of the keyword:
        # slide a window of len(kwidx) over input_ids and flag exact matches.
        full_mask = torch.zeros(len(input_ids), dtype=torch.bool)
        for start in range(len(input_ids) - len(kwidx) + 1):
            if torch.equal(input_ids[start:start + len(kwidx)], kwidx):
                full_mask[start:start + len(kwidx)] = True

        # Align per-token labels with the encoding: special/padding tokens
        # (word id None) get -100; real tokens get 1 iff their TOKEN POSITION
        # lies inside a keyword occurrence.
        # (bug fix: the original indexed full_mask by word index, which is
        # shifted relative to token positions because of the [CLS] prefix)
        aligned_labels = [
            -100 if wid is None else int(full_mask[pos])
            for pos, wid in enumerate(encoded_input.word_ids())
        ]
        keyword_label = torch.tensor(aligned_labels)

        attention_mask = (encoded_input['attention_mask'] == 1).squeeze(0)
        # (bug fix: was self.lable[0] — every sample got the first label)
        return input_ids, attention_mask, keyword_label, self.lable[idx]

class hufu_dataset(Dataset):
    """Token-classification dataset.

    Reads one sentence per line from a text file and whitespace-separated
    per-token label strings from ``<split>_label.txt`` in the same directory.
    Each item yields (input_ids, attention_mask, labels), with labels aligned
    to the tokenization and -100 on special/padding tokens.
    """

    def __init__(self, checkpoint, path, vocab_path, split='train', max_length=128):
        '''
        checkpoint: tokenizer checkpoint name or path
        path: text file, one sample per line
        vocab_path: label vocabulary file (whitespace separated)
        split: one of 'train', 'dev'
        max_length: max tokenized length; a falsy value means dynamic padding
        '''
        self.max_length = max_length
        assert split in ['train', 'dev']  # validate the split argument

        # NOTE(review): the original comment claimed special tokens are NOT
        # added, but the tokenizer is loaded with default settings (which do
        # add them) — confirm the intended behavior.
        self.tokenizer = BertTokenizerFast.from_pretrained(checkpoint)

        # Label vocabulary: keep the list (public) plus an index map.
        # (fix: list.index() was an O(V) scan per label per sample)
        with open(vocab_path, 'r', encoding='utf-8') as fp:
            self.vocab = fp.read().split()
        self.vocab_index = {w: i for i, w in enumerate(self.vocab)}

        # Read the text samples, one per line.
        self.text_path = path
        with open(self.text_path, 'r', encoding='utf-8') as fp:
            self.text_lt = [line.strip() for line in fp]

        # Label file lives next to the text file as '<split>_label.txt'.
        self.label_path = os.path.join(os.path.split(path)[0], split + '_label.txt')
        self.label_lt = self.data_lt(self.label_path)

    # Helper: load a 2-D list from a file (one whitespace-split row per line).
    def data_lt(self, data_dir):
        with open(data_dir, 'r', encoding='utf-8') as fp:
            return [line.split() for line in fp]

    # Fetch one sample from the dataset.
    def __getitem__(self, index):
        text = self.tokenizer_fn(self.text_lt[index])

        # Drop the batch dimension added by return_tensors='pt'.
        for k, v in text.items():
            text[k] = v.squeeze(0)

        if self.label_lt:
            # Map label strings to integer ids via the precomputed index.
            labels = [self.vocab_index[lb] for lb in self.label_lt[index]]
            # Align labels with the tokenization; -100 on special/padding
            # tokens (word id None) so the loss ignores them.
            aliged_labels = [-100 if wid is None else labels[wid] for wid in text.word_ids()]
            text['labels'] = torch.tensor(aliged_labels)
        # NOTE: labels are assumed present; raises KeyError otherwise
        # (unchanged from the original behavior).
        return text['input_ids'], text['attention_mask'], text['labels']

    # Tokenize a single caption, with fixed-length or dynamic padding.
    def tokenizer_fn(self, caption):
        if self.max_length:
            return self.tokenizer(caption, padding='max_length', truncation=True,
                                  max_length=self.max_length, return_tensors='pt')
        else:
            return self.tokenizer(caption, padding=True, return_tensors='pt')

    # Number of samples in the dataset.
    def __len__(self):
        return len(self.text_lt)