import pickle
import os
from torch.utils.data import Dataset, DataLoader
from transformers import AutoModel, AutoTokenizer, BertTokenizer

# Maximum tokenized sequence length; batches are padded/truncated to this.
max_len=128
# Root directory holding the REALISE-format CSC data and vocab file.
data_path = "../data/realise/"
vocab_path = os.path.join(data_path, 'vocab.txt')
# Alternative: a pretrained RoBERTa tokenizer (kept for reference).
# tokenizer = AutoTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext")
# Character-level BERT tokenizer built from the local vocab file.
tokenizer = BertTokenizer(vocab_file=vocab_path)


def collate_fn(batch):
    """Collate a batch of (src, tgt) string pairs into model-ready tensors.

    Args:
        batch: iterable of (src, tgt) sentence pairs (raw strings).

    Returns:
        Tuple of:
            src_with_space: list[str], source sentences with characters
                separated by spaces.
            correction_targets: LongTensor of target token ids,
                shape (batch, max_len).
            detection_targets: FloatTensor, 1.0 where src and tgt token ids
                differ (error position), else 0.0, shape (batch, max_len).
            src_tokens: LongTensor of source token ids — needed downstream
                to compute correction precision.
    """
    src, tgt = zip(*batch)
    # Insert a space between adjacent characters so the tokenizer handles
    # each character separately (prevents e.g. "20" being merged into a
    # single token when decoding numbers).
    src_with_space = [' '.join(s) for s in src]
    tgt_with_space = [' '.join(t) for t in tgt]

    src_tokens = tokenizer(src_with_space, padding='max_length', max_length=max_len,
                           return_tensors='pt', truncation=True)['input_ids']
    tgt_tokens = tokenizer(tgt_with_space, padding='max_length', max_length=max_len,
                           return_tensors='pt', truncation=True)['input_ids']

    correction_targets = tgt_tokens
    # NOTE(review): padding/CLS/SEP positions compare equal and so become 0 —
    # presumably masked out later in the loss; confirm at the call site.
    detection_targets = (src_tokens != tgt_tokens).float()
    return src_with_space, correction_targets, detection_targets, src_tokens


class CSCDataset(Dataset):
    """Dataset of (src, tgt) sentence pairs for Chinese spelling correction.

    Each record in the underlying pickle is a dict with at least the keys
    'src' (possibly-misspelled sentence) and 'tgt' (corrected sentence).
    """

    def __init__(self, data_file="../data/realise/trainall.times2.pkl"):
        """Load training pairs from a pickled list of {'src', 'tgt'} dicts.

        Args:
            data_file: path to the pickle file. The default keeps the
                original hard-coded location, so existing callers are
                unaffected.
        """
        super().__init__()
        # NOTE(review): pickle.load executes arbitrary code if the file is
        # untrusted — only load trusted data files.
        with open(data_file, mode='rb') as f:
            self.train_data = pickle.load(f)

    def __getitem__(self, index):
        """Return the (src, tgt) string pair at `index`."""
        item = self.train_data[index]
        return item['src'], item['tgt']

    def __len__(self):
        """Number of training pairs."""
        return len(self.train_data)




if __name__ == '__main__':
    from tqdm import tqdm

    ### Training phase: smoke-test the dataset + dataloader pipeline.
    dataset = CSCDataset()
    # print(dataset.__getitem__(0))

    # Build the dataloader with the custom collate function.
    batch_size = 2
    loader = DataLoader(dataset, batch_size=batch_size,
                        collate_fn=collate_fn, shuffle=True)

    # Inspect a single batch, then stop.
    for src_with_space, correction_targets, detection_targets, src_tokens in loader:
        print("src_with_space: ", src_with_space)
        print("src_tokens: ", src_tokens)
        break

    ### Test phase (kept for reference, disabled).
    # with open("../data/realise/test.sighan15.pkl", mode='br') as test_file:
    #     test_data = pickle.load(test_file)

    #     prograss = tqdm(range(len(test_data)))
    #     for i in prograss:
    #         src, tgt = test_data[i]['src'], test_data[i]['tgt']
    #         # Space out adjacent characters so the tokenizer does not merge
    #         # digit runs like "20" into a single token.
    #         src_with_space=' '.join(list(src))
    #         tgt_with_space=' '.join(list(tgt))

    #         src_tokens = tokenizer(src_with_space, return_tensors='pt', max_length=max_len, truncation=True)['input_ids'][0][1:-1]
    #         tgt_tokens = tokenizer(tgt_with_space, return_tensors='pt', max_length=max_len, truncation=True)['input_ids'][0][1:-1]
    #         if i==297:
    #             print("src: ", src)
    #             print("src_with_space: ", src_with_space)
    #             print("src_tokens: ", src_tokens)
    #             print("len(src): {}, len(src_tokens): {}".format(len(src), len(src_tokens)))