from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset, DataLoader
import torch
from tokenizer import ChineseCharacterTokenizer, TitleTokenizer, open_jsonl_file
import model1
# Module-level tokenizers shared by DoubanDataset and collate_fn below.
charset_tokenizer = ChineseCharacterTokenizer()
charset_tokenizer.load_vocab()  # load the persisted character vocabulary from disk
title_tokenizer = TitleTokenizer()

# Fixed width (in tokens) that every comment is padded/truncated to in collate_fn.
SEQ_LEN = 25

class DoubanDataset(Dataset):
    """Dataset pairing Douban comments with their title labels.

    Each element of ``dict_list`` must be a dict carrying at least the
    keys ``'title'`` and ``'comment'``.  Raw strings are stored and
    tokenized lazily in ``__getitem__``.
    """

    def __init__(self, dict_list):
        # Parallel lists of raw strings; index i of each refers to the same record.
        self.titles = [dic['title'] for dic in dict_list]
        self.comments = [dic['comment'] for dic in dict_list]

    def __getitem__(self, index):
        # Returns (comment_ids, title_id) as tensors for one sample.
        title = title_tokenizer.encode(self.titles[index])
        comment = charset_tokenizer.encode(self.comments[index])
        # NOTE(review): comment ids are forced to int32 while the title tensor
        # keeps the inferred default dtype — confirm this asymmetry is intended.
        return torch.tensor(comment, dtype=torch.int32), torch.tensor(title)

    def __len__(self):
        return len(self.titles)


def collate_fn(batch):
    """Collate ``(comment_ids, title_id)`` pairs into padded batch tensors.

    Args:
        batch: list of ``(comment, title)`` tensor tuples produced by
            ``DoubanDataset.__getitem__``.

    Returns:
        padded_sentences: ``(batch, SEQ_LEN)`` float32 tensor of comment
            token ids, right-padded with ``charset_tokenizer.PAD_ID`` and
            truncated to ``SEQ_LEN``.
        data_lens: 1-D tensor of effective sequence lengths, clamped to
            ``SEQ_LEN``.
        targets: 1-D tensor of title labels, one per sample.
    """
    data_list, target_list = zip(*batch)
    # BUG FIX: lengths must be clamped to SEQ_LEN — the original reported the
    # untruncated length, which exceeds the padded width for long comments and
    # breaks consumers such as pack_padded_sequence.
    data_lens = torch.tensor([min(len(item), SEQ_LEN) for item in data_list])
    # Start from a tensor pre-filled with PAD_ID, then copy each (possibly
    # truncated) sequence into its row.
    # NOTE(review): float32 for token ids is unusual (embedding layers expect
    # int64) — kept as-is to preserve the existing interface; verify downstream.
    padded_sentences = torch.full((len(data_list), SEQ_LEN),
                                  charset_tokenizer.PAD_ID,
                                  dtype=torch.float32)
    for i, s in enumerate(data_list):
        length = min(len(s), SEQ_LEN)
        padded_sentences[i, :length] = s[:length].clone().detach()
    # BUG FIX: torch.tensor(list_of_tensors) is deprecated and rejected by
    # recent PyTorch; stack the per-sample label tensors instead.
    targets = torch.stack([t.detach() for t in target_list])
    return padded_sentences, data_lens, targets

def divide_data(jsonl_path, dev_ratio=0.2, test_ratio=0.1):
    """Split the records of *jsonl_path* into train/dev/test lists.

    The split is positional (no shuffling): the leading
    ``1 - dev_ratio - test_ratio`` fraction becomes train, the next
    ``dev_ratio`` fraction dev, and the remainder test.

    Returns:
        ``(train_list, dev_list, test_list)`` — three lists of dicts.
    """
    dict_list = open_jsonl_file(jsonl_path)
    num = len(dict_list)
    train_ratio = 1 - dev_ratio - test_ratio
    # Compute each cut point once; the expressions mirror the originals
    # exactly so floating-point rounding of the indices is unchanged.
    train_end = int(num * train_ratio)
    dev_end = int(num * (train_ratio + dev_ratio))
    return dict_list[:train_end], dict_list[train_end:dev_end], dict_list[dev_end:]


def get_dataloader(jsonl_path, batch_size):
    """Build train/dev/test DataLoaders over the records in *jsonl_path*.

    Args:
        jsonl_path: path to the JSONL file of scraped records.
        batch_size: batch size used for all three loaders.

    Returns:
        ``(train_loader, dev_loader, test_loader)`` — all three share the
        module-level ``collate_fn``.
    """
    def _make_loader(dict_list, shuffle):
        # One DataLoader per split, wrapping the raw record list in a dataset.
        return DataLoader(DoubanDataset(dict_list), batch_size=batch_size,
                          shuffle=shuffle, collate_fn=collate_fn)

    train_list, dev_list, test_list = divide_data(jsonl_path)
    # FIX: dev/test are evaluation splits — iterating them in a fixed order
    # keeps evaluation reproducible; only the training split is shuffled.
    return (_make_loader(train_list, shuffle=True),
            _make_loader(dev_list, shuffle=False),
            _make_loader(test_list, shuffle=False))

if __name__ == '__main__':
    # Smoke test: build the three loaders from the scraped/cleaned data file.
    train_loader, dev_loader, test_loader = get_dataloader('../my_spider_douban_formal/output/clean_data.jsonl', 4)