import torch.utils.data as Data 
import torch
import json 

def get_dataset_info():
    """Load the vocabulary mappings from the on-disk JSON dictionary.

    Returns:
        tuple: (word2id, id2word, vocab_size) where word2id maps token
        string -> integer id, id2word is the inverse mapping, and
        vocab_size is len(word2id).

    Raises:
        FileNotFoundError: if the vocabulary file is missing. (The original
        code only printed a message here and then crashed with a confusing
        NameError on the unbound ``dict_datas``; we now re-raise so callers
        see the real cause.)
    """
    try:
        # ``with`` guarantees the file handle is closed; explicit utf-8
        # because the vocabulary contains non-ASCII (Chinese) tokens.
        with open('/data/whl/cl/gpt2/model/dict_datas.json', 'r', encoding='utf-8') as f:
            dict_datas = json.load(f)
    except FileNotFoundError:
        print("无法找到词汇表文件'dict_datas.json'，请检查文件是否存在。")
        raise  # propagate — returning nothing would just fail later anyway

    word2id, id2word = dict_datas['word2id'], dict_datas['id2word']

    vocab_size = len(word2id)
    return word2id, id2word, vocab_size

class MyDataSet(Data.Dataset):
    """Dataset of pre-tokenized id sequences for decoder-only LM training.

    Each stored item is a sequence of token ids; ``__getitem__`` produces the
    standard shifted (input, target) pair for next-token prediction, and
    ``padding_batch`` right-pads a batch to uniform length with the ``<pad>``
    token id.
    """

    # Cached ``<pad>`` token id, resolved lazily on first use so that the
    # vocabulary JSON is read from disk once per process instead of once
    # per batch (the original code reloaded it in every padding_batch call).
    _pad_id = None

    def __init__(self, datas):
        # datas: sequence of token-id lists — assumed pre-tokenized;
        # TODO(review): confirm element type with the caller.
        self.datas = datas

    def __getitem__(self, item):
        data = self.datas[item]
        # Teacher forcing: input is the sequence minus its last token,
        # target is the sequence shifted left by one.
        decoder_input = data[:-1]
        decoder_output = data[1:]
        decoder_input_len = len(decoder_input)
        decoder_output_len = len(decoder_output)
        return {
            "decoder_input": decoder_input,
            "decoder_input_len": decoder_input_len,
            "decoder_output": decoder_output,
            "decoder_output_len": decoder_output_len
        }

    def __len__(self):
        return len(self.datas)

    @classmethod
    def _get_pad_id(cls):
        # Resolve and memoize the <pad> id; avoids re-parsing the whole
        # vocabulary file for every batch.
        if cls._pad_id is None:
            word2id, _, _ = get_dataset_info()
            cls._pad_id = word2id["<pad>"]
        return cls._pad_id

    def padding_batch(self, batch):
        """Right-pad every sequence in ``batch`` to the batch maximum length.

        Args:
            batch: list of dicts as produced by ``__getitem__``. The
                ``decoder_input`` / ``decoder_output`` lists are extended
                in place.

        Returns:
            tuple of two ``torch.long`` tensors of shape
            (batch, max_len): (decoder_inputs, decoder_outputs).
        """
        pad_id = self._get_pad_id()
        decoder_input_lens = [d["decoder_input_len"] for d in batch]
        decoder_output_lens = [d["decoder_output_len"] for d in batch]
        decoder_input_maxlen = max(decoder_input_lens)
        decoder_output_maxlen = max(decoder_output_lens)
        for d in batch:
            d["decoder_input"].extend([pad_id] * (decoder_input_maxlen - d["decoder_input_len"]))
            d["decoder_output"].extend([pad_id] * (decoder_output_maxlen - d["decoder_output_len"]))
        decoder_inputs = torch.tensor([d["decoder_input"] for d in batch], dtype=torch.long)
        decoder_outputs = torch.tensor([d["decoder_output"] for d in batch], dtype=torch.long)
        return decoder_inputs, decoder_outputs