from config import *
from util import *
import torch.utils.data as data
from torch.nn.utils.rnn import pad_sequence
import json
import torch
from model import get_subsequent_mask,get_padding_mask

class Dataset(data.Dataset):
    """Parallel EN->ZH translation dataset.

    Loads sentence pairs from a JSON file (a list of ``[en_text, zh_text]``
    entries) and maps tokens to vocabulary ids on access.
    """

    def __init__(self, type='train'):
        """Load the sample file for the given split and both vocabularies.

        Args:
            type: dataset split, either ``"train"`` or ``"val"``.

        Raises:
            ValueError: if *type* is not a known split.
        """
        super().__init__()
        if type == "train":
            file_path = TRAIN_SAMPLE_PATH
        elif type == "val":
            file_path = VAL_SAMPLE_PATH
        else:
            # Fail fast: previously an unknown split left `file_path` unbound
            # and surfaced as an opaque NameError at open().
            raise ValueError(f"unknown dataset type: {type!r} (expected 'train' or 'val')")
        with open(file_path, encoding='utf-8') as file:
            self.lines = json.loads(file.read())
        # Load vocabularies (token -> id) for source and target languages.
        _, self.en_vocab2id = get_vocab("en")
        _, self.zh_vocab2id = get_vocab("zh")

    def __len__(self):
        """Return the number of sentence pairs."""
        return len(self.lines)

    def __getitem__(self, index):
        """Return (source_ids, target_ids, raw_target_text) for one pair.

        Tokens are lower-cased before lookup; unknown tokens map to UNK_ID.
        """
        en_text, zh_text = self.lines[index]
        source = [self.en_vocab2id.get(v.lower(), UNK_ID) for v in divided_en(en_text)]
        target = [self.zh_vocab2id.get(v.lower(), UNK_ID) for v in divided_zh(zh_text)]
        return source, target, zh_text

    # Pads and collates one batch — sentence lengths differ within a batch,
    # so sequences must be aligned before stacking into tensors.
    def collate_fn(self, batch):
        """Collate a batch of (source, target, text) samples into model inputs.

        Returns:
            src_x: padded encoder input ids, shape (batch, src_len).
            src_mask: encoder padding mask.
            tgt_x: padded decoder input ids (SOS-prefixed), shape (batch, tgt_len).
            tgt_mask: combined padding + subsequent (causal) decoder mask.
            tgt_y: padded decoder labels (EOS-suffixed), shape (batch, tgt_len).
            tgt_text: tuple of raw target sentences.
        """
        batch_src, batch_tgt, tgt_text = zip(*batch)
        # Encoder input: pad to the longest source sequence in the batch.
        src_x = pad_sequence([torch.LongTensor(src) for src in batch_src], True, PAD_ID)
        src_mask = get_padding_mask(src_x, PAD_ID)
        # Decoder input: shift right by prepending SOS, then pad.
        tgt_x = [torch.LongTensor([SOS_ID] + tgt) for tgt in batch_tgt]
        tgt_x = pad_sequence(tgt_x, True, PAD_ID)
        tgt_pad_mask = get_padding_mask(tgt_x, PAD_ID)
        tgt_subsequent_mask = get_subsequent_mask(tgt_x.size(1))
        # Combine padding and causal masks, then normalize to bool.
        tgt_mask = tgt_pad_mask | tgt_subsequent_mask
        tgt_mask = tgt_mask != 0
        # Labels: decoder input shifted left, i.e. target followed by EOS.
        tgt_y = [torch.LongTensor(tgt + [EOS_ID]) for tgt in batch_tgt]
        tgt_y = pad_sequence(tgt_y, True, PAD_ID)
        return src_x, src_mask, tgt_x, tgt_mask, tgt_y, tgt_text

if __name__ == '__main__':
    # Smoke test: build the validation split and print one collated batch.
    dataset = Dataset("val")
    batch_iter = iter(data.DataLoader(dataset, batch_size=2, collate_fn=dataset.collate_fn))
    print(next(batch_iter))

