import torch
from config import BASE_DIR
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from vocab import Vocab, build_vocab_from_file
from configs.model_config import device, train_data_path, params


class Seq2SeqDataset(Dataset):
    """Parallel-corpus dataset yielding (source, target) index-sequence pairs.

    Each line of the source/target files is whitespace-tokenized and mapped
    to vocabulary indices once, at construction time.
    """

    def __init__(self, src_file_path, src_vocab: Vocab, tgt_file_path, tgt_vocab: Vocab):
        super(Seq2SeqDataset, self).__init__()
        self.src_sentences = self._load_sentences(src_file_path, src_vocab)
        self.tgt_sentences = self._load_sentences(tgt_file_path, tgt_vocab)
        # NOTE(review): the two files are assumed line-aligned and equally
        # long; __len__ reflects only the source side — confirm upstream.

    @staticmethod
    def _load_sentences(file_path, vocab: Vocab):
        # Convert each whitespace-tokenized line into a list of vocab indices.
        # Paths are resolved relative to the project root (BASE_DIR).
        with open(BASE_DIR / file_path) as file:
            return [vocab.words2indices(line.split()) for line in file]

    def __getitem__(self, index):
        """Return the (src_indices, tgt_indices) pair at *index*."""
        return self.src_sentences[index], self.tgt_sentences[index]

    def __len__(self):
        return len(self.src_sentences)

    @staticmethod
    def pad_collate(batch: list, padding_value: int = 2):
        """Collate (src, tgt) index-list pairs into padded LongTensor batches.

        Args:
            batch: list of (src_indices, tgt_indices) pairs from __getitem__.
            padding_value: vocab index used to pad short sequences; defaults
                to 2, preserving the original hard-coded behavior — pass the
                vocabulary's actual pad index to generalize.

        Returns:
            (padded_src, src_lengths, padded_tgt, tgt_lengths) where the
            length lists hold the un-padded sequence lengths (e.g. for
            pack_padded_sequence).
        """
        batch_src = [torch.tensor(src, dtype=torch.long, device=device) for src, _ in batch]
        batch_tgt = [torch.tensor(tgt, dtype=torch.long, device=device) for _, tgt in batch]
        batch_src_len = [len(src) for src in batch_src]
        batch_tgt_len = [len(tgt) for tgt in batch_tgt]
        batch_src = pad_sequence(batch_src, batch_first=True, padding_value=padding_value)
        batch_tgt = pad_sequence(batch_tgt, batch_first=True, padding_value=padding_value)
        return batch_src, batch_src_len, batch_tgt, batch_tgt_len


# Build one vocabulary per side of the parallel corpus.
src_vocab = build_vocab_from_file(BASE_DIR / 'data' / 'src.vocab')
# BUG FIX: the target vocabulary was loaded from 'src.vocab' (copy-paste
# error); it must come from the target-side vocab file. (If a shared/joint
# vocabulary was actually intended, a single build call should be reused
# for both variables instead.)
tgt_vocab = build_vocab_from_file(BASE_DIR / 'data' / 'tgt.vocab')

dataset = Seq2SeqDataset(
    # Seq2SeqDataset.__init__ already prefixes BASE_DIR, so pass the paths
    # relative to it. The old BASE_DIR / <absolute path> form only worked
    # because pathlib discards the left operand when the right is absolute.
    src_file_path=params['data']['train']['src'],
    src_vocab=src_vocab,
    tgt_file_path=params['data']['train']['tgt'],
    tgt_vocab=tgt_vocab)

train_data_loader = DataLoader(dataset,
                               batch_size=params['sp']['batch_size'],
                               shuffle=True,
                               # All sequences within a batch must share one
                               # length, so pad per batch at collate time.
                               collate_fn=Seq2SeqDataset.pad_collate,
                               drop_last=True)

if __name__ == '__main__':
    # Smoke test: print the first example of each batch with its true length.
    for src, src_len, tgt, tgt_len in train_data_loader:
        print(src[0], src_len[0], tgt[0], tgt_len[0])