import os

import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader, RandomSampler, TensorDataset

from model.seq2seq.word import Word


class Seq2SeqProcessor(Dataset):
    """Dataset of tab-separated sentence pairs with train/test DataLoaders.

    Reads ``train.txt``/``test.txt``/``valid.txt``/``all.txt`` from
    ``<raw_dir>/<name>``, builds the input/output vocabularies from
    ``all.txt`` and creates batched DataLoaders for the train and test
    splits.
    """

    def __init__(self, name, raw_dir, max_len=10, batch_size=32):
        self.root_dir = os.path.join(raw_dir, name)
        self.batch_size = batch_size

        # Separate vocabularies for the source and target sides.
        self.in_words = Word("in")
        self.out_words = Word("out")
        self.pairs = []

        # Filled in by process(); the valid loader is currently not built.
        self.train = None
        self.test = None
        self.valid = None

        self.max_len = max_len

        self.process()

    def process(self):
        """Load every split, build the vocabularies and the DataLoaders."""
        splits = {s: os.path.join(self.root_dir, "{}.txt".format(s))
                  for s in ("train", "test", "valid", "all")}

        train_pairs = _read_seq2seq(splits["train"])
        valid_pairs = _read_seq2seq(splits["valid"])
        test_pairs = _read_seq2seq(splits["test"])

        # The vocabulary is built from all.txt rather than from the
        # concatenated splits, so word indices stay stable even when the
        # train/test split changes.
        self.pairs = _read_seq2seq(splits["all"])

        for pair in self.pairs:
            self.in_words.add_sentence(pair[0])
            self.out_words.add_sentence(pair[1])

        self.train = _get_dataloader(
            train_pairs, self.in_words, self.out_words,
            max_length=self.max_len, batch_size=self.batch_size)
        self.test = _get_dataloader(
            test_pairs, self.in_words, self.out_words,
            max_length=self.max_len, batch_size=self.batch_size)

        print("# loading seq2seq dataset...")
        print("# input words: {}".format(self.in_words.n_words))
        print("# output words: {}".format(self.out_words.n_words))
        print("# train: {}".format(len(train_pairs)))
        print("# valid: {}".format(len(valid_pairs)))
        print("# test: {}".format(len(test_pairs)))

    def __getitem__(self, index):
        """Return the raw (input, output) pair at *index* (from all.txt)."""
        return self.pairs[index]

    def __len__(self):
        """Number of pairs read from all.txt."""
        return len(self.pairs)


def _read_seq2seq(filename: str):
    pairs = []
    with open(filename, "r", encoding="utf-8") as f:
        for line in f:
            pair = line.strip().split('\t')
            pairs.append(pair)
    return pairs


def _get_dataloader(pairs, in_word: Word, out_word: Word, max_length=10, batch_size=32, device="cpu"):
    n = len(pairs)
    input_ids = np.zeros((n, max_length), dtype=np.int32)
    output_ids = np.zeros((n, max_length), dtype=np.int32)
    target_ids = np.zeros((n, max_length), dtype=np.int32)

    for idx, (inp, tgt) in enumerate(pairs):
        in_words = inp.split(' ')
        tgt_words = tgt.split(' ')

        if len(in_words) >= max_length or len(tgt_words) >= max_length:
            continue

        inp_ids = [in_word.word2index[w] for w in in_words]
        tgt_ids = [out_word.word2index[w] for w in tgt_words]
        # encoder inputs
        input_ids[idx, :len(inp_ids)] = inp_ids
        input_ids[idx, len(inp_ids):] = [in_word.word2index["EOS"] for i in range(max_length - len(inp_ids))]  # add EOS
        # decoder outputs
        output_ids[idx, :len(tgt_ids)] = tgt_ids
        output_ids[idx, len(tgt_ids):] = [out_word.word2index["EOS"] for i in
                                          range(max_length - len(tgt_ids))]  # add EOS
        # decoder inputs
        target_ids[idx, 1:len(tgt_ids) + 1] = tgt_ids
        target_ids[idx, len(tgt_ids) + 1:] = [out_word.word2index["EOS"] for i in
                                              range(max_length - len(tgt_ids) - 1)]  # add EOS

    data = TensorDataset(
        torch.LongTensor(input_ids).to(device),
        torch.LongTensor(output_ids).to(device),
        torch.LongTensor(target_ids).to(device)
    )
    # sampler = SequentialSampler(data)
    sampler = RandomSampler(data)
    dataloader = DataLoader(data, sampler=sampler, batch_size=batch_size)

    return dataloader


if __name__ == "__main__":
    # Quick smoke test: build the processor against the local dataset dir.
    processor = Seq2SeqProcessor("seq2seq", "../../datasets/datasets4")
