import torch
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence
import pandas as pd
from dataset.vocab import Vocab, tokenizer
import re

# Load the parallel corpus: column 0 = Chinese, column 1 = English.
df = pd.read_csv("./data/translate_chinese2english.csv")
data = df.to_numpy()

# Wrap every English (target) sentence with <bos>/<eos> markers so the
# decoder knows where a sequence starts and ends.
en_data = ["<bos> " + item + " <eos>" for item in data[:, 1]]

# English is tokenized by whitespace-separated words, Chinese by character.
en_tokens = tokenizer(en_data, mode="word")
zh_tokens = tokenizer(data[:, 0], mode="char")
# Second positional arg 0: presumably a minimum-frequency cutoff — TODO
# confirm against the Vocab implementation. '<pad>' is reserved so batches
# can be padded to a common length later.
en_vocab = Vocab(en_tokens, 0, retired_tokens=['<pad>'])
zh_vocab = Vocab(zh_tokens, 0, retired_tokens=['<pad>'])
# Convert each token sequence into a 1-D tensor of vocabulary indices.
en_idx = [torch.tensor(en_vocab.to_idx(line)) for line in en_tokens]
zh_idx = [torch.tensor(zh_vocab.to_idx(line)) for line in zh_tokens]


class TranslateDataset(Dataset):
    """Paired source/target dataset for sequence-to-sequence translation.

    Holds two parallel sequences of pre-encoded sentences and yields
    (source, target) pairs by index.
    """

    def __init__(self, sentence, translate_sentence):
        super().__init__()
        # Parallel containers: sentence[i] translates to translate_sentence[i].
        self.sentence = sentence
        self.translate_sentence = translate_sentence

    def __len__(self):
        # One example per source sentence.
        return len(self.sentence)

    def __getitem__(self, index):
        pair = (self.sentence[index], self.translate_sentence[index])
        return pair


# Pad every sequence in a batch so all tensors share one length.
def collate_fn(batch):
    """Collate (chinese, english) index-tensor pairs into padded batches.

    Each sequence is right-padded with its vocabulary's '<pad>' index up to
    the longest sequence in the batch; returns two (batch, max_len) tensors.
    """
    zh_inputs, en_inputs = zip(*batch)
    zh_pad_id = zh_vocab.to_idx("<pad>")
    en_pad_id = en_vocab.to_idx("<pad>")
    zh_pad = pad_sequence(zh_inputs, batch_first=True, padding_value=zh_pad_id)
    en_pad = pad_sequence(en_inputs, batch_first=True, padding_value=en_pad_id)
    return zh_pad, en_pad


# Build the dataset and a shuffling loader that pads each batch on the fly.
dataset = TranslateDataset(zh_idx, en_idx)
dataloader = DataLoader(dataset, batch_size=20, shuffle=True, collate_fn=collate_fn)

# Smoke check: fetch a single padded batch and print both index tensors.
first_zh, first_en = next(iter(dataloader))
print(first_zh)
print(first_en)
