import json

import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
import pandas as pd
from dataset.vocab import Vocab, tokenizer
import re

# Load (human, assistant) utterance pairs from the training corpus.
# Each line of train.jsonl is one JSON object; only the FIRST turn of each
# conversation is kept -- NOTE(review): later turns are discarded, confirm intended.
data = []
with open("./data/train.jsonl", "r", encoding="utf-8") as f:
    lines = f.readlines()
    for line in lines:
        obj = json.loads(line)
        data.append([obj['conversation'][0]['human'], obj['conversation'][0]['assistant']])
data = np.array(data)  # shape (num_pairs, 2): column 0 = source text, column 1 = target text

# Tokenize with mode="char" -- project tokenizer; presumably splits each
# sentence into individual characters (verify against dataset.vocab).
out_tokens = tokenizer(data[:, 1], mode="char")
in_tokens = tokenizer(data[:, 0], mode="char")
# One shared vocabulary over source+target tokens. The positional 0 is
# presumably a min-frequency threshold, and retired_tokens presumably reserves
# the special symbols -- TODO confirm against Vocab's signature.
vocab = Vocab(in_tokens + out_tokens, 0, retired_tokens=['<pad>', '<bos>', '<eos>'])
bos = [vocab.to_idx('<bos>')]
eos = [vocab.to_idx('<eos>')]
# Target sequences are wrapped with <bos>/<eos>; source sequences are not.
out_idx = [torch.tensor(bos + vocab.to_idx(line) + eos) for line in out_tokens]
in_idx = [torch.tensor(vocab.to_idx(line)) for line in in_tokens]
# Valid lengths are counted BEFORE the <bos>/<eos> wrapping, so each out_idx
# tensor is 2 longer than its recorded out_valid_len.
out_valid_len = [len(line) for line in out_tokens]
in_valid_len = [len(line) for line in in_tokens]



def get_valid_len():
    """Return the per-sentence valid lengths.

    :return: tuple ``(in_valid_len, out_valid_len)`` -- token counts for the
        source and target sentences respectively (counted before any
        ``<bos>``/``<eos>`` wrapping).
    """
    return (in_valid_len, out_valid_len)


def generate_vocab():
    """Return the shared source/target vocabulary built at import time."""
    return vocab


class TranslateDataset(Dataset):
    """Paired-sentence dataset for seq2seq translation.

    Each item is a 4-tuple:
    ``(source indices, target indices, source valid length, target valid length)``.
    """

    def __init__(self, sentence, translate_sentence, in_valid_len, out_valid_len):
        super().__init__()
        self.sentence = sentence
        self.translate_sentence = translate_sentence
        self.in_valid_len = in_valid_len
        self.out_valid_len = out_valid_len

    def __len__(self):
        """Number of sentence pairs in the dataset."""
        return len(self.sentence)

    def __getitem__(self, index):
        """Fetch the pair at *index* together with both valid lengths."""
        item = (
            self.sentence[index],
            self.translate_sentence[index],
            self.in_valid_len[index],
            self.out_valid_len[index],
        )
        return item


# Pad every sequence in a batch to the batch's maximum length.
def collate_fn(batch):
    """Collate dataset items into padded batch tensors.

    :param batch: list of ``(src_idx, tgt_idx, src_len, tgt_len)`` items.
    :return: ``(padded sources, padded targets, source lengths, target
        lengths)``; the two length tuples are passed through un-tensorized.
    """
    sources, targets, src_lens, tgt_lens = zip(*batch)
    pad_idx = vocab.to_idx("<pad>")
    padded_src = pad_sequence(sources, batch_first=True, padding_value=pad_idx)
    padded_tgt = pad_sequence(targets, batch_first=True, padding_value=pad_idx)
    return padded_src, padded_tgt, src_lens, tgt_lens


def generate_loader(batch_size=20):
    """Build a shuffled DataLoader over the module-level tokenized corpus.

    :param batch_size: number of sentence pairs per batch (default 20).
    :return: :class:`DataLoader` yielding padded batches via :func:`collate_fn`.
    """
    ds = TranslateDataset(in_idx, out_idx, in_valid_len, out_valid_len)
    return DataLoader(ds, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
