"""
准备数据集
"""
import torch
from torch.utils.data import DataLoader, Dataset
import chatbot.config as config


class DnnSortDataset(Dataset):
    """Dataset of (question, similar-question, target) triples for training
    the DNN ranking (sort) model.

    Each sample is a pair of pre-tokenized lines (whitespace-separated
    tokens) plus an integer relevance target read from parallel files whose
    paths come from ``chatbot.config``.
    """

    def __init__(self, by_word=True):
        """Load the three parallel text files into memory.

        :param by_word: if True, read the word-level tokenized files;
            otherwise read the non-word-level (presumably character-level —
            confirm against the preprocessing step) files.
        """
        q_path = config.sort_q_by_word_path if by_word else config.sort_q_path
        sim_q_path = config.sort_sim_q_by_word_path if by_word else config.sort_sim_q_path
        # Context managers ensure the file handles are closed promptly
        # instead of leaking until garbage collection.
        with open(q_path, encoding="utf-8") as f:
            self.q_lines = f.readlines()
        with open(sim_q_path, encoding="utf-8") as f:
            self.sim_q_lines = f.readlines()
        with open(config.sort_target_path, encoding="utf-8") as f:
            self.target_lines = f.readlines()
        # The three files must be aligned line-for-line.
        assert len(self.q_lines) == len(self.sim_q_lines) == len(self.target_lines), "数据长度不一致了!!!"

    def __getitem__(self, idx):
        """Return ``(q_tokens, sim_q_tokens, target, len_q, len_sim_q)``.

        The two lengths are clipped to the configured maxima so they stay
        consistent with the padding/truncation performed in ``collate_fn``.
        """
        q = self.q_lines[idx].split()
        sim_q = self.sim_q_lines[idx].split()
        target = int(self.target_lines[idx])
        # min() clips the true token count to the configured maximum.
        len_q = min(len(q), config.sort_q_max_len)
        len_sim_q = min(len(sim_q), config.sort_sim_q_max_len)
        return q, sim_q, target, len_q, len_sim_q

    def __len__(self):
        """Number of samples (one per line in the question file)."""
        return len(self.q_lines)


def collate_fn(batch):
    """Collate ``(q, sim_q, target, len_q, len_sim_q)`` samples into three
    LongTensors (padded question ids, padded sim-question ids, targets) on
    ``config.device``.

    The batch is sorted by question length, longest first — presumably so a
    downstream consumer can use pack_padded_sequence; confirm, since the
    lengths themselves are not returned.
    """
    # x[-2] is len_q; sort descending by question length.
    batch = sorted(batch, key=lambda x: x[-2], reverse=True)
    input1, input2, target, input1_length, input2_length = zip(*batch)
    input1 = [config.sort_ws.transform(i, max_len=config.sort_q_max_len) for i in input1]
    # BUG FIX: the similar-question side must be padded/truncated to its OWN
    # maximum (sort_sim_q_max_len), not the question side's — this matches
    # how __getitem__ clips len_sim_q.
    input2 = [config.sort_ws.transform(i, max_len=config.sort_sim_q_max_len) for i in input2]
    return (torch.LongTensor(input1).to(config.device),
            torch.LongTensor(input2).to(config.device),
            torch.LongTensor(target).to(config.device))


# Module-level DataLoader over the word-level (by_word=True default) sort
# dataset. NOTE(review): constructed at import time, so importing this module
# reads all three data files as a side effect — confirm that is intended.
dnnsort_data_loader = DataLoader(DnnSortDataset(), batch_size=config.sort_batch_size, shuffle=True,
                                 collate_fn=collate_fn)
