"""
准备数据集
"""
import torch
from torch.utils.data import DataLoader, Dataset

import config


class DnnsortData(Dataset):
    """Dataset of (question tokens, similar-question tokens, label) triples.

    Reads three parallel line-oriented files configured in ``config``:
    one question per line, one similar question per line, and one integer
    label per line. Line *i* of each file belongs to the same sample.
    """

    def __init__(self) -> None:
        # Load all three files into memory up front; the files are
        # line-aligned, so index i addresses one full sample.
        with open(config.sort_q_data_path, mode='r', encoding="UTF-8") as f_q:
            self.q_lines = f_q.readlines()
        with open(config.sort_sim_q_data_path, mode='r', encoding="UTF-8") as f_sim_q:
            self.sim_q_lines = f_sim_q.readlines()
        with open(config.sort_v_data_path, mode='r', encoding="UTF-8") as f_v:
            self.v_lines = f_v.readlines()

        # Guard against misaligned data files (message kept as-is).
        assert len(self.q_lines) == len(self.sim_q_lines) == len(self.v_lines), "输入值数量和目标值数量不一致"

    def __getitem__(self, index: int) -> tuple:
        """Return ``(question_tokens, similar_question_tokens, label)``.

        The two token fields are lists of whitespace-separated strings;
        the label is an ``int`` parsed from the value file.
        """
        input1_data = self.q_lines[index].strip().split()
        input2_data = self.sim_q_lines[index].strip().split()
        target_data = int(self.v_lines[index].strip())
        return input1_data, input2_data, target_data

    def __len__(self) -> int:
        """Number of samples (one per line of the question file)."""
        return len(self.q_lines)


def collate_fn(batch):
    """Collate ``DnnsortData`` samples into batched LongTensors.

    :param batch: list of ``(input1_data, input2_data, target_data)`` tuples,
        where the first two items are token lists and the last is an int label.
    :return: ``(input1_data, input2_data, target_data)`` — three
        ``torch.LongTensor``s; the token tensors are encoded to id sequences
        of length ``config.sort_max_len``.
    """
    # 1. Unzip the list of sample tuples into three parallel sequences.
    input1_data, input2_data, target_data = zip(*batch)

    # 2. Encode each token list to a fixed-length id sequence
    #    (padded/truncated to config.sort_max_len), using the word-sequence
    #    model matching each input side.
    input1_data = torch.LongTensor([
        config.sort_q_ws_model.transform(tokens, max_length=config.sort_max_len)
        for tokens in input1_data
    ])
    input2_data = torch.LongTensor([
        config.sort_sim_q_ws_model.transform(tokens, max_length=config.sort_max_len)
        for tokens in input2_data
    ])
    target_data = torch.LongTensor(target_data)

    # 3. Return the batched tensors.
    return input1_data, input2_data, target_data


# Module-level DataLoader built at import time (constructing DnnsortData
# opens the data files configured in config); shuffles every epoch and
# batches samples into LongTensors via collate_fn.
data_loader = DataLoader(DnnsortData(), batch_size=config.sort_batch_size, shuffle=True, collate_fn=collate_fn)
