import torch
from torch.utils.data import Dataset, DataLoader
from config import *
from vocab import *


class MyCustomDataset(Dataset):
    """Wrap a list of (english, chinese) sentence pairs as a torch Dataset.

    The reported length is truncated down to a whole number of batches
    (see ``__len__``); indices at or past the underlying data size wrap
    around modulo the data size, repeating earlier samples.
    """

    def __init__(self, data):
        # data: sequence of (sentence_en, sentence_ch) pairs
        self.data = data

    def __getitem__(self, index):
        # Out-of-range indices wrap around: the repeated samples are the
        # ones whose index is below the original dataset size.
        if index >= len(self.data):
            index %= len(self.data)
        return self.data[index]

    def __len__(self):
        # Drop the trailing partial batch so every served batch is full.
        # NOTE(review): `batch_size` comes from `from config import *`.
        return len(self.data) - len(self.data) % batch_size

    def get_sentenceMaxLen(self):
        """Return (longest English sentence length, longest Chinese one)."""
        max_en = max((len(en) for en, _ in self.data), default=0)
        max_ch = max((len(ch) for _, ch in self.data), default=0)
        return max_en, max_ch


def tensor_padding(tensor_list, seq_len, pad_value=None):
    """Right-pad each 1-D tensor in *tensor_list* to length *seq_len*.

    Args:
        tensor_list: iterable of 1-D tensors (token-id sequences).
        seq_len: target length; padding is appended at the end.
        pad_value: fill value for the padding; when None (the default),
            falls back to the vocabulary ``PAD`` id, preserving the
            original behavior for existing callers.

    Returns:
        list of tensors, each of length ``seq_len``.

    NOTE(review): a tensor longer than ``seq_len`` gets a negative pad
    width, which ``F.pad`` treats as truncation — same as the original.
    """
    if pad_value is None:
        pad_value = PAD  # PAD is provided by `from vocab import *`
    return [
        torch.nn.functional.pad(
            tensor, (0, seq_len - len(tensor)), mode='constant', value=pad_value
        )
        for tensor in tensor_list
    ]


def dateset2loader(dataset, en_max_len, ch_max_len):
    """Build a DataLoader whose batches are padded to fixed lengths.

    Each sample is an (english_ids, chinese_ids) pair of token-id
    sequences; the collate function converts them to int64 tensors and
    pads the English side to ``en_max_len`` and the Chinese side to
    ``ch_max_len`` before stacking.
    """

    def collate_batch(batch):
        # Split the pairs and turn each token-id sequence into a tensor.
        en_tensors = [torch.tensor(en, dtype=torch.int64) for en, _ in batch]
        ch_tensors = [torch.tensor(ch, dtype=torch.int64) for _, ch in batch]

        # Pad every sequence in the batch to the shared length, then stack
        # into a single (batch, seq_len) tensor per language.
        en_batch_seq = torch.stack(tensor_padding(en_tensors, en_max_len))
        ch_batch_seq = torch.stack(tensor_padding(ch_tensors, ch_max_len))
        return en_batch_seq, ch_batch_seq

    # NOTE(review): shuffle=False keeps sample order deterministic;
    # `batch_size` is supplied by `from config import *`.
    return DataLoader(dataset, batch_size=batch_size, shuffle=False,
                      collate_fn=collate_batch)
