import torch
from torch.utils.data import Dataset, DataLoader

# Maximum number of words allowed per sentence; longer sentences are filtered out.
MAX_LEN = 50
# ID of the <sos> (start-of-sentence) token in the target-language vocabulary.
SOS_ID = 1

class TextDataset(Dataset):
    """Dataset of tokenized sentences, one sentence per line.

    Each line of the input file must contain whitespace-separated integer
    word IDs; every line becomes a 1-D integer tensor.  ``__getitem__``
    returns the tensor together with its unpadded length.
    """

    def __init__(self, file_path):
        self.sentences = []
        # Specify the encoding explicitly: relying on the platform default
        # can raise UnicodeDecodeError (or silently mis-decode) on systems
        # whose locale encoding is not UTF-8.
        with open(file_path, 'r', encoding='utf-8') as f:
            for line in f:
                # Split the line on whitespace and convert each string
                # word ID to an integer.
                word_ids = [int(word) for word in line.strip().split()]
                self.sentences.append(torch.tensor(word_ids))

    def __len__(self):
        return len(self.sentences)

    def __getitem__(self, idx):
        # Return (sentence tensor, true sentence length).
        sentence = self.sentences[idx]
        length = len(sentence)
        return sentence, length

def make_src_trg_dataset(src_path, trg_path, batch_size):
    """Build a DataLoader yielding padded (source, target) sentence batches.

    Args:
        src_path: path to the source-language file (integer IDs per line).
        trg_path: path to the target-language file (integer IDs per line).
        batch_size: number of sentence pairs per batch.

    Returns:
        A DataLoader whose batches have the form
        ``(src_padded, src_lengths), (trg_input_padded, trg_label_padded, trg_lengths)``.
    """
    # Read the source-language and target-language data separately.
    src_data = TextDataset(src_path)
    trg_data = TextDataset(trg_path)

    # Merge the two Datasets into one dataset of aligned sentence pairs.
    dataset = list(zip(src_data, trg_data))

    # Drop sentence pairs where either side is empty (contains only
    # <sos>/<eos>) or longer than MAX_LEN.
    filtered_dataset = [
        (src_tuple, trg_tuple) for src_tuple, trg_tuple in dataset
        if 1 < src_tuple[1] <= MAX_LEN and 1 < trg_tuple[1] <= MAX_LEN
    ]

    # The decoder needs the target sentence in two forms:
    #   1. decoder input  (trg_input): "<sos> X Y Z"
    #   2. decoder target (trg_label): "X Y Z <eos>"
    # The file stores "X Y Z <eos>", so derive "<sos> X Y Z" from it here.
    processed_dataset = []
    for (src_input, src_len), (trg_label, trg_len) in filtered_dataset:
        trg_input = torch.cat([torch.tensor([SOS_ID]), trg_label[:-1]])
        processed_dataset.append(((src_input, src_len), (trg_input, trg_label, trg_len)))

    def collate_fn(batch):
        """Pad every field to the batch-max length; keep the true lengths."""
        src_inputs, src_lengths = zip(*[item[0] for item in batch])
        trg_inputs, trg_labels, trg_lengths = zip(*[item[1] for item in batch])

        # pad_sequence pads with 0 by default; output shape is (batch, max_len).
        src_inputs_padded = torch.nn.utils.rnn.pad_sequence(src_inputs, batch_first=True)
        trg_inputs_padded = torch.nn.utils.rnn.pad_sequence(trg_inputs, batch_first=True)
        trg_labels_padded = torch.nn.utils.rnn.pad_sequence(trg_labels, batch_first=True)

        return (src_inputs_padded, torch.tensor(src_lengths)), \
               (trg_inputs_padded, trg_labels_padded, torch.tensor(trg_lengths))

    # Let the DataLoader reshuffle the data every epoch.  The original code
    # shuffled the list once with random.shuffle and passed shuffle=False,
    # so every epoch visited the batches in the identical order — defeating
    # the purpose of shuffling training data.
    dataloader = DataLoader(processed_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
    return dataloader



