import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import os

# Maximum number of tokens allowed per sentence; longer sentences are filtered out.
MAX_LEN = 50
# ID of <sos> in the target-language vocabulary.
SOS_ID = 1
# NOTE(review): EOS_ID equals SOS_ID here; usually <eos> has its own distinct id.
# EOS_ID is unused in this file — confirm against the vocabulary files before relying on it.
EOS_ID = 1

class TextDataset(Dataset):
    """Dataset of tokenized sentences.

    Each line of *file_path* holds one sentence as space-separated integer
    word ids; every item is a (1-D LongTensor of ids, sentence length) pair.
    """

    def __init__(self, file_path):
        self.sentences = []
        with open(file_path, 'r') as f:
            for line in f:
                # Split the line on whitespace and convert each token id to int.
                ids = [int(token) for token in line.split()]
                self.sentences.append(torch.tensor(ids))

    def __len__(self):
        return len(self.sentences)

    def __getitem__(self, idx):
        # Return the sentence tensor alongside its token count.
        item = self.sentences[idx]
        return item, len(item)


def make_src_trg_dataset(src_path, trg_path, batch_size):
    """Build a DataLoader of padded (source, target) batches for seq2seq training.

    Each batch is a pair of tuples:
        (src_inputs_padded, src_lengths),
        (trg_inputs_padded, trg_labels_padded, trg_lengths)
    Sequences are zero-padded per batch (pad id 0, which matches the loss's
    ignore_index=0 used elsewhere in this file).
    """
    # Read source and target sentences; pairs are aligned by line number.
    src_data = TextDataset(src_path)
    trg_data = TextDataset(trg_path)

    # Merge the two datasets into one list of ((src, src_len), (trg, trg_len)) pairs.
    dataset = list(zip(src_data, trg_data))

    # Drop effectively-empty sentences (length <= 1, i.e. only <sos>/<eos>)
    # and sentences longer than MAX_LEN.
    filtered_dataset = [
        (src_tuple, trg_tuple) for src_tuple, trg_tuple in dataset
        if 1 < src_tuple[1] <= MAX_LEN and 1 < trg_tuple[1] <= MAX_LEN
    ]

    # The decoder needs two views of every target sentence:
    #   1. decoder input (trg_input):  "<sos> X Y Z"
    #   2. decoder label (trg_label):  "X Y Z <eos>"
    # Files store "X Y Z <eos>", so build the input by dropping the last token
    # and prepending <sos>; the length is unchanged, so trg_len stays valid.
    processed_dataset = []
    for (src_input, src_len), (trg_label, trg_len) in filtered_dataset:
        trg_input = torch.cat([torch.tensor([SOS_ID]), trg_label[:-1]])
        processed_dataset.append(((src_input, src_len), (trg_input, trg_label, trg_len)))

    # No manual pre-shuffle is needed here: DataLoader(shuffle=True) below
    # already reshuffles the data at the start of every epoch.

    # Pad variable-length sequences within each batch.
    def collate_fn(batch):
        src_inputs, src_lengths = zip(*[item[0] for item in batch])
        trg_inputs, trg_labels, trg_lengths = zip(*[item[1] for item in batch])

        # pad_sequence pads with 0 by default, matching the ignore_index of the loss.
        src_inputs_padded = torch.nn.utils.rnn.pad_sequence(src_inputs, batch_first=True)
        trg_inputs_padded = torch.nn.utils.rnn.pad_sequence(trg_inputs, batch_first=True)
        trg_labels_padded = torch.nn.utils.rnn.pad_sequence(trg_labels, batch_first=True)

        return (src_inputs_padded, torch.tensor(src_lengths)), \
               (trg_inputs_padded, trg_labels_padded, torch.tensor(trg_lengths))

    dataloader = DataLoader(processed_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
    return dataloader


# Hidden-state size of each LSTM layer (also used as the embedding size).
HIDDEN_SIZE = 1024
# Number of stacked LSTM layers in the deep RNN.
NUM_LAYERS = 2
# Source-language vocabulary size.
SRC_VOCAB_SIZE = 10000
# Target-language vocabulary size.
TRG_VOCAB_SIZE = 4000
# Number of sentence pairs per training batch.
BATCH_SIZE = 10
# Number of passes over the training data.
NUM_EPOCH = 5
# Probability that a unit is NOT dropped (dropout rate = 1 - KEEP_PROB).
KEEP_PROB = 0.8
# Upper bound on the gradient norm, used to control exploding gradients.
# NOTE(review): "NROM" looks like a typo for "NORM"; renaming requires updating train() too.
MAX_GRAD_NROM = 5
# Share weights between the softmax (output) layer and the target embedding layer.
SHARE_EMB_AND_SOFTMAX = True


class NMTModel(nn.Module):
    """Encoder-decoder (seq2seq) LSTM model for neural machine translation."""

    def __init__(self, src_vocab_size, trg_vocab_size, hidden_size, num_layers, dropout_rate):
        super(NMTModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        # Embeddings use the LSTM hidden size so no extra projection is needed.
        self.src_embedding = nn.Embedding(src_vocab_size, hidden_size)
        self.trg_embedding = nn.Embedding(trg_vocab_size, hidden_size)

        self.encoder = nn.LSTM(hidden_size, hidden_size, num_layers, batch_first=True, dropout=dropout_rate)
        self.decoder = nn.LSTM(hidden_size, hidden_size, num_layers, batch_first=True, dropout=dropout_rate)

        if SHARE_EMB_AND_SOFTMAX:
            # Tie the output projection to the target embedding: both weights
            # have shape (trg_vocab_size, hidden_size), so sharing is valid.
            self.fc_out = nn.Linear(hidden_size, trg_vocab_size, bias=False)
            self.fc_out.weight = self.trg_embedding.weight
        else:
            self.fc_out = nn.Linear(hidden_size, trg_vocab_size)

    def forward(self, src_input, src_size, trg_input, trg_size):
        """Compute logits for each target position.

        Args:
            src_input: padded source id tensor, (batch, src_max_len).
            src_size: true source lengths (1-D tensor).
            trg_input: padded decoder-input id tensor, (batch, trg_max_len).
            trg_size: true target lengths (1-D tensor).

        Returns:
            Logits flattened to (batch * trg_max_len, trg_vocab_size),
            ready for CrossEntropyLoss against a flattened label tensor.
        """
        # Encode: pack by true lengths so padding does not pollute the final state.
        # (pack_padded_sequence requires the length tensor to live on the CPU.)
        embedded_src = self.src_embedding(src_input)
        packed_src_embedded = nn.utils.rnn.pack_padded_sequence(embedded_src, src_size.cpu(), enforce_sorted=False, batch_first=True)
        _, (hidden, cell) = self.encoder(packed_src_embedded)

        # Decode, initialized with the encoder's final (hidden, cell) state.
        embedded_trg = self.trg_embedding(trg_input)
        packed_trg_embedded = nn.utils.rnn.pack_padded_sequence(embedded_trg, trg_size.cpu(), enforce_sorted=False, batch_first=True)
        output, _ = self.decoder(packed_trg_embedded, (hidden, cell))

        # Un-pack back to a padded tensor (padded to the batch's max target length).
        unpacked_output, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
        prediction = self.fc_out(unpacked_output)

        return prediction.view(-1, prediction.size(2))


def train(model, data_loader, optimizer, criterion, device):
    """Run one training epoch and return the average per-batch loss.

    Args:
        model: NMT model whose forward takes (src, src_len, trg_input, trg_len).
        data_loader: yields ((src, src_len), (trg_input, trg_label, trg_len)).
        optimizer: optimizer updating model.parameters().
        criterion: loss over (flattened logits, flattened labels); assumed to
            use the default 'mean' reduction — confirm if that changes.
        device: device to move the batch tensors to.
    """
    model.train()
    epoch_loss = 0

    for i, ((src, src_len), (trg_input, trg_label, trg_len)) in enumerate(data_loader):
        # Length tensors stay on the CPU: pack_padded_sequence requires that.
        src, trg_input, trg_label = src.to(device), trg_input.to(device), trg_label.to(device)

        optimizer.zero_grad()

        output = model(src, src_len, trg_input, trg_len)
        loss = criterion(output, trg_label.view(-1))
        loss.backward()

        # Clip the gradient norm to limit exploding gradients in the deep LSTM.
        torch.nn.utils.clip_grad_norm_(model.parameters(), MAX_GRAD_NROM)
        optimizer.step()

        epoch_loss += loss.item()

        if i % 10 == 0:
            # With 'mean' reduction the loss is already averaged per token, so
            # report it directly. Dividing by trg_label.nelement() on top of
            # that would double-normalize (and count padding positions too).
            print(f"After {i} steps, per token cost is {loss.item():.3f}")

    # epoch_loss accumulates one mean loss per batch, so normalize by the
    # number of batches — not by len(data_loader.dataset) (example count).
    return epoch_loss / len(data_loader)


def check_dataset_size(file_path):
    """Return the number of lines (i.e. sentences) in *file_path*."""
    line_count = 0
    with open(file_path, 'r') as handle:
        for _ in handle:
            line_count += 1
    return line_count


def main():
    """Entry point: load the data, build the model, train, and checkpoint each epoch."""
    root_path = 'C:/pycharmproject/project1/Seq2/data/'
    SRC_TRAIN_DATA = os.path.join(root_path, "en.number")
    TRG_TRAIN_DATA = os.path.join(root_path, "zh.number")
    CHECKPOINT_PATH = os.path.join(root_path, "seq2seq_ckpt.pth")

    # Sanity-check that both sides of the parallel corpus are present
    # (and, by eyeballing the printed counts, the same size).
    src_size = check_dataset_size(SRC_TRAIN_DATA)
    trg_size = check_dataset_size(TRG_TRAIN_DATA)

    print(f"Source dataset size: {src_size}")
    print(f"Target dataset size: {trg_size}")

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    data_loader = make_src_trg_dataset(SRC_TRAIN_DATA, TRG_TRAIN_DATA, BATCH_SIZE)

    # Dropout rate is the complement of the keep probability.
    model = NMTModel(SRC_VOCAB_SIZE, TRG_VOCAB_SIZE, HIDDEN_SIZE, NUM_LAYERS, 1 - KEEP_PROB).to(device)
    optimizer = optim.SGD(model.parameters(), lr=1.0)
    # ignore_index=0 masks padding positions (pad id 0) out of the loss.
    criterion = nn.CrossEntropyLoss(ignore_index=0)

    # (Removed an unused best_valid_loss variable: no validation set is used here.)
    for epoch in range(NUM_EPOCH):
        print(f"In iteration: {epoch + 1}")
        train_loss = train(model, data_loader, optimizer, criterion, device)
        print(f"Epoch {epoch+1}, Train Loss: {train_loss:.3f}")

        # Overwrite the checkpoint after every epoch so training can be resumed.
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'loss': train_loss,
        }, CHECKPOINT_PATH)


# Run training only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()



