# -*- coding: UTF-8 -*-
'''
@File ：2_seq2seq.py
@IDE ：PyCharm
@Author ：chaojie
@Date ：2025/11/6 
@Introduce:
'''


import torch
import torch.nn as nn
# Fix the global RNG seed so the randomly initialized layers in the demos
# below get reproducible weights (and therefore reproducible printouts).
torch.manual_seed(28)
# Prefer GPU when available. NOTE(review): `device` is defined but never used
# by t1/t2/t3 — all the demos below run on CPU tensors.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def t1():
    """Simulate one Seq2Seq (encoder-decoder RNN) training step on toy data.

    Builds fake token-id batches, pushes them through an embedding layer, an
    RNN encoder, an RNN decoder initialized with the encoder's final state,
    and a linear vocabulary projection, then prints the score tensor shape
    and the per-token cross-entropy loss.
    """
    # ---- toy translation sample (illustrative only; never fed to the model) ----
    src_text = "小明吃苹果"
    tgt_text = "xiao ming eats apples"

    # tokenization of the pair above
    src_tokens = ["小明", "吃", "苹果"]
    tgt_tokens = ['xiao', 'ming', 'eats', 'apples']

    # token -> id lookup (encoder and decoder assumed to share one vocabulary)
    src_token_ids = [25, 136, 98]
    tgt_token_ids = [125, 54, 1253, 265]

    # single-sample batches; 1 = <start>, 2 = <end>; decoder input/target are shifted
    src_batch_ids = torch.tensor([[25, 136, 98]])                   # encoder input
    tgt_input_batch_ids = torch.tensor([[1, 125, 54, 1253, 265]])   # decoder input
    tgt_output_batch_ids = torch.tensor([[125, 54, 1253, 265, 2]])  # decoder target

    # ---- the actual simulation uses a two-sample batch (overwrites the above) ----
    src_batch_ids = torch.tensor([
        [25, 136, 98],
        [36, 24, 8],
    ])
    tgt_input_batch_ids = torch.tensor([
        [1, 125, 54, 1253, 265],
        [1, 58, 1482, 2356, 2342],
    ])
    tgt_output_batch_ids = torch.tensor([
        [125, 54, 1253, 265, 2],
        [58, 1482, 2356, 2342, 2],
    ])

    # ---- network pieces (construction order preserved so the seeded RNG matches) ----
    vocab_size = 5000
    embedding_layer = nn.Embedding(num_embeddings=vocab_size, embedding_dim=4)
    encoder_layer = nn.RNN(input_size=4, hidden_size=8, batch_first=True)
    decoder_layer = nn.RNN(input_size=4, hidden_size=8, batch_first=True)
    decoder_fc_layer = nn.Linear(in_features=8, out_features=vocab_size)
    loss_fn = nn.CrossEntropyLoss(reduction='none')

    # ---- forward pass ----
    # encode: the final hidden state serves as the context vector c
    src_embedding = embedding_layer(src_batch_ids)  # [N, Encoder_T, 4]
    # encoder output: [N, Encoder_T, 8]; encoder state: [1, N, 8]
    _, c = encoder_layer(src_embedding)

    # decode the shifted target sequence, starting from the context c
    tgt_embedding = embedding_layer(tgt_input_batch_ids)  # [N, Decoder_T, 4]
    decoder_output, _ = decoder_layer(tgt_embedding, hx=c)

    # project decoder features onto the vocabulary: [N, Decoder_T, vocab_size]
    output_score = decoder_fc_layer(decoder_output)
    print(output_score.shape)

    # CrossEntropyLoss expects class scores on dim 1, i.e. [N, C, T]
    loss = loss_fn(torch.transpose(output_score, dim0=2, dim1=1), tgt_output_batch_ids)
    print(loss)




def t2(max_tokens=10):
    """Simulate greedy Seq2Seq inference (single sample, token-by-token).

    The encoder's final hidden state initializes the decoder; at each step the
    previous prediction is fed back in and the arg-max token is appended until
    the end token (2) appears or the output exceeds ``max_tokens`` tokens.
    The layers are randomly initialized — this only demonstrates control flow.

    :param max_tokens: maximum number of generated tokens (excluding the
        leading start token); decoding stops once this length is exceeded.
    :return: list of token ids, starting with the start token 1.
    """
    src_batch_ids = torch.tensor([
        [25, 136, 98]
    ])
    tgt_token_ids = [1]  # 1 = <start>; generated tokens are appended here

    # network pieces
    vocab_size = 5000
    embedding_layer = nn.Embedding(num_embeddings=vocab_size, embedding_dim=4)
    encoder_layer = nn.RNN(input_size=4, hidden_size=4, batch_first=True)
    decoder_layer = nn.RNN(input_size=4, hidden_size=4, batch_first=True)
    decoder_fc_layer = nn.Linear(in_features=4, out_features=vocab_size)

    # inference needs no gradients; no_grad keeps the loop from building a graph
    with torch.no_grad():
        # encode: [N, Encoder_T] -> [N, Encoder_T, 4]
        src_embedding = embedding_layer(src_batch_ids)
        # encoder_state: [num_layers, N, hidden_size] — the context vector c
        _, decoder_state = encoder_layer(src_embedding)

        pred_token_id = torch.tensor([tgt_token_ids])  # first decoder input: <start>
        while True:
            # feed the previous prediction; carry the hidden state across steps
            tgt_embedding = embedding_layer(pred_token_id)
            decoder_output, decoder_state = decoder_layer(tgt_embedding, hx=decoder_state)
            # score only the last time step: [N, vocab_size]
            output_score = decoder_fc_layer(decoder_output[:, -1, :])

            # greedy decoding: pick the highest-scoring vocabulary id, [N, 1]
            pred_token_id = torch.argmax(output_score, dim=1, keepdim=True)
            tgt_token_ids.append(pred_token_id[0, 0].item())

            # stop on the end token (2) or when the output grows too long
            if tgt_token_ids[-1] == 2 or len(tgt_token_ids) > max_tokens:
                break
    return tgt_token_ids


def t3():
    """Simulate a Seq2Seq training forward pass where the encoder and decoder
    have mismatched state shapes, bridged by a linear projection.

    Encoder: 3-layer bidirectional RNN -> state [6, N, 4].
    Decoder: 2-layer unidirectional RNN -> expects state [2, N, 4].
    """
    # two-sample batch; 1 = <start>, 2 = <end>; decoder input/target are shifted
    src_batch_ids = torch.tensor([
        [25, 136, 98],
        [36, 24, 8],
    ])
    tgt_input_batch_ids = torch.tensor([
        [1, 125, 54, 1253, 265],
        [1, 58, 1482, 2356, 2342],
    ])
    tgt_output_batch_ids = torch.tensor([
        [125, 54, 1253, 265, 2],
        [58, 1482, 2356, 2342, 2],
    ])
    batch_size = src_batch_ids.shape[0]

    # ---- network pieces (construction order preserved so the seeded RNG matches) ----
    vocab_size = 5000
    hidden_size = 4
    embedding_layer = nn.Embedding(num_embeddings=vocab_size, embedding_dim=hidden_size)
    # deliberately different shapes: encoder state [6,N,4] vs decoder state [2,N,4]
    encoder_layer = nn.RNN(
        input_size=hidden_size, hidden_size=hidden_size,
        num_layers=3, bidirectional=True, batch_first=True
    )
    decoder_layer = nn.RNN(
        input_size=hidden_size, hidden_size=hidden_size,
        num_layers=2, batch_first=True
    )
    decoder_fc_layer = nn.Linear(in_features=hidden_size, out_features=vocab_size)
    # maps the flattened encoder state (6*4 per sample) onto the decoder's (2*4)
    encoder_decoder_fc_layer = nn.Linear(6 * hidden_size, 2 * hidden_size)

    loss_fn = nn.CrossEntropyLoss(reduction='none')

    # ---- forward pass ----
    # encode: only the final state is used as context
    src_embedding = embedding_layer(src_batch_ids)  # [N, Encoder_T, hidden_size]
    _, encoder_state = encoder_layer(src_embedding)

    # bridge the state shapes: [6,N,4] -> [N,24] -> [N,8] -> [2,N,4]
    flat_state = encoder_state.permute(1, 0, 2).reshape(batch_size, -1)
    bridged = encoder_decoder_fc_layer(flat_state)
    context = bridged.reshape(batch_size, -1, hidden_size).permute(1, 0, 2)

    # decode the shifted target sequence from the bridged context
    tgt_embedding = embedding_layer(tgt_input_batch_ids)  # [N, Decoder_T, hidden_size]
    decoder_output, _ = decoder_layer(tgt_embedding, hx=context)

    # project onto the vocabulary: [N, Decoder_T, vocab_size]
    output_score = decoder_fc_layer(decoder_output)
    print(output_score.shape)

    # CrossEntropyLoss wants [N, C, T]
    loss = loss_fn(output_score.transpose(1, 2), tgt_output_batch_ids)
    print(loss)



if __name__ == '__main__':
    # t1()  # basic training-step demo (uncomment to run)
    t3()  # training demo with encoder/decoder state-shape bridging
