import torch
import torch.nn as nn
import torch.nn.functional as F
import random
import numpy as np
import torch.utils
import torch.utils.data

class Encoder(nn.Module):
    """Embeds source token ids and encodes them with a single-layer GRU.

    Args:
        src_vocab_size: size of the source vocabulary.
        embed_dim: token embedding dimension.
        hidden_dim: GRU hidden-state dimension.
    """

    def __init__(self, src_vocab_size, embed_dim, hidden_dim):
        super().__init__()
        self.embedding = nn.Embedding(src_vocab_size, embed_dim)
        self.gru = nn.GRU(embed_dim, hidden_dim, batch_first=True)

    def forward(self, x, src_length=None):
        """Encode a batch of source sequences.

        Args:
            x: LongTensor of token ids, shape (batch, seq_len).
            src_length: optional per-sequence lengths (list or CPU tensor).
                When given, padded positions are masked out of the GRU via
                pack/pad so padding does not pollute the hidden state.

        Returns:
            outputs: (batch, seq_len, hidden_dim) encoder states.
            hidden: (1, batch, hidden_dim) final hidden state.
        """
        # Embed FIRST: packing must operate on the float embeddings, and
        # pack_padded_sequence requires the lengths argument.
        embedded = self.embedding(x)  # (batch, seq_len, embed_dim)
        if src_length is not None:
            packed = nn.utils.rnn.pack_padded_sequence(
                embedded, src_length, batch_first=True, enforce_sorted=False
            )
            packed_out, hidden = self.gru(packed)
            # Restore the padded (batch, seq_len, hidden_dim) layout so the
            # attention module can consume the outputs.
            outputs, _ = nn.utils.rnn.pad_packed_sequence(
                packed_out, batch_first=True
            )
        else:
            outputs, hidden = self.gru(embedded)
        return outputs, hidden

class Attention(nn.Module):
    """Dot-product attention over the encoder outputs."""

    def __init__(self, hidden_dim):
        super().__init__()
        self.hidden_dim = hidden_dim

    def forward(self, decoder_hidden, encoder_outputs):
        """Score encoder states against the decoder state.

        Args:
            decoder_hidden: (num_layers, batch, hidden_dim) GRU hidden state.
            encoder_outputs: (batch, seq_len, hidden_dim).

        Returns:
            context: (batch, hidden_dim) weighted sum of encoder states.
            weights: (batch, seq_len) softmax attention distribution.
        """
        # Query with the top layer's hidden state: (batch, hidden_dim, 1).
        query = decoder_hidden[-1].unsqueeze(2)
        # Raw alignment scores via batched dot products: (batch, seq_len).
        scores = torch.bmm(encoder_outputs, query).squeeze(2)
        weights = F.softmax(scores, dim=1)
        # Blend encoder states by their weights: (batch, 1, hidden_dim).
        context = torch.bmm(weights.unsqueeze(1), encoder_outputs)
        return context.squeeze(1), weights

class Decoder(nn.Module):
    """One-step decoder: attends over encoder states, then runs a GRU step.

    Args:
        tgt_vocab_size: target vocabulary size (e.g. 10000).
        embed_dim: token embedding dimension (e.g. 100).
        hidden_dim: GRU hidden dimension (e.g. 256).
    """

    def __init__(self, tgt_vocab_size, embed_dim, hidden_dim):
        super().__init__()
        self.embedding = nn.Embedding(tgt_vocab_size, embed_dim)
        self.attention = Attention(hidden_dim)
        # GRU input is [embedding ; context], hence embed_dim + hidden_dim.
        self.gru = nn.GRU(embed_dim + hidden_dim, hidden_dim, batch_first=True)
        # Projection sees [gru output ; context], hence hidden_dim * 2.
        self.fc = nn.Linear(hidden_dim * 2, tgt_vocab_size)

    def forward(self, y, prev_hidden, encoder_outputs):
        """Decode a single target position.

        Args:
            y: (batch, 1) current input token ids.
            prev_hidden: (1, batch, hidden_dim) previous GRU hidden state.
            encoder_outputs: (batch, src_len, hidden_dim).

        Returns:
            output: (batch, tgt_vocab_size) unnormalized logits.
            hidden: (1, batch, hidden_dim) updated hidden state.
            weights: (batch, src_len) attention weights.
        """
        embedded = self.embedding(y)                       # (batch, 1, embed_dim)
        context, weights = self.attention(prev_hidden, encoder_outputs)
        step_input = torch.cat([embedded, context.unsqueeze(1)], dim=2)
        gru_out, hidden = self.gru(step_input, prev_hidden)
        logits = self.fc(torch.cat([gru_out.squeeze(1), context], dim=1))
        return logits, hidden, weights

class Seq2Seq(nn.Module):
    """Encoder-decoder wrapper with scheduled teacher forcing."""

    def __init__(self, encoder, decoder):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, src, tgt, teacher_forcing_ratio=0.5):
        """Run the full encode-then-decode pass.

        Args:
            src: (batch, src_len) source token ids.
            tgt: (batch, tgt_len) target token ids; position 0 is <sos>.
            teacher_forcing_ratio: probability of feeding the gold token
                (rather than the model's own prediction) at each step.

        Returns:
            (batch, tgt_len, tgt_vocab_size) logits; position 0 is left as
            zeros because decoding starts from <sos>.
        """
        batch_size, tgt_len = tgt.shape
        encoder_outputs, hidden = self.encoder(src)
        vocab_size = self.decoder.fc.out_features
        outputs = torch.zeros(batch_size, tgt_len, vocab_size).to(src.device)

        step_input = tgt[:, 0].unsqueeze(1)  # start from <sos>
        for t in range(1, tgt_len):
            logits, hidden, _ = self.decoder(step_input, hidden, encoder_outputs)
            outputs[:, t] = logits
            # Coin flip: gold token vs. the model's own greedy prediction.
            if torch.rand(1).item() < teacher_forcing_ratio:
                step_input = tgt[:, t].unsqueeze(1)
            else:
                step_input = logits.argmax(1).unsqueeze(1)
        return outputs

# Hyper-parameters
src_vocab_size = 5000  # source (Chinese) vocabulary size
tgt_vocab_size = 10000 # target (English) vocabulary size
embed_dim = 100
hidden_dim = 256

# Build the model
encoder = Encoder(src_vocab_size, embed_dim, hidden_dim) 
decoder = Decoder(tgt_vocab_size, embed_dim, hidden_dim)  # 
model = Seq2Seq(encoder, decoder)

# ignore_index=0: token id 0 is reserved for padding and excluded from the loss
criterion = nn.CrossEntropyLoss(ignore_index=0)  
opt = torch.optim.Adam(model.parameters(), lr=0.001)



def generate_random_samples(num_samples, src_vocab_size, tgt_vocab_size):
    """Create random (src, tgt) token-id pairs for smoke-testing the model.

    Sequence lengths are drawn uniformly from [3, 8]; token ids from
    [1, vocab_size) so that id 0 stays free for padding.
    """
    pairs = []
    for _ in range(num_samples):
        src_len, tgt_len = random.randint(3, 8), random.randint(3, 8)
        pairs.append(
            (
                torch.randint(1, src_vocab_size, (src_len,)),
                torch.randint(1, tgt_vocab_size, (tgt_len,)),
            )
        )
    return pairs

# Synthetic dataset of 10 (src, tgt) pairs for experimentation.
s = generate_random_samples(10, 5000, 10000)

# NOTE(review): the batching experiment below is kept for reference only.
# As written it had a bug: `target` collected element [0] (the source)
# instead of [1] (the target) from each pair.
# def collate(batch):
#     srcs = [pair[0] for pair in batch]
#     tgts = [pair[1] for pair in batch]
#     return (nn.utils.rnn.pad_sequence(srcs, batch_first=True),
#             nn.utils.rnn.pad_sequence(tgts, batch_first=True))
# loader = torch.utils.data.DataLoader(s, batch_size=3, shuffle=True,
#                                      collate_fn=collate)



# ---- Toy training loop on a fixed synthetic batch (batch_size=2) ----
num_epochs = 100

# Fixed inputs are loop-invariant, so build them once outside the loop.
src = torch.LongTensor([[1, 2, 3, 4], [1, 2, 3, 4]])  # "我 爱 人工 智能"
tgt = torch.LongTensor([[5, 6, 7, 8], [5, 6, 7, 8]])  # "I love artificial intelligence"

for ep in range(num_epochs):
    opt.zero_grad()

    output = model(src, tgt)  # (batch_size, tgt_len, tgt_vocab_size)

    # The decoder never writes position 0 of `output` (it stays all zeros,
    # and tgt[:, 0] is the <sos> token), so including it would penalize the
    # model on a slot it cannot produce. Compute the loss over positions
    # 1..tgt_len-1 only, flattened to (batch*(tgt_len-1), vocab) vs. ids.
    loss = criterion(
        output[:, 1:].reshape(-1, tgt_vocab_size),
        tgt[:, 1:].reshape(-1),
    )
    loss.backward()
    opt.step()

    print(f"Loss: {loss.item()}")



