import torch
import torch.nn as nn
import torch.nn.functional as F
import rich
import copy
import math
from torch import optim
from torch.utils.data import Dataset, DataLoader
import pathlib
import os
import random
import numpy as np


# Seed every RNG source (Python, NumPy, Torch CPU/CUDA) for reproducible runs.
seed = 42
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(seed)

# Compute device: CUDA when available, otherwise CPU.
Device = 'cuda' if torch.cuda.is_available() else 'cpu'
Device = torch.device(Device)

# Checkpoint directory next to this file; created up front so saves never fail.
save_model_folder = pathlib.Path(__file__).parent.joinpath('models')
save_model_folder.mkdir(parents=True, exist_ok=True)

# -------------------------- 2. User-specified SimpleDataset --------------------------
class SimpleDataset(Dataset):
    """Toy copy-task dataset.

    Each sample pairs a random 5-token sequence (ints in [1, 8]) with a
    label that is the same sequence wrapped in a start marker 0 and an
    end marker 9, i.e. data (5,) -> label (7,).
    """

    def __init__(self, num):
        # Start-of-sequence / end-of-sequence marker tokens.
        prefix = torch.tensor([0])
        suffix = torch.tensor([9])
        self.data = [torch.randint(1, 9, size=(5,), dtype=torch.long) for _ in range(num)]
        # torch.cat copies its inputs, so the original deepcopy of self.data
        # was unnecessary; also use a real name instead of `_` for the value.
        self.labels = [torch.cat([prefix, seq, suffix], dim=0) for seq in self.data]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # data: (5,), label: (7,)
        return self.data[idx], self.labels[idx]
def create_data(batch_size=5, num=100):
    """Return a shuffling DataLoader over a freshly built SimpleDataset of `num` samples."""
    return DataLoader(SimpleDataset(num), batch_size=batch_size, shuffle=True)







# 3.1 Positional encoding (unchanged)
class PositionalEncoding(nn.Module):
    """Adds the fixed sinusoidal position table of "Attention Is All You Need"
    to a batch of embeddings, then applies dropout."""

    def __init__(self, d_model, max_len=10, dropout=0.1):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)

        # Column of positions (max_len, 1) times a row of inverse frequencies.
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        inv_freq = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))

        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * inv_freq)
        table[:, 1::2] = torch.cos(positions * inv_freq)
        # Stored as (1, max_len, d_model) so it broadcasts over the batch axis;
        # a buffer moves with .to(device) but is not a trainable parameter.
        self.register_buffer('pe', table.unsqueeze(0))

    def forward(self, x):
        # x: (batch_size, seq_len, d_model)
        return self.dropout(x + self.pe[:, :x.size(1), :])


class Transform(nn.Module):
    """Encoder-decoder Transformer over small integer vocabularies.

    forward(enc, dec) maps encoder token ids (B, S) and decoder input token
    ids (B, T) to per-position vocabulary logits of shape (B, T, dec_vocab_size).
    """

    def __init__(self,
                 enc_vocab_size=10,
                 dec_vocab_size=10,
                 d_model=512,
                 max_len=100,
                 n_head=8,
                 n_layer=6,
                 dropout=0.1,
                 middle_dim=2048
                 ):
        super().__init__()
        self.enc_embedding = nn.Embedding(enc_vocab_size, d_model)
        self.dec_embedding = nn.Embedding(dec_vocab_size, d_model)
        self.position_embedding = PositionalEncoding(d_model=d_model, max_len=max_len)
        # batch_first=True so all tensors are (batch, seq, feature).
        self.transformer_encoder_layer = nn.TransformerEncoderLayer(
            d_model, nhead=n_head, dim_feedforward=middle_dim, dropout=dropout, batch_first=True)
        self.transformer_decoder_layer = nn.TransformerDecoderLayer(
            d_model, nhead=n_head, dim_feedforward=middle_dim, dropout=dropout, batch_first=True)

        # nn.TransformerEncoder/Decoder deep-copy the template layer n_layer times.
        self.transformer_encoder = nn.TransformerEncoder(self.transformer_encoder_layer, num_layers=n_layer)
        self.transformer_decoder = nn.TransformerDecoder(self.transformer_decoder_layer, num_layers=n_layer)
        self.fc_out = nn.Linear(d_model, dec_vocab_size)

        self.init_weights()

    def init_weights(self):
        """Uniform(-0.1, 0.1) init for embeddings and output layer; zero output bias."""
        initrange = 0.1
        self.enc_embedding.weight.data.uniform_(-initrange, initrange)
        self.dec_embedding.weight.data.uniform_(-initrange, initrange)
        self.fc_out.bias.data.zero_()
        self.fc_out.weight.data.uniform_(-initrange, initrange)

    @staticmethod
    def get_padding_mask(x):
        """Return a boolean mask that is True where x equals the padding id 10.

        Fixed: the original definition took `x` as the implicit `self` slot,
        so calling it on an instance raised TypeError; @staticmethod keeps the
        one-argument call working from both the class and instances.
        NOTE(review): id 10 lies outside the default vocab of size 10 —
        confirm the intended padding id before relying on this mask.
        """
        return x == 10

    def forward(self, enc, dec):
        """Encode `enc`, causally decode `dec` against it, and project to logits.

        Args:
            enc: (B, S) long tensor of encoder token ids.
            dec: (B, T) long tensor of decoder input token ids.
        Returns:
            (B, T, dec_vocab_size) float logits.
        """
        # Removed the original deepcopies of enc/dec into unused src/dst —
        # they were never read and needlessly copied tensors every call.
        enc = self.enc_embedding(enc)
        dec = self.dec_embedding(dec)

        enc = self.position_embedding(enc)
        dec = self.position_embedding(dec)

        # All sequences in a batch share one length here, so no padding masks.
        enc_out = self.transformer_encoder(enc, mask=None, src_key_padding_mask=None)

        # Causal mask so position t only attends to positions <= t. Built on
        # the input's own device instead of the module-level global `Device`,
        # making the model device-agnostic.
        tgt_mask = nn.Transformer.generate_square_subsequent_mask(dec.size(1)).to(dec.device)

        dec_out = self.transformer_decoder(
            dec,
            enc_out,
            tgt_mask=tgt_mask,
            memory_mask=None,
            tgt_key_padding_mask=None,
            memory_key_padding_mask=None,
        )
        return self.fc_out(dec_out)
        
        

if __name__ == '__main__':
    # Checkpoint paths: full pickled module and plain state_dict.
    # (str(...) instead of .__str__(); the strings had no f-string placeholders.)
    model_path = str(save_model_folder.joinpath('model_no_fixed.pth').resolve())
    model_path2 = str(save_model_folder.joinpath('model_static_no_fixed.pth').resolve())

    model = Transform()
    model.to(Device)
    model.train()
    datas = create_data(batch_size=64, num=1000)

    # Training configuration.
    intervals = 50
    criterion = nn.CrossEntropyLoss()
    opt = optim.Adam(model.parameters(), lr=1e-4, betas=(0.9, 0.98), eps=1e-9)

    # Training loop: teacher forcing — decoder sees label[:-1], predicts label[1:].
    for interval in range(intervals):

        loss_all = 0.0
        for data, label in datas:
            data = data.to(Device)
            label = label.to(Device)

            input_label = label[:, :-1]   # decoder input      (B, 6)
            output_label = label[:, 1:]   # prediction target  (B, 6)

            opt.zero_grad()
            output = model(data, input_label)  # (B, 6, vocab)

            loss = criterion(
                output.reshape(-1, output.size(-1)),
                output_label.reshape(-1)
            )
            loss_all += loss.item()

            loss.backward()
            # Clip gradients to stabilise early training.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            opt.step()

        avg_loss = loss_all / len(datas)
        print(f'第 {interval + 1} 轮, 平均损失: {avg_loss:.6f}, 学习率: {opt.param_groups[0]["lr"]:.8f}')

        # Save both checkpoint forms every epoch.
        torch.save(model, model_path)
        torch.save(model.state_dict(), model_path2)

    # Greedy autoregressive decoding: start from BOS (0), append the argmax
    # token each step, stop on EOS (9).
    model.eval()
    with torch.no_grad():
        test_in_data = torch.tensor([1, 2, 3, 4, 5], dtype=torch.long).view(1, -1)  # (1, 5)
        label = torch.tensor([[0]], dtype=torch.long)  # initial input: start token 0

        # Move once before the loop (the original re-sent both tensors every step).
        test_in_data = test_in_data.to(Device)
        label = label.to(Device)

        # Fixed: cap decoding steps so a model that never emits EOS (9)
        # cannot spin the original `while True` forever.
        max_decode_steps = 20
        for _ in range(max_decode_steps):
            output = model(test_in_data, label)  # (1, tgt_seq_len, vocab)
            log_softmax = F.log_softmax(output, dim=-1)
            max_token = log_softmax.argmax(dim=-1)  # (1, tgt_seq_len)

            # Append the newest greedy token.
            label = torch.cat([label, max_token[:, -1].unsqueeze(1)], dim=1)
            print(f"当前预测序列: {label.squeeze().tolist()}")

            # Stop on end-of-sequence token 9.
            if max_token[:, -1].item() == 9:
                break