import copy
import gc
import math
import random
from typing import List
from typing import Union

import torch
import torch.nn as nn
import config as cfg
import numpy as np
from torch.nn import functional as F

from config import args
from data_loader import GraphDataset

# Type aliases for annotating graph-related tensors.
# BUG FIX: the originals aliased ``torch.tensor`` — the tensor *factory
# function* — instead of ``torch.Tensor``, the actual type usable in
# annotations and isinstance checks.
GraphVocab = torch.Tensor
VocabEmbedding = torch.Tensor
GraphIndex = torch.Tensor


class DecoderSequenceGenerator(nn.Module):
    """Autoregressive transformer-decoder sequence generator.

    Embeds the input (unless it is already embedded), scales by
    ``sqrt(d_model)``, runs a stack of ``nn.TransformerDecoderLayer``s
    using the sequence itself as the memory (self-decoding), and projects
    back to vocabulary-sized logits.
    """

    def __init__(self, d_model: int, heads: int, num_layers: int,
                 vocab_dim: int,
                 embedding_model: nn.Module,
                 dropout: float = .0):
        """
        Args:
            d_model: transformer model dimension.
            heads: number of attention heads per decoder layer.
            num_layers: number of stacked decoder layers.
            vocab_dim: size of the output vocabulary (logit dimension).
            embedding_model: module mapping raw inputs to ``d_model`` vectors.
            dropout: dropout probability inside the decoder layers.
        """
        super().__init__()
        self.embedding_model = embedding_model
        self.d_model = d_model
        self.decoder_layer = nn.TransformerDecoderLayer(
            d_model=d_model, nhead=heads, dropout=dropout)
        self.decoder = nn.TransformerDecoder(
            self.decoder_layer, num_layers=num_layers, norm=nn.LayerNorm(d_model))
        self.linear = nn.Linear(d_model, vocab_dim)

    def forward(self, x, tgt_mask: torch.Tensor, embedded: bool = False) -> torch.Tensor:
        """Decode ``x`` into vocabulary logits.

        Args:
            x: sequence tensor, shape (seq, batch, vocab) if ``embedded`` is
               False, or (seq, batch, d_model) if already embedded.
            tgt_mask: causal (square subsequent) attention mask.
            embedded: whether ``x`` has already been run through the
                embedding model.

        Returns:
            Logits of shape (seq, batch, vocab_dim).
        """
        # BUG FIX: the original scaled by sqrt(d_model) twice when
        # ``embedded`` was False (once after embedding, once more
        # unconditionally via an in-place ``*=``), effectively scaling by
        # d_model. Scale exactly once, and out of place so the caller's
        # tensor is never mutated.
        if not embedded:
            x = self.embedding_model(x)
        x = x * math.sqrt(self.d_model)

        # Self-decoding: the target sequence doubles as the memory.
        tgt = self.decoder(tgt=x, memory=x, tgt_mask=tgt_mask)
        return self.linear(tgt)

    def to_embedding(self, x, no_grad: bool = False):
        """Run the embedding model, optionally without tracking gradients."""
        if no_grad:
            with torch.no_grad():
                return self.embedding_model(x)
        return self.embedding_model(x)

    def generate_sequence(self, x: torch.Tensor, seq_len: int, device: str,
                          no_grad: bool = False) -> torch.Tensor:
        """Autoregressively generate ``seq_len`` new steps from seed ``x``.

        Each iteration decodes the growing sequence under a causal mask and
        appends the last-position logits as the next input step.

        Args:
            x: seed sequence of shape (seq, batch, vocab).
            seq_len: number of new steps to generate.
            device: device on which the result buffer is allocated.
            no_grad: if True, run generation under ``torch.no_grad()``.
                (BUG FIX: the original accepted this flag but ignored it.)

        Returns:
            Tensor of shape (seq_len, batch, vocab) holding the generated
            per-step logits.
        """
        def _generate() -> torch.Tensor:
            seq = x
            _, batch_size, vocab_size = seq.shape
            result = torch.empty([seq_len, batch_size, vocab_size],
                                 device=device)
            for step in range(seq_len):
                # Causal mask sized to the current (growing) sequence,
                # created on the input's device (BUG FIX: the original
                # always built it on CPU, breaking GPU runs).
                tgt_mask = nn.Transformer.generate_square_subsequent_mask(
                    seq.shape[0], device=seq.device).detach()
                logits = self.forward(seq, tgt_mask)
                next_out = logits[-1, :, :].unsqueeze(0)
                result[step] = next_out
                seq = torch.cat([seq, next_out], dim=0)
            return result

        if no_grad:
            with torch.no_grad():
                return _generate()
        return _generate()


if __name__ == '__main__':
    # Smoke-test / demo training loop for DecoderSequenceGenerator.
    d_model = 512  # model dimension
    nhead = 8  # number of attention heads
    num_layers = 3  # number of decoder layers
    vocab_size = 5  # assumed vocabulary size
    INPUT = 15  # input sequence length
    PRED = 10  # predicted (output) sequence length
    batch_size = 2  # batch size
    num_epochs = 10
    # Training target on the training device.
    # BUG FIX: BCEWithLogitsLoss requires targets in [0, 1]; the original
    # pattern [-1, 1, 2, 1, 0] contained -1 and 2, both outside that range,
    # so the values are clamped. It was also left on CPU, which crashes
    # against a GPU output in the loss — move it to cfg.device.
    desire_rst = torch.tensor([-1, 1, 2, 1, 0] * (batch_size * INPUT),
                              dtype=torch.float32,
                              requires_grad=False).reshape(
        (INPUT, batch_size, vocab_size)).clamp(0.0, 1.0).to(cfg.device)

    model = DecoderSequenceGenerator(
        d_model=d_model,
        heads=nhead,
        num_layers=num_layers,
        vocab_dim=vocab_size,
        embedding_model=nn.Linear(5, 512),
        dropout=0.1
    )
    model.to(cfg.device)

    # Xavier init for weight matrices only (skip biases / 1-D params).
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    criterion = nn.BCEWithLogitsLoss()
    loss_list = []
    # Random seed sequence to bootstrap autoregressive generation.
    input_vec = torch.rand(INPUT, batch_size, vocab_size,
                           requires_grad=False).to(cfg.device)

    # (Removed a precomputed tgt_mask that was never used —
    # generate_sequence builds its own mask each step.)
    for epoch in range(num_epochs):
        # clone() suffices for a tensor; the original used copy.deepcopy.
        output = model.generate_sequence(
            input_vec.clone(), INPUT, cfg.device, no_grad=False)
        loss = criterion(output, desire_rst)
        loss_list.append(loss.item())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')

    # Inference: generate on the same device as input_vec.
    # BUG FIX: the original passed cfg.backup_device, which crashes when it
    # differs from cfg.device (the result buffer and the generated steps
    # must share a device).
    pred_seq = model.generate_sequence(
        input_vec, seq_len=PRED, device=cfg.device, no_grad=True)
    print(pred_seq.shape, torch.argmax(pred_seq, dim=-1))
