import math
import random
from typing import List

import numpy as np
import torch
import torch.nn as nn
import torch_geometric.nn as gnn
from torch.nn import functional as F
import lzyutil

from clustering import ClusteringMachine
from config import args, devs
from data_loader import GraphDataset
from seq_gen_model import DecoderSequenceGenerator
import timing

# Type aliases used in annotations elsewhere in the project.
# NOTE(review): these bind the *function* torch.tensor, not the class
# torch.Tensor. Harmless as runtime annotations, but torch.Tensor is likely
# what was meant — confirm no isinstance()/constructor use before changing.
GraphVocab = torch.tensor
VocabEmbedding = torch.tensor

# Seed Python's RNG so the cluster/sub-graph shuffles below are reproducible.
random.seed(args.random_seed)


class ChannelMixingBlock(nn.Module):
    """Residual MLP that mixes information along tensor dimension 0.

    The input's first axis (of size ``channel_dim``) is transposed to the
    last position, passed through LayerNorm -> Linear -> GELU -> Linear with
    a residual connection, then transposed back. Expects a 4-D input whose
    dim 0 equals ``channel_dim``; computation is done in float32.
    """

    def __init__(self, channel_dim: int, hidden: int = -1):
        """
        Args:
            channel_dim: size of the axis to mix (the input's dim 0).
            hidden: width of the MLP's hidden layer; any negative value
                defaults it to ``channel_dim``.
        """
        super(ChannelMixingBlock, self).__init__()
        self.channel_dim = channel_dim
        hidden_dim = channel_dim if hidden < 0 else hidden
        self.mixer = nn.Sequential(
            nn.LayerNorm(channel_dim),
            nn.Linear(channel_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, channel_dim),
        )

    def forward(self, x: torch.Tensor):
        """Mix along dim 0 of a (channel_dim, *, *, *) tensor; shape-preserving."""
        assert x.dim() == 4 and x.size(0) == self.channel_dim
        # Bring the mixing axis last so the Linear layers act on it.
        swapped = x.transpose(0, 3).to(dtype=torch.float32)
        swapped = swapped + self.mixer(swapped)
        return swapped.transpose(0, 3)


class SequenceEncoder(nn.Module):
    """Encode a node-feature sequence with stacked GATv2 layers, mix
    information across the sequence axis, and emit one standardized score
    per node and time step.

    Input:  (x, node_feat, edge_index) with
            x:         (seq_len, batch, numnodes, in_dim)
            node_feat: (seq_len, batch, numnodes, node_dim)
    Output: (seq_len, batch, numnodes), zero-mean/unit-std over the node axis.
    """

    def __init__(self, sub_graph_id: int, numnodes: int,
                 in_dim: int, gcn_out_dim: int,
                 seq_len: int,
                 num_layers: int,
                 hidden_size: int,
                 node_dim: int,
                 nheads=4,
                 dropout=.0, ):
        # sub_graph_id: cluster-batch id, used only to name sub-modules.
        # numnodes: size of the sub-graph; accepted but not used in this class.
        # seq_len: channel dimension handed to ChannelMixingBlock — the
        #          sequence axis is what gets mixed.
        super(SequenceEncoder, self).__init__()

        self.sub_graph_id = sub_graph_id
        self.in_dim = in_dim
        self.gcn_out_dim = gcn_out_dim
        self.node_feat_dim = node_dim

        self.gcns = nn.ModuleList()

        # First GAT layer: in_dim -> hidden_size (nheads heads of
        # hidden_size // nheads channels each, concatenated by default).
        self.gcns.add_module(f"cluster{self.sub_graph_id}_gcn_layer-init",
                             gnn.GATv2Conv(in_channels=in_dim,
                                           out_channels=hidden_size // nheads,
                                           heads=nheads,
                                           dropout=dropout))

        # NOTE(review): this loop adds num_layers-2 more layers, giving
        # num_layers-1 GAT layers in total — confirm whether `num_layers`
        # total layers was intended.
        for i in range(1, num_layers-1):
            self.gcns.add_module(f"cluster{self.sub_graph_id}_gcn_layer-{i}",
                                 gnn.GATv2Conv(in_channels=hidden_size,
                                               out_channels=hidden_size // nheads,
                                               heads=nheads,
                                               dropout=dropout))

        self.gcn_linear = nn.Linear(hidden_size, gcn_out_dim)
        self.channel_mixer = ChannelMixingBlock(seq_len)
        # Scores each node from its GCN output concatenated with its raw
        # node features.
        self.vocab_linear = nn.Linear(gcn_out_dim+node_dim, 1)

    def forward(self, inputs):
        """Encode one batch.

        Args:
            inputs: tuple (x, node_feat, edge_index) where
                x:          (seq_len, batch, numnodes, in_dim)
                node_feat:  (seq_len, batch, numnodes, node_dim)
                edge_index: COO edge index of the sub-graph.

        Returns:
            Tensor of shape (seq_len, batch, numnodes), standardized over
            the last (node) axis.
        """

        # x:(seq_len, batch, numnodes, in_dim)
        # node_feat : (seq_len, batch, numnodes, node_dim)
        x, node_feat, edge_index = inputs
        seq_len, batch, numnodes, in_dim = x.shape
        assert node_feat.shape[:-1] == x.shape[:-1]

        # Collapse leading axes: (seq_len*batch*numnodes, in_dim).
        # NOTE(review): edge_index is not replicated per (seq, batch) slice
        # here; whether every slice sees the intended graph depends on how
        # edge_index was built upstream — verify against ClusteringMachine.
        x = torch.flatten(x, start_dim=0, end_dim=-2)

        for gcn in self.gcns:
            x = gcn(x, edge_index)

        # (seq_len, batch, numnodes, gcn_out_dim)
        x = torch.reshape(self.gcn_linear(x),
                          (seq_len, batch, numnodes, self.gcn_out_dim))

        # Concatenate the raw node features back in:
        # (seq_len, batch, numnodes, gcn_out_dim+node_dim)
        x = torch.cat((x, node_feat), dim=-1)
        x = self.channel_mixer(x)

        # One score per node: (seq_len, batch, numnodes)
        x = torch.squeeze(self.vocab_linear(x), -1)

        # Standardize over the node axis; eps guards against zero std.
        mean_val = torch.mean(x, dim=(-1), keepdim=True)
        std_val = torch.std(x, dim=(-1), keepdim=True)
        # norm
        return (x - mean_val) / (std_val+1e-6)

    def l2_loss(self):
        """Sum of squared entries of *all* parameters (weights, biases and
        LayerNorm parameters alike); returns None if there are no parameters.
        """
        loss = None
        # NOTE(review): named_parameters() returns a generator, which is
        # never None — this assert can never fire.
        assert self.named_parameters() is not None
        for name, p in self.named_parameters():
            if loss is None:
                loss = p.pow(2).sum()
            else:
                loss = loss + p.pow(2).sum()
        return loss


class GCNSeqGenBlock(nn.Module):
    """Per-cluster model: a SequenceEncoder (GAT + channel mixer) turns the
    node-feature sequence into per-step node scores, which feed a
    transformer-decoder sequence generator over the cluster's nodes (the
    "vocabulary").
    """

    def __init__(self, sub_graph_id: int, numnodes: int,
                 in_dim: int, node_dim: int,
                 d_model: int, nhead: int, gcn_hidden_size: int,
                 num_gcn_layer: int, num_decoder_layer: int,
                 dropout,
                 gcn_norm='none'):
        """
        Args:
            sub_graph_id: cluster-batch id (names sub-modules).
            numnodes: nodes in this cluster = decoder vocabulary size.
            in_dim: per-node input feature width; node_dim: static feature width.
            gcn_norm: accepted for interface compatibility; not used here.
        """
        super(GCNSeqGenBlock, self).__init__()

        self.sub_graph_id = sub_graph_id
        self.numnodes = numnodes
        self.in_dim = in_dim

        # Projects a length-numnodes score vector into decoder model space.
        self.vocab_2_embedding = nn.Linear(numnodes, d_model)

        self.gcn_vocab = SequenceEncoder(
            sub_graph_id=sub_graph_id,
            numnodes=numnodes,
            in_dim=self.in_dim,
            gcn_out_dim=self.in_dim,
            node_dim=node_dim,
            seq_len=args.seq_len,
            num_layers=num_gcn_layer,
            hidden_size=gcn_hidden_size,
            nheads=nhead,
            dropout=dropout
        )

        self.seq_gen = DecoderSequenceGenerator(
            d_model=d_model,
            heads=nhead,
            num_layers=num_decoder_layer,
            vocab_dim=self.numnodes,  # one prediction value per node
            embedding_model=self.vocab_2_embedding,
            dropout=dropout
        )

    def forward(self, input_vec: torch.Tensor, node_feat: torch.Tensor, edge_index: torch.Tensor):
        """Encode the sequence and decode it with a causal mask.

        Args:
            input_vec: (seq_len, batch, numnodes, in_dim).
            node_feat: (seq_len, batch, numnodes, node_dim).
            edge_index: COO edge index of this cluster's sub-graph.

        Returns:
            Decoder output; per generate_seq's usage, (seq_len, batch, numnodes).
        """
        encoded_input = self.gcn_vocab((input_vec, node_feat, edge_index))

        # Causal mask: step t attends only to steps <= t.
        # BUG FIX: move the mask to the input's device — it is created on the
        # default (CPU) device and otherwise mismatches CUDA inputs.
        tgt_mask = nn.Transformer.generate_square_subsequent_mask(
            input_vec.shape[0]).to(input_vec.device).detach()
        return self.seq_gen(encoded_input, tgt_mask, embedded=False)

    def generate_seq(self, x: torch.tensor, edge_index: torch.Tensor,
                     gen_seq_len: int, device: str,
                     node_feat: torch.Tensor = None):
        """Autoregressively extend seed sequence `x` by gen_seq_len steps.

        Args:
            x: seed sequence (seq_len, batch, numnodes, in_dim); each
               generated score is appended as the next step's feature, so
               this assumes in_dim == 1.
            node_feat: optional static node features matching x's leading
               shape. When None, zeros of width gcn_vocab.node_feat_dim are
               used and re-sized as the sequence grows; when given, it must
               track x's growing length — TODO confirm caller contract.

        Returns:
            (gen_seq_len, batch, numnodes) tensor of generated scores.
        """
        _, batch_size, vocab_size, _ = x.shape
        result = torch.empty(
            [gen_seq_len, batch_size, vocab_size], device=device)
        for i in range(gen_seq_len):
            if node_feat is None:
                # BUG FIX: forward() requires node_feat; the original called
                # self.forward(x, edge_index), which raises a TypeError.
                feat = torch.zeros(*x.shape[:-1], self.gcn_vocab.node_feat_dim,
                                   device=x.device, dtype=x.dtype)
            else:
                feat = node_feat
            output = self.forward(x, feat, edge_index)
            # BUG FIX: keep the last step as (batch, numnodes); the original
            # unsqueezed to (1, batch, numnodes), which cannot be assigned
            # into the rank-2 slot result[i].
            last_step = output[-1]
            result[i] = last_step
            # Append the new step as the next input frame (in_dim == 1).
            x = torch.cat([x, last_step.unsqueeze(0).unsqueeze(-1)], dim=0)
        return result

    def l2_loss(self):
        """Sum of squared entries of all parameters (weights and biases
        alike); returns None if the module has no parameters."""
        # The original also asserted named_parameters() is not None, which is
        # vacuous for a generator — dropped.
        loss = None
        for _, p in self.named_parameters():
            sq = p.pow(2).sum()
            loss = sq if loss is None else loss + sq
        return loss


class ReverseSequenceGenerator(nn.Module):
    """Top-level model: one GCNSeqGenBlock per cluster batch ("sub-graph").

    To bound peak memory on the compute device, each block (and its
    edge_index) is moved onto `optimal_device` only while it runs, then
    parked back on `devs.backup_device`.
    """

    def __init__(self, cluster_machine: ClusteringMachine, optimal_device,
                 cluster_train_batch=1,
                 dropout=0.,
                 activation=F.relu):
        """
        Args:
            cluster_machine: provides cluster counts, node lists and
                sub-graph edge indices.
            optimal_device: device blocks are moved to for compute.
            cluster_train_batch: clusters handled by one block.
            dropout, activation: stored; dropout is forwarded to the blocks.
        """
        super(ReverseSequenceGenerator, self).__init__()
        self.cluster_machine = cluster_machine
        self.cluster_train_batch = cluster_train_batch
        self.cluster_num = self.cluster_machine.get_cluster_num()
        self.all_clusters = list(range(self.cluster_num))
        random.shuffle(self.all_clusters)
        # One block per batch of cluster_train_batch clusters.
        self.sub_graph_num = math.ceil(
            self.cluster_num / self.cluster_train_batch)

        # batch id -> the cluster ids that batch contains.
        cluster_partition = lzyutil.Partition(
            self.cluster_num, part_size=cluster_train_batch)
        self.batch_id_2_clusters = cluster_partition.get_range_dense()

        self.dropout = dropout
        self.optimal_device = optimal_device
        self.backup_device = devs.backup_device
        self.activation = activation
        self.blocks: list[GCNSeqGenBlock] = [None] * self.sub_graph_num
        self.graphs: list[torch.Tensor] = [None] * self.sub_graph_num

        for subgraph_id in range(self.sub_graph_num):
            seq_gen_block = GCNSeqGenBlock(
                sub_graph_id=subgraph_id,
                numnodes=self.cluster_machine.get_cluster_size(
                    self.batch_id_2_clusters[subgraph_id]),
                in_dim=3,
                node_dim=3,
                d_model=args.d_model,
                nhead=args.nhead,
                gcn_hidden_size=args.gcn_hidden,
                num_gcn_layer=args.gcn_layers,
                num_decoder_layer=args.decoder_layers,
                dropout=args.dropout,
                gcn_norm='both')

            # Register explicitly so parameters()/state_dict() see the block
            # even though self.blocks is a plain Python list.
            self.add_module(f"cluster{subgraph_id}-block", seq_gen_block)
            self.blocks[subgraph_id] = seq_gen_block
            self.graphs[subgraph_id] = self.cluster_machine.get_subgraph_edge_index(
                self.batch_id_2_clusters[subgraph_id])

    def to(self, device):
        """Move every sub-graph block and edge index to `device`.

        BUG FIX: return self so `model = model.to(dev)` chaining works,
        matching nn.Module.to's contract (the original returned None).
        """
        self.cluster_to_device(list(range(self.sub_graph_num)), device)
        return self

    def cluster_to_device(self, subgraph_ids: list[int], device):
        """Move the listed sub-graph blocks and their edge indices to `device`."""
        # NOTE(review): the original contained a no-op
        # `if self.optimal_device == devs.backup_device: pass` here —
        # possibly a planned early-exit when both devices coincide; removed
        # as dead code, confirm intent.
        for subgraph_id in subgraph_ids:
            self.blocks[subgraph_id] = self.blocks[subgraph_id].to(device)
            self.graphs[subgraph_id] = self.graphs[subgraph_id].to(device)

    def generate_optimizer(self, zero_grad=False) -> List[torch.optim.Adam]:
        """Create one Adam optimizer per sub-graph block, in block order.

        Args:
            zero_grad: when True, clear gradients of each optimizer up front.
        """
        optimizer_list = [torch.optim.Adam(sub_model.parameters(),
                                           lr=args.learning_rate)
                          for sub_model in self.blocks]

        if zero_grad:
            for optimizer in optimizer_list:
                optimizer.zero_grad()
        return optimizer_list

    def process(self, inputs: torch.Tensor, node_feat: torch.Tensor, loss_func, optimizer_list: List[torch.optim.Adam],
                src_list: np.ndarray, train=True):
        """Run every sub-graph block on one batch; optionally train each.

        Args:
            inputs:    (seq_len, batch, numnodes, in_dim) sequence.
            node_feat: (seq_len, batch, numnodes, node_dim) node features.
            loss_func: criterion comparing (batch, cluster_nodes) predictions
                       against the 0/1 source ground truth.
            optimizer_list: one optimizer per block (generate_optimizer order).
            src_list:  per-sample source-node id lists (define the targets).
            train:     when True, backward + optimizer step per block.

        Returns:
            Tuple of (mean-over-time predictions (batch, numnodes),
            total-loss list, prediction-loss list, L2-loss list); the loss
            lists are per sub-graph block and remain zeros when train=False.
        """
        seq_len, input_batch_size, numnodes, input_dim = inputs.shape

        # 0/1 target grid: 1 at each sample's source nodes.
        src_ground_truth = torch.zeros(
            (input_batch_size, numnodes)).to(self.optimal_device)
        for i, src_nodes in enumerate(src_list):
            src_ground_truth[i, src_nodes] = 1

        all_result = torch.empty(
            (seq_len, input_batch_size, numnodes)).to(self.optimal_device)
        # Visit blocks in a fresh random order every call.
        subgraphs = list(range(self.sub_graph_num))
        random.shuffle(subgraphs)
        loss_list = [0] * self.sub_graph_num
        sg_loss_list = [0] * self.sub_graph_num
        l2_loss_list = [0] * self.sub_graph_num
        for sub_graph_id in subgraphs:
            clusters = self.batch_id_2_clusters[sub_graph_id]
            # Bring only this block onto the compute device.
            self.cluster_to_device([sub_graph_id], self.optimal_device)
            cur_rsg_block = self.blocks[sub_graph_id]
            cur_optimizer = optimizer_list[sub_graph_id]
            assert isinstance(cur_rsg_block, GCNSeqGenBlock)
            nodes = torch.tensor(
                self.cluster_machine.get_cluster_nodes(clusters)).to(self.optimal_device)

            sg_result = cur_rsg_block(
                inputs[:, :, nodes, :],
                node_feat[:, :, nodes, :],
                self.graphs[sub_graph_id]
            )

            all_result[:, :, nodes] = sg_result

            # Back-propagate and step this block's optimizer in train mode.
            if train:
                # Collapse time: one prediction per (sample, node).
                pred_output = torch.mean(sg_result, dim=0)  # (batch, numnodes)

                l2_loss = cur_rsg_block.l2_loss() * args.weight_decay
                sg_loss = loss_func(pred_output, src_ground_truth[:, nodes])
                total_loss = sg_loss + l2_loss

                loss_list[sub_graph_id] = total_loss.item()
                l2_loss_list[sub_graph_id] += l2_loss.item()
                sg_loss_list[sub_graph_id] += sg_loss.item()

                cur_optimizer.zero_grad()
                total_loss.backward()
                cur_optimizer.step()

            # Park the block again to free the compute device.
            self.cluster_to_device([sub_graph_id], self.backup_device)

        return torch.mean(all_result, dim=0), loss_list, sg_loss_list, l2_loss_list


if __name__ == '__main__':
    # Smoke test: build one GCNSeqGenBlock over the whole graph and run
    # autoregressive generation on the GPU.
    graph = GraphDataset('power')
    numnodes = graph.get_size()
    cm = ClusteringMachine(graph, 3)

    model = GCNSeqGenBlock(
        sub_graph_id=0,
        numnodes=numnodes,
        in_dim=1,
        # BUG FIX: node_dim is a required parameter of GCNSeqGenBlock and was
        # missing, so this script raised a TypeError before running.
        # Chosen to match in_dim; TODO confirm intended width.
        node_dim=1,
        d_model=args.d_model,
        nhead=args.nhead,
        gcn_hidden_size=args.gcn_hidden,
        num_gcn_layer=args.gcn_layers,
        num_decoder_layer=args.decoder_layers,
        dropout=args.dropout,
    ).to(devs.device)
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    print(torch.cuda.is_available())
    with timing.Timer("gpu cost"):
        model.to('cuda')
        edge_index = cm.get_subgraph_edge_index((0, 1, 2)).to('cuda')
        print(edge_index)
        # Renamed from `input` to avoid shadowing the builtin.
        seed_seq = torch.randn(2, 1, numnodes, 1).to('cuda')
        output = model.generate_seq(seed_seq, edge_index, 20, 'cuda')
        print(seed_seq.shape, output.shape)
        print(output.size())
