import torch.nn as nn
import torch
import numpy as np
from torch import nn
from torch.nn import functional as F




import torch
import torch.nn as nn

class SelfAttention(nn.Module):
    """Single-head self-attention that returns raw values and weights.

    Three linear maps produce query/key/value projections of the input.
    The attention matrix is softmax(Q^T K) over the last axis.  Note that
    the usual weighted sum (weights @ V) is intentionally NOT computed
    here; the caller receives the value projection and the weight matrix
    separately and combines them itself.
    """

    def __init__(self, input_dim, output_dim):
        super(SelfAttention, self).__init__()
        # nn.Linear acts on the trailing axis, so inputs must be shaped
        # (..., input_dim).
        self.query = nn.Linear(input_dim, output_dim)
        self.key = nn.Linear(input_dim, output_dim)
        self.value = nn.Linear(input_dim, output_dim)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        # x: (batch, channels, input_dim); each projection keeps the
        # leading two axes and maps the last one to output_dim.
        q = self.query(x)
        k = self.key(x)
        v = self.value(x)

        # (B, output_dim, channels) @ (B, channels, output_dim)
        # -> (B, output_dim, output_dim); each row sums to 1 after softmax.
        scores = torch.bmm(q.transpose(1, 2), k)
        weights = self.softmax(scores)

        # The weighted combination with `v` is left to the caller.
        return v, weights










class SpatialConvHead(nn.Module):
    """Cross-view attention head over four CNN feature maps.

    Each of the four inputs is flattened to (B, 512, 256), run through its
    own SelfAttention, and the four (B, 256, 256) attention maps are
    concatenated and mixed by per-view linear layers.  The mixed weights
    re-weight each view's value projection, which is added back to the
    flattened input as a residual.

    NOTE: assumes each input is (B, 512, H, W) with H*W == 256 (e.g. 16x16),
    since SelfAttention is built with input_dim=256 — confirm at call site.
    """

    def __init__(self, adj_mat=None):
        # adj_mat is accepted for interface compatibility but unused here.
        super().__init__()
        # One self-attention and one mixing layer per input view.
        self.self_att_module = nn.ModuleList()
        self.conv_module = nn.ModuleList()
        for _ in range(4):
            self.self_att_module.append(SelfAttention(256, 256))
            # 1024 = 4 * 256: consumes the concatenated attention maps.
            self.conv_module.append(nn.Linear(1024, 256))

    def forward(self, x):
        """input: [tensor, tensor, tensor, tensor], each (B, 512, H, W)."""
        B = x[0].shape[0]
        value_l = []
        att_weight_l = []
        cross_att_weight_l = []
        res_x = []

        # Per-view self-attention on the flattened spatial axis.
        for i in range(4):
            view_x = x[i].view(B, 512, -1)                       # (B, 512, 256)
            value, att_weight = self.self_att_module[i](view_x)
            value_l.append(value)                                # (B, 512, 256)
            att_weight_l.append(att_weight)                      # (B, 256, 256)

        # All four views' attention maps side by side: (B, 256, 1024).
        att_weights = torch.cat(att_weight_l, dim=2)

        # Mix the concatenated maps into per-view weights: (B, 256, 256).
        for i in range(4):
            cross_att_weight_l.append(self.conv_module[i](att_weights))

        # Apply the mixed weights to each view's values, residual-added
        # onto the flattened input.
        for i in range(4):
            # BUG FIX: the original used the leftover loop variable `value`
            # (always view 3's values) instead of `value_l[i]`; value_l was
            # collected but never read, which made the intent clear.
            weighted = torch.bmm(cross_att_weight_l[i], value_l[i].transpose(1, 2))
            res_x.append(weighted.transpose(1, 2) + x[i].view(B, 512, -1))

        return res_x


class HRCNHead(nn.Module):
    """Graph-convolution head over the spatial positions of feature maps.

    For each of four input feature maps, builds a similarity graph over
    the flattened spatial positions (hard-coded to 256, i.e. 16x16 maps
    are assumed — TODO confirm against the backbone), applies the GCN
    re-normalization trick, and runs one residual GCN layer.  Much of the
    commented-out code is the remains of a deeper (2-3 layer) variant.
    """

    def __init__(self, adj_mat=None):
        # adj_mat: optional prior adjacency; stored but only referenced by
        # dead (commented-out) code in edges_mat.
        super().__init__()
        self.gate = 0.0   # cfg.MODEL.HEADS.GATE — similarity threshold in edges_mat
        self.adj_mat = adj_mat
        self.dropout = 0.4   # cfg.MODEL.HEADS.DROPOUT — truthy, so the Dropout branch below always runs
        # NOTE(review): device is hard-coded; this breaks CPU-only runs — confirm intent.
        self.device = "cuda"
        # `````````````````````
        # First GCN weight matrix (512 -> 512), used in run_gcn.
        self.gcn_weight_1 = nn.Parameter(
                # torch.zeros([self.gcn_channels[0], self.gcn_channels[1]]).normal_(mean=0, std=0.01),
                torch.zeros([512, 512]).normal_(mean=0, std=0.01),    # 1024
                requires_grad=True
            )
        
        # The second weight matrices of GCN.
        # NOTE(review): unused by the current single-layer run_gcn (second
        # layer is commented out), but kept so checkpoints stay compatible.
        self.gcn_weight_2 = nn.Parameter(
                # torch.zeros([self.gcn_channels[1], self.gcn_channels[2]]).normal_(mean=0, std=0.01),
                torch.zeros([512, 512]).normal_(mean=0, std=0.01),      # 1024
                requires_grad=True
            )

        
        # self.gcn_weight_3 = nn.Parameter(
        #         # torch.zeros([self.gcn_channels[1], self.gcn_channels[2]]).normal_(mean=0, std=0.01),
        #         torch.zeros([2048//8, 1]).normal_(mean=0, std=0.01),      # 1024
        #         requires_grad=True
        #     )
        


        # The bias of GCN
        self.gcn_bias_1 = nn.Parameter(torch.zeros([512]), requires_grad=True)
        # 9/14
        # self.gcn_bias_1 = nn.Parameter(torch.zeros([2048]), requires_grad=True)
        # self.gcn_bias_1.extend([nn.Parameter(torch.zeros([self.gcn_channels[1] // pow(2, 2 - i)]),
        #                                      requires_grad=True)
        #                         for i in range(len(self.node_list))])
        # self.gcn_bias_1.append(nn.Parameter(torch.zeros([self.gcn_channels[1]]), requires_grad=True))
        # self.gcn_bias_1.append(nn.Parameter(torch.zeros([1024]), requires_grad=True))

        # NOTE(review): gcn_bias_2 is also unused by the current run_gcn.
        self.gcn_bias_2 = nn.Parameter(torch.zeros([512]), requires_grad=True)
        # self.gcn_bias_3 = nn.Parameter(torch.zeros([1]), requires_grad=True)
        # 9/14
        # self.gcn_bias_2 = nn.Parameter(torch.zeros([2048]), requires_grad=True)
        # self.gcn_bias_2.extend([nn.Parameter(torch.zeros([self.gcn_channels[2] // pow(2, 2 - i)]),
        #                                      requires_grad=True)
        #                         for i in range(len(self.node_list))])
        # self.gcn_bias_2.append(nn.Parameter(torch.zeros([self.gcn_channels[2]]), requires_grad=True))
        # self.gcn_bias_2.append(nn.Parameter(torch.zeros([1]), requires_grad=True))


        # Dropout per GCN layer (single-element ModuleLists; only index [0]
        # is ever used).  self.dropout == 0.4 is truthy, so the else branch
        # (Identity) is effectively dead with the current config.
        if self.dropout:
            self.gcn_dropout_1 = nn.ModuleList()
            self.gcn_dropout_1.extend([nn.Dropout(p=0.5) for _ in range(1)])
            self.gcn_dropout_2 = nn.ModuleList()
            self.gcn_dropout_2.extend([nn.Dropout(p=0.5) for _ in range(1)])

            self.gcn_dropout_3 = nn.ModuleList()
            self.gcn_dropout_3.extend([nn.Dropout(p=0.5) for _ in range(1)])
        else:
            # NOTE(review): gcn_dropout_3 is not created on this branch —
            # harmless today (branch unreachable with dropout=0.4) but a
            # latent AttributeError if the config ever disables dropout.
            self.gcn_dropout_1 = nn.ModuleList()
            self.gcn_dropout_1.extend([nn.Identity() for _ in range(1)])
            self.gcn_dropout_2 = nn.ModuleList()
            self.gcn_dropout_2.extend([nn.Identity() for _ in range(1)])

        # Batch norm after Dropout.
        self.gcn_bn_1 = nn.ModuleList()
        # self.gcn_bn_1.append(nn.BatchNorm1d(self.node_list[0]).apply(weights_init_kaiming))
        self.gcn_bn_1.append(nn.BatchNorm1d(512))   #.apply(weights_init_kaiming))
        # self.gcn_bn_1.extend([nn.BatchNorm1d(i + 1).apply(weights_init_kaiming)
        #                       for i in self.node_list[1:]])
        # self.gcn_bn_1.append(nn.BatchNorm1d(self.circle_num + 2).apply(weights_init_kaiming))
        self.gcn_bn_2 = nn.ModuleList()
        # self.gcn_bn_2.append(nn.BatchNorm1d(self.node_list[0]).apply(weights_init_kaiming))
        self.gcn_bn_2.append(nn.BatchNorm1d(512))    #.apply(weights_init_kaiming))
        # self.gcn_bn_2.extend([nn.BatchNorm1d(i + 1).apply(weights_init_kaiming)
        #                       for i in self.node_list[1:]])
        # self.gcn_bn_2.append(nn.BatchNorm1d(self.circle_num + 2).apply(weights_init_kaiming))
        self.gcn_bn_3 = nn.ModuleList()
        # self.gcn_bn_2.append(nn.BatchNorm1d(self.node_list[0]).apply(weights_init_kaiming))
        self.gcn_bn_3.append(nn.BatchNorm1d(512)) 

        # Downsampling layers: bias-free linear projections used for the
        # residual connection in run_gcn (only gcn_downsample_1 is used).
        self.gcn_downsample_1 = nn.ModuleList()
        # if self.gcn_channels[0] != self.gcn_channels[1]:

        self.gcn_downsample_1.append(
            nn.Sequential(
                # nn.Linear(self.gcn_channels[0], self.gcn_channels[1], bias=False)
                nn.Linear(512, 512, bias=False)
            ))


        self.gcn_downsample_2 = nn.ModuleList()
        # if self.gcn_channels[1] != self.gcn_channels[2]:
       
        self.gcn_downsample_2.append(
            nn.Sequential(
                # nn.Linear(self.gcn_channels[1], self.gcn_channels[2], bias=False),
                nn.Linear(512, 512, bias=False),
            ))

        # self.gcn_downsample_3 = nn.ModuleList()
        # # if self.gcn_channels[1] != self.gcn_channels[2]:
       
        # self.gcn_downsample_3.append(
        #     nn.Sequential(
        #         # nn.Linear(self.gcn_channels[1], self.gcn_channels[2], bias=False),
        #         nn.Linear(2048//8, 1, bias=False),
        #     ))

        # fc for martrix
        # self.fc_gcn_weight_1 = nn.Linear(2048, 2048//8)
        # self.fc_gcn_weight_2 = nn.Linear(2048//8, 1)

    def edges_mat(self, feat, reduce_diag=True, alpha=0.1):
        """Build a (b, 256, 256) adjacency matrix over spatial positions.

        feat: (b, c, h, w) feature map.  Nodes are the flattened spatial
        positions (hard-coded to 256 below, so h*w == 256 is assumed —
        TODO confirm).  Similarity is the channel dot product scaled by
        512, thresholded at self.gate, then L1-normalized per row.
        `alpha` is only referenced by dead code after the return.
        """
        features = feat.clone()
        b, n, _, _  = features.shape
        features = features.view(b, n, -1)

        # edges = features.bmm(features.permute(0, 2, 1)) / 256    # bmm requires matching dims; permute's arguments are the new axis order
        edges = features.permute(0, 2, 1).bmm(features) / 512


        # edges = torch.where(edges > self.gate, edges,
        #                         torch.zeros([b, n, n]).to(device=self.device, dtype=edges.dtype))
        # Zero out similarities at or below the gate threshold.
        edges = torch.where(edges > self.gate, edges,
                                torch.zeros([b, 256, 256]).to(device=self.device, dtype=edges.dtype))

        edges = F.normalize(edges, p=1, dim=-1)     # L1-normalization to restrain the weight

        if reduce_diag:      # drop each node's self-connection
            n = 256
            diag = torch.diag(torch.ones([n])).to(self.device)
            edges = torch.where(diag > 0, torch.zeros([b, n, n]).to(self.device), edges)

        return edges

        # Dead code: would blend the prior adjacency into the learned one.
        # edges = edges + alpha * self.adj_mat.to(self.device)

        # return edges

    def matrix_norm(self, edges):
        """Symmetric GCN normalization: D^-1/2 (A + I) D^-1/2 (float32).

        NOTE(review): `edges += ...` mutates the caller's tensor in place —
        confirm callers do not reuse the un-normalized matrix.
        """
        b, m, n = edges.shape
        edges += torch.diag(torch.ones(m).to(self.device))    # add an identity matrix In to keep the A' dominance
        #  re-normalization trick   Graph-Laplacian
        D_mat = torch.sum(edges, dim=-1).to(self.device)
        D_mat = torch.pow(D_mat, -0.5)
        D_mat = torch.diag_embed(D_mat)
        edges = D_mat.matmul(edges).matmul(D_mat).type(torch.float32)

        return edges


    def run_gcn(self, features, edges):    #, num):
        """One residual GCN layer over spatial positions.

        features: (b, c, h, w); edges: (b, h*w, h*w).
        Returns (b, c, h*w).  The commented-out code below is the
        second/third layer of a deeper variant.
        """
        # print(features.shape)  512, 16, 16
        # print(edges.shape)
        b, c, m,m = features.shape
        # Aggregate neighbours: (b, hw, hw) @ (b, hw, c) -> (b, hw, c).
        x1 = torch.matmul(edges, features.view(b, c, -1).permute(0, 2, 1))   # torch.Size([64, 256, 512])
        x1 = torch.matmul(x1, self.gcn_weight_1) + self.gcn_bias_1
        # x1 = self.fc_gcn_weight_1(x1)
        # Back to channels-first so BatchNorm1d normalizes the c channels.
        x1 = x1.permute(0, 2, 1)
        x1 = self.gcn_dropout_1[0](x1)
        x1 = self.gcn_bn_1[0](x1)
        x1 = F.relu(x1)

        # Residual connection through a bias-free linear projection.
        x1 = self.gcn_downsample_1[0](features.view(b, c, -1).permute(0, 2, 1)).permute(0, 2, 1) + x1

        # x2 = torch.matmul(edges, x1)
        # x2 = torch.matmul(x2, self.gcn_weight_2) + self.gcn_bias_2
        # # x2 = self.fc_gcn_weight_2(x2)
        # x2 = self.gcn_dropout_2[0](x2)
        # x2 = self.gcn_bn_2[0](x2)
        # x2 = F.relu(x2)
        # x2 = self.gcn_downsample_2[0](x1) + x2

        # x3 = torch.matmul(edges, x2)
        # x3 = torch.matmul(x3, self.gcn_weight_3) + self.gcn_bias_3
        # x3 = self.gcn_dropout_3[0](x3)
        # x3 = self.gcn_bn_3[0](x3)
        # x3 = F.relu(x3)
        # x3 = self.gcn_downsample_3[0](x2) + x3

        return x1

    def forward(self, x):
        """Run edge construction + one GCN layer per view.

        x: list of four feature maps (see run_gcn for shapes).
        Returns the list of four GCN outputs.
        NOTE(review): edges_att is computed but never used or returned —
        possibly leftover from an attention variant; confirm before removal.
        """
        # tensors
        edges = []
        gcn_feats = []
        for i in range(4):
            edge = self.edges_mat(x[i])    # , reduce_diag=self.reduce_diag)
            edge = self.matrix_norm(edge)
            edges.append(edge)
            gcn_feat = self.run_gcn(x[i], edge)
            gcn_feats.append(gcn_feat)
        # print(x)
        edges_att = torch.stack(edges, dim=-1)
        # print(edges_att.shape)   torch.Size([64, 256, 256, 4])
        
        # raise "ss"
        # edges = self.edges_mat(x)
        # print(len(gcn_feats))
        # print(gcn_feats[0].shape)
        # raise "sssss"
            #, idx)
        return gcn_feats