import torch
import torch.nn as nn


import numpy as np
import torch
from torch import nn
from torch.nn import init
from torch.nn import functional as F



class SimplifiedScaledDotProductAttention(nn.Module):
    '''
    Simplified multi-head scaled dot-product attention.

    Unlike the standard formulation there are no input projections: queries,
    keys and values are reshaped directly into `h` heads, attended, and passed
    through a single output projection `fc_o`.
    '''

    def __init__(self, d_model, h, dropout=.1):
        '''
        :param d_model: Output dimensionality of the model (must be divisible by h)
        :param h: Number of heads
        :param dropout: Dropout probability applied to the attention map
        :raises ValueError: if d_model is not divisible by h (otherwise the
            head reshape in forward() would fail with an opaque shape error)
        '''
        super(SimplifiedScaledDotProductAttention, self).__init__()
        if d_model % h != 0:
            raise ValueError(f'd_model ({d_model}) must be divisible by h ({h})')

        self.d_model = d_model
        self.d_k = d_model // h   # per-head dimensionality of queries/keys
        self.d_v = d_model // h   # per-head dimensionality of values
        self.h = h

        self.fc_o = nn.Linear(h * self.d_v, d_model)
        self.dropout = nn.Dropout(dropout)

        self.init_weights()

    def init_weights(self):
        # Kaiming for convs, constants for BN, small-std normal for linears.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant_(m.bias, 0)

    def forward(self, queries, keys, values, attention_mask=None, attention_weights=None):
        '''
        Computes multi-head scaled dot-product attention.

        :param queries: Queries (b_s, nq, d_model)
        :param keys: Keys (b_s, nk, d_model)
        :param values: Values (b_s, nk, d_model)
        :param attention_mask: Mask over attention values (b_s, h, nq, nk). True indicates masking.
        :param attention_weights: Multiplicative weights for attention values (b_s, h, nq, nk).
        :return: Attended features (b_s, nq, d_model)
        '''
        b_s, nq = queries.shape[:2]
        nk = keys.shape[1]

        q = queries.view(b_s, nq, self.h, self.d_k).permute(0, 2, 1, 3)  # (b_s, h, nq, d_k)
        k = keys.view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1)     # (b_s, h, d_k, nk)
        v = values.view(b_s, nk, self.h, self.d_v).permute(0, 2, 1, 3)   # (b_s, h, nk, d_v)

        att = torch.matmul(q, k) / np.sqrt(self.d_k)  # (b_s, h, nq, nk)
        if attention_weights is not None:
            att = att * attention_weights
        if attention_mask is not None:
            # NOTE: a row whose positions are ALL masked softmaxes to NaN.
            att = att.masked_fill(attention_mask, -np.inf)
        att = torch.softmax(att, -1)
        att = self.dropout(att)

        # Merge heads back and project to d_model.
        out = torch.matmul(att, v).permute(0, 2, 1, 3).contiguous().view(b_s, nq, self.h * self.d_v)  # (b_s, nq, h*d_v)
        out = self.fc_o(out)  # (b_s, nq, d_model)
        return out


class HRCNHead(nn.Module):
    """Graph-convolution head (HRCN).

    Treats the input ``(b, C, n)`` as ``n`` graph nodes with ``C``-dim
    features (C must be 512 to match ``gcn_weight_1``), builds a
    similarity-based adjacency matrix (``edges_mat``), applies the GCN
    re-normalization trick (``matrix_norm``), and runs a single
    graph-convolution layer (``run_gcn``).
    """

    def __init__(self, adj_mat=None):
        """
        :param adj_mat: optional prior adjacency matrix. Not used in the
            active code path; kept for interface compatibility.
        """
        super().__init__()
        self.gate = 0.0      # cfg.MODEL.HEADS.GATE: edge weights <= gate are pruned
        self.adj_mat = adj_mat
        self.dropout = 0.4   # cfg.MODEL.HEADS.DROPOUT
        # Was hard-coded to "cuda", which crashed on CPU-only machines. The
        # compute methods below now follow the device of their inputs; this
        # attribute is kept only for backward compatibility with callers.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

        # Weight of the single active GCN layer (node features are 512-dim).
        self.gcn_weight_1 = nn.Parameter(
            torch.zeros([512, 512]).normal_(mean=0, std=0.01),
            requires_grad=True
        )

        # Biases. Only gcn_bias_1 is used by run_gcn; gcn_bias_2 belongs to a
        # deeper (currently disabled) GCN stack and is registered so existing
        # checkpoints keep loading.
        self.gcn_bias_1 = nn.Parameter(torch.zeros([512]), requires_grad=True)
        self.gcn_bias_2 = nn.Parameter(torch.zeros([2048]), requires_grad=True)

        # Dropout / BatchNorm / downsampling layers for the disabled deeper
        # GCN stack; registered unchanged so state_dicts stay compatible.
        if self.dropout:
            self.gcn_dropout_1 = nn.ModuleList([nn.Dropout(p=0.5)])
            self.gcn_dropout_2 = nn.ModuleList([nn.Dropout(p=0.5)])
            self.gcn_dropout_3 = nn.ModuleList([nn.Dropout(p=0.5)])
        else:
            self.gcn_dropout_1 = nn.ModuleList([nn.Identity()])
            self.gcn_dropout_2 = nn.ModuleList([nn.Identity()])

        # Batch norm after Dropout (4 = expected number of graph nodes).
        self.gcn_bn_1 = nn.ModuleList([nn.BatchNorm1d(4)])
        self.gcn_bn_2 = nn.ModuleList([nn.BatchNorm1d(4)])
        self.gcn_bn_3 = nn.ModuleList([nn.BatchNorm1d(4)])

        # Downsampling layers (residual projections for the disabled stack).
        self.gcn_downsample_1 = nn.ModuleList([
            nn.Sequential(nn.Linear(2048, 2048 // 8, bias=False))
        ])
        self.gcn_downsample_2 = nn.ModuleList([
            nn.Sequential(nn.Linear(2048 // 8, 2048, bias=False))
        ])

    def edges_mat(self, feat, reduce_diag=True, alpha=0.1):
        """Build a (b, n, n) adjacency matrix from node features.

        :param feat: node features, shape (b, n, m)
        :param reduce_diag: if True, zero out self-loop entries
        :param alpha: unused in the active path (was the mixing weight for a
            prior adjacency matrix)
        :return: thresholded, row-L1-normalized adjacency, shape (b, n, n)
        """
        features = feat.clone()
        b, n, m = features.shape
        device = features.device  # follow the input's device (CPU or CUDA)

        # Inner-product similarity, scaled by the feature dimension.
        edges = features.bmm(features.permute(0, 2, 1)) / m

        # Prune weak edges (weights not exceeding the gate threshold).
        edges = torch.where(edges > self.gate, edges,
                            torch.zeros([b, n, n]).to(device=device, dtype=edges.dtype))

        # L1-normalization to restrain the weight of each row.
        edges = F.normalize(edges, p=1, dim=-1)

        if reduce_diag:  # remove each node's edge to itself
            diag = torch.diag(torch.ones([n])).to(device)
            edges = torch.where(diag > 0, torch.zeros([b, n, n]).to(device), edges)

        return edges

    def matrix_norm(self, edges):
        """Re-normalization trick: D^{-1/2} (A + I) D^{-1/2} (graph-Laplacian style)."""
        b, m, n = edges.shape
        device = edges.device
        # Add the identity to keep A' diagonally dominant. Done out-of-place
        # on purpose: the original `+=` mutated the caller's tensor.
        edges = edges + torch.diag(torch.ones(m).to(device))
        D_mat = torch.sum(edges, dim=-1)   # degree vector (>= 1 thanks to +I)
        D_mat = torch.pow(D_mat, -0.5)
        D_mat = torch.diag_embed(D_mat)
        edges = D_mat.matmul(edges).matmul(D_mat).type(torch.float32)

        return edges

    def run_gcn(self, features, edges):
        """Single GCN layer: (A' X^T) W + b, returned in the input layout.

        :param features: (b, C, n) with C == 512
        :param edges: normalized adjacency, (b, n, n)
        :return: (b, C, n)
        """
        x1 = torch.matmul(edges, features.permute(0, 2, 1))  # (b, n, C)
        x1 = torch.matmul(x1, self.gcn_weight_1) + self.gcn_bias_1

        return x1.permute(0, 2, 1)

    def forward(self, x):
        """
        :param x: node features (b, C, n); C must be 512
        :return: graph-convolved features (b, C, n)
        """
        edges = self.edges_mat(x.permute(0, 2, 1))
        edges = self.matrix_norm(edges)
        gcn_feats = self.run_gcn(x, edges)

        return gcn_feats



class IncepOctaveConvX(nn.Module):
    """Inception-style octave convolution block.

    Splits the input channels into a "high-frequency" part ``X_h`` (the first
    ``alpha`` fraction) and a "low-frequency" part ``X_l``. ``X_h`` is halved
    into two branches (pool+fc1, fc2+conv); ``X_l`` is average-pooled, run
    through an HRCN graph head over its flattened spatial grid, then upsampled
    back. The three branch outputs are concatenated along the channel dim.

    NOTE(review): forward() hard-codes 512 channels when reshaping the HRCN
    output, so (in_channels - int(alpha*in_channels)) must equal 512 — confirm
    against callers. ``self.ssa`` is constructed but only used in the
    commented-out attention path.
    """

    def __init__(self, in_channels, out_channels, kernel_size=(1, 1), alpha=0.5, stride=1, padding=0, dilation=1,
                 groups=1, bias=False):
        super(IncepOctaveConvX, self).__init__()
        self.alpha = alpha
        self.h2g_pool = nn.MaxPool2d(1)  # kernel size 1: effectively identity
        self.l2g_pool = nn.AvgPool2d(kernel_size=(2, 2), stride=2)  # 2x spatial downsample of the low branch
        self.upsample = torch.nn.Upsample(scale_factor=2, mode='nearest')
        self.stride = stride
        # Conv over the second half of the high-frequency channels.
        self.hconv = nn.Conv2d(int((alpha * in_channels) / 2), int((alpha * out_channels) / 2),
                               kernel_size,  padding=padding)
        # self.hconv = nn.Conv2d(512, 512, kernel_size=(1, 1), padding=0)
        # Channel-wise linear layers for the two high-frequency halves.
        self.fc1 = nn.Linear(int((alpha * in_channels) / 2), int((alpha * in_channels) / 2))
        self.fc2 = nn.Linear(int((alpha * in_channels) / 2), int((alpha * in_channels) / 2))
        # self.fc1 = nn.Linear(512, 512)
        # self.fc_share = nn.Linear(in_channels, in_channels)

        # Attention over the low-frequency branch — currently unused in
        # forward() (the active path routes through self.hrcn instead).
        # self.ssa = SimplifiedScaledDotProductAttention(d_model=1024, h=8)  # n_channels - int(alpha * in_channels)
        self.ssa = SimplifiedScaledDotProductAttention(d_model=in_channels-int(alpha * in_channels), h=8)
        self.hrcn = HRCNHead()
        # self.l2l = torch.nn.Conv2d(int(alpha * in_channels), int(alpha * out_channels),
        #                            kernel_size, 1, padding, dilation, groups, bias)
        # self.l2h = torch.nn.Conv2d(int(alpha * in_channels), out_channels - int(alpha * out_channels),
        #                            kernel_size, 1, padding, dilation, groups, bias)
        # self.h2l = torch.nn.Conv2d(in_channels - int(alpha * in_channels), int(alpha * out_channels),
        #                            kernel_size, 1, padding, dilation, groups, bias)
        # self.h2h = torch.nn.Conv2d(in_channels - int(alpha * in_channels),
        #                            out_channels - int(alpha * out_channels),
        #                            kernel_size, 1, padding, dilation, groups, bias)
        # self.hconv_share = nn.Conv2d(in_channels, in_channels,
        #                 kernel_size,  padding=padding)

    def forward(self, x):
        """
        :param x: input feature map (b, dim, h, w); assumes h == w and even —
            TODO confirm (the low branch relies on l2g_pool halving + 2x upsample)
        :return: concatenation of the three branch outputs along dim=1
        """
        # alpha = self.alpha
        b, dim, h, w = x.shape
        # Channel split: first alpha fraction is "high", remainder is "low".
        X_h, X_l = x[:, :int(dim*self.alpha), :, :], x[:, int(dim*self.alpha):, :, :]
        # X_h, X_l = x[:, :1024, :, :], x[:, 1024:, :, :]
        # X_h, X_l = x
        _, dim_h, _, _ = X_h.shape
        # Halve the high-frequency channels into two sub-branches.
        X_h_1, X_h_2 = X_h[:, :dim_h // 2, :, :], X_h[:, dim_h // 2:, :, :]
        # X_h_1, X_h_2 = X_h[:, :512, :, :], X_h[:, 512:, :, :]
        # if self.stride ==2:
        #     X_h, X_l = self.h2g_pool(X_h), self.h2g_pool(X_l)

        # Branch 1: (identity) pool, then per-pixel linear over channels.
        x_h_1 = self.h2g_pool(X_h_1)
        X_h_1_f = self.fc1(x_h_1.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)

        # Branch 2: per-pixel linear over channels, then conv.
        x_h_2 = self.fc2(X_h_2.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        X_h_2_f = self.hconv(x_h_2)
        # print(X_h_2_f.shape)
        # raise "ss"
        # Branch 3 (low): avg-pool halves the spatial size to N x N.
        _, dim_l, _, weight_l = X_l.shape
        xinput_N = weight_l//2
        X_l = self.l2g_pool(X_l)
        # Flatten the N x N grid into N*N graph nodes for the HRCN head.
        xinput = X_l.reshape(b, dim_l, xinput_N*xinput_N)    # .permute(0, 2, 1)
        res_hrcn = self.hrcn(xinput)

        # xinput = X_l.reshape(b, 1024, 64).permute(0, 2, 1)
        # x_l = self.ssa(xinput, xinput, xinput).permute(0, 2, 1).reshape(b, 1024, 8, 8)
        # x_l = self.ssa(xinput, xinput, xinput).permute(0, 2, 1).reshape(b, dim_l, xinput_N, xinput_N)
        # x_l_f = self.upsample(x_l)
        # NOTE(review): 512 is hard-coded here, so dim_l must equal 512 for
        # this reshape to be valid — verify against the channel split above.
        res_hrcn = res_hrcn.reshape(b, 512, xinput_N, xinput_N)
        x_l_f = self.upsample(res_hrcn)


        # X_l2l = self.l2l(X_l)
        # X_h2l = self.h2l(X_h2l)

        # X_l2h = self.upsample(X_l2h)
        # X_h = X_l2h + X_h2h
        # X_l = X_h2l + X_l2l
        # Fuse: both high branches keep the input resolution; the low branch
        # was pooled by 2 then upsampled by 2, so spatial sizes match.
        x_fusion = torch.cat([X_h_1_f, X_h_2_f, x_l_f], dim=1)
        
        # x_fusion = self.fc_share((x_fusion+ self.hconv_share(x_fusion)).permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        # print(x_fusion.shape)
        # raise "sss"
        return x_fusion
