import torch.nn as nn
import torch
import numpy as np
from torch import nn
from torch.nn import functional as F

class HRCNHead(nn.Module):
    """Relation-GCN head over part features.

    For each sample, builds a thresholded feature-similarity graph over the
    ``n`` part features, masks it by pairwise visibility, applies the GCN
    re-normalization trick (D^-1/2 (A + I) D^-1/2), and runs a two-layer
    residual GCN that maps each 2048-d node feature down to a scalar.

    Args:
        adj_mat: optional prior adjacency matrix. Stored but not used by
            ``forward`` in the current code path (the blending line is
            disabled).
    """

    def __init__(self, adj_mat=None):
        super().__init__()
        self.gate = 0.0        # cfg.MODEL.HEADS.GATE: similarity threshold for keeping an edge
        self.adj_mat = adj_mat
        self.dropout = 0.4     # cfg.MODEL.HEADS.DROPOUT: truthy -> dropout enabled
        # NOTE(review): kept only for backward compatibility with external
        # readers of this attribute. Tensors created inside the methods below
        # now follow the *input* tensor's device, so the head also runs on CPU.
        self.device = "cuda"

        # First GCN layer weight: 2048 -> 2048//8 (=256).
        self.gcn_weight_1 = nn.Parameter(
            torch.zeros([2048, 2048 // 8]).normal_(mean=0, std=0.01),
            requires_grad=True,
        )
        # Second GCN layer weight: 2048//8 -> 1 (a scalar per node).
        self.gcn_weight_2 = nn.Parameter(
            torch.zeros([2048 // 8, 1]).normal_(mean=0, std=0.01),
            requires_grad=True,
        )

        # Biases of the two GCN layers, matching the output widths above.
        self.gcn_bias_1 = nn.Parameter(torch.zeros([2048 // 8]), requires_grad=True)
        self.gcn_bias_2 = nn.Parameter(torch.zeros([1]), requires_grad=True)

        # Dropout after each GCN layer, or identity when disabled.
        # NOTE(review): p=0.5 is hard-coded; self.dropout (0.4) only gates
        # on/off and is NOT used as the drop probability — confirm intended.
        if self.dropout:
            self.gcn_dropout_1 = nn.ModuleList([nn.Dropout(p=0.5)])
            self.gcn_dropout_2 = nn.ModuleList([nn.Dropout(p=0.5)])
            self.gcn_dropout_3 = nn.ModuleList([nn.Dropout(p=0.5)])
        else:
            self.gcn_dropout_1 = nn.ModuleList([nn.Identity()])
            self.gcn_dropout_2 = nn.ModuleList([nn.Identity()])
            # Added for symmetry with the branch above (layer 3 is currently
            # disabled in run_gcn, so this is never exercised).
            self.gcn_dropout_3 = nn.ModuleList([nn.Identity()])

        # Batch norm over the node dimension after dropout.
        # NOTE(review): BatchNorm1d(4) hard-codes 4 graph nodes per sample —
        # confirm against the caller's part count.
        self.gcn_bn_1 = nn.ModuleList([nn.BatchNorm1d(4)])
        self.gcn_bn_2 = nn.ModuleList([nn.BatchNorm1d(4)])
        self.gcn_bn_3 = nn.ModuleList([nn.BatchNorm1d(4)])  # unused while layer 3 is disabled

        # Residual ("downsample") projections matching each layer's channel change.
        self.gcn_downsample_1 = nn.ModuleList([
            nn.Sequential(nn.Linear(2048, 2048 // 8, bias=False)),
        ])
        self.gcn_downsample_2 = nn.ModuleList([
            nn.Sequential(nn.Linear(2048 // 8, 1, bias=False)),
        ])

    def edges_mat(self, feat, reduce_diag=True, alpha=0.1):
        """Build a thresholded, row-L1-normalized similarity adjacency.

        Args:
            feat: node features, shape (b, n, m).
            reduce_diag: if True, zero the self-loop diagonal.
            alpha: unused; kept for interface compatibility (the prior-
                adjacency blend that used it is disabled).

        Returns:
            (b, n, n) edge weights, on ``feat``'s device.
        """
        features = feat.clone()
        b, n, m = features.shape
        # Scaled dot-product similarity between every pair of nodes.
        # (bmm requires matching batch dims; permute swaps the last two.)
        edges = features.bmm(features.permute(0, 2, 1)) / m

        # Allocate once on the input's device (was hard-coded to CUDA).
        zeros = torch.zeros([b, n, n], device=features.device, dtype=edges.dtype)

        # Drop edges at or below the gate, then L1-normalize each row to
        # restrain the total edge weight per node.
        edges = torch.where(edges > self.gate, edges, zeros)
        edges = F.normalize(edges, p=1, dim=-1)

        if reduce_diag:
            # Remove self-to-self similarity (no self-loops here; the
            # identity is added back later in matrix_norm).
            diag = torch.diag(torch.ones([n], device=features.device))
            edges = torch.where(diag > 0, zeros, edges)

        return edges

    def matrix_norm(self, edges):
        """Symmetric re-normalization trick: D^-1/2 (A + I) D^-1/2.

        Args:
            edges: (b, n, n) adjacency. Not modified (the original version
                mutated it in place with ``+=``).

        Returns:
            Normalized float32 adjacency of the same shape.
        """
        b, m, n = edges.shape
        # Add identity to keep the diagonal dominant (out-of-place).
        edges = edges + torch.diag(torch.ones(m, device=edges.device))
        # Degree matrix D^-1/2; safe because A + I guarantees row sums >= 1.
        d_mat = torch.sum(edges, dim=-1)
        d_mat = torch.pow(d_mat, -0.5)
        d_mat = torch.diag_embed(d_mat)
        return d_mat.matmul(edges).matmul(d_mat).type(torch.float32)

    def run_gcn(self, features, edges):
        """Two residual GCN layers: (b, n, 2048) -> (b, n, 1).

        Each layer is: aggregate (A @ X), project (X @ W + b), dropout,
        batch-norm over the node dimension, ReLU, plus a linear residual
        from the layer input.
        """
        # Layer 1: 2048 -> 2048//8.
        x1 = torch.matmul(edges, features)
        x1 = torch.matmul(x1, self.gcn_weight_1) + self.gcn_bias_1
        x1 = self.gcn_dropout_1[0](x1)
        x1 = self.gcn_bn_1[0](x1)
        x1 = F.relu(x1)
        x1 = self.gcn_downsample_1[0](features) + x1

        # Layer 2: 2048//8 -> 1.
        x2 = torch.matmul(edges, x1)
        x2 = torch.matmul(x2, self.gcn_weight_2) + self.gcn_bias_2
        x2 = self.gcn_dropout_2[0](x2)
        x2 = self.gcn_bn_2[0](x2)
        x2 = F.relu(x2)
        x2 = self.gcn_downsample_2[0](x1) + x2

        return x2

    def forward(self, x, vis):
        """Run the relation GCN.

        Args:
            x: part features, shape (b, n, 2048) — n presumably 4, matching
                the BatchNorm1d(4) layers; confirm with the caller.
            vis: per-part visibility scores, shape (b, n).

        Returns:
            (b, n, 1) per-node GCN outputs.
        """
        f_vis = vis.clone().unsqueeze(-1)
        b, n, m = f_vis.shape

        # Pairwise visibility mask: an edge only survives if both ends are
        # visible; row-L1-normalized like the similarity edges.
        edges_vis = f_vis.bmm(f_vis.permute(0, 2, 1))
        edges_vis = F.normalize(edges_vis, p=1, dim=-1)

        edges = self.edges_mat(x)
        edges = edges * edges_vis
        edges = self.matrix_norm(edges)
        gcn_feats = self.run_gcn(x, edges)
        return gcn_feats
# class FPA(nn.Module):
#     def __init__(self, channels=2048):
#         """
#         Feature Pyramid Attention
#         :type channels: int
#         """
#         super(FPA, self).__init__()
#         channels_mid = int(channels/4)

#         self.channels_cond = channels

#         # Master branch
#         self.conv_master = nn.Conv2d(self.channels_cond, channels, kernel_size=1, bias=False)
#         self.bn_master = nn.BatchNorm2d(channels)

#         # Global pooling branch
#         self.conv_gpb = nn.Conv2d(self.channels_cond, channels, kernel_size=1, bias=False)
#         self.bn_gpb = nn.BatchNorm2d(channels)

#         # C333 because of the shape of last feature maps is (16, 16).
#         self.conv7x7_1 = nn.Conv2d(self.channels_cond, channels_mid, kernel_size=(7, 7), stride=2, padding=3, bias=False)
#         self.bn1_1 = nn.BatchNorm2d(channels_mid)
#         self.conv5x5_1 = nn.Conv2d(channels_mid, channels_mid, kernel_size=(5, 5), stride=2, padding=2, bias=False)
#         self.bn2_1 = nn.BatchNorm2d(channels_mid)
#         self.conv3x3_1 = nn.Conv2d(channels_mid, channels_mid, kernel_size=(3, 3), stride=2, padding=1, bias=False)
#         self.bn3_1 = nn.BatchNorm2d(channels_mid)

#         self.conv7x7_2 = nn.Conv2d(channels_mid, channels_mid, kernel_size=(7, 7), stride=1, padding=3, bias=False)
#         self.bn1_2 = nn.BatchNorm2d(channels_mid)
#         self.conv5x5_2 = nn.Conv2d(channels_mid, channels_mid, kernel_size=(5, 5), stride=1, padding=2, bias=False)
#         self.bn2_2 = nn.BatchNorm2d(channels_mid)
#         self.conv3x3_2 = nn.Conv2d(channels_mid, channels_mid, kernel_size=(3, 3), stride=1, padding=1, bias=False)
#         self.bn3_2 = nn.BatchNorm2d(channels_mid)

#         # Convolution Upsample
#         self.conv_upsample_3 = nn.ConvTranspose2d(channels_mid, channels_mid, kernel_size=4, stride=2, padding=1, bias=False)
#         self.bn_upsample_3 = nn.BatchNorm2d(channels_mid)

#         self.conv_upsample_2 = nn.ConvTranspose2d(channels_mid, channels_mid, kernel_size=4, stride=2, padding=1, bias=False)
#         self.bn_upsample_2 = nn.BatchNorm2d(channels_mid)

#         self.conv_upsample_1 = nn.ConvTranspose2d(channels_mid, channels, kernel_size=4, stride=2, padding=1, bias=False)
#         self.bn_upsample_1 = nn.BatchNorm2d(channels)

#         self.relu = nn.ReLU(inplace=True)

#     def forward(self, x):
#         """
#         :param x: Shape: [b, 2048, h, w]
#         :return: out: Feature maps. Shape: [b, 2048, h, w]    [1, 2048, 16, 16]
#         """
#         # Master branch
#         x_master = self.conv_master(x)
#         x_master = self.bn_master(x_master)

#         # Global pooling branch
#         x_gpb = nn.AvgPool2d(x.shape[2:])(x).view(x.shape[0], self.channels_cond, 1, 1)
#         x_gpb = self.conv_gpb(x_gpb)
#         x_gpb = self.bn_gpb(x_gpb)

#         # Branch 1
#         x1_1 = self.conv7x7_1(x)
#         x1_1 = self.bn1_1(x1_1)
#         x1_1 = self.relu(x1_1)
#         x1_2 = self.conv7x7_2(x1_1)
#         x1_2 = self.bn1_2(x1_2)

#         # Branch 2
#         x2_1 = self.conv5x5_1(x1_1)
#         x2_1 = self.bn2_1(x2_1)
#         x2_1 = self.relu(x2_1)
#         x2_2 = self.conv5x5_2(x2_1)
#         x2_2 = self.bn2_2(x2_2)

#         # Branch 3
#         x3_1 = self.conv3x3_1(x2_1)
#         x3_1 = self.bn3_1(x3_1)
#         x3_1 = self.relu(x3_1)
#         x3_2 = self.conv3x3_2(x3_1)
#         x3_2 = self.bn3_2(x3_2)

#         # Merge branch 1 and 2
#         x3_upsample = self.relu(self.bn_upsample_3(self.conv_upsample_3(x3_2)))
#         x2_merge = self.relu(x2_2 + x3_upsample)
#         x2_upsample = self.relu(self.bn_upsample_2(self.conv_upsample_2(x2_merge)))
#         x1_merge = self.relu(x1_2 + x2_upsample)

#         x_master = x_master * self.relu(self.bn_upsample_1(self.conv_upsample_1(x1_merge)))

#         #
#         out = self.relu(x_master + x_gpb)

#         return out



# class GAU(nn.Module):
#     def __init__(self, channels_high, channels_low, id):
#         super(GAU, self).__init__()
#         # Global Attention Upsample
#         if id == 0:
#             self.conv3x3 = nn.Conv2d(channels_low, channels_high, kernel_size=3, padding=1, bias=False)
#         elif id == 1:
#             self.conv3x3 = nn.Conv2d(channels_low, channels_high, kernel_size=3, padding=1, stride=2, bias=False)
#         elif id == 2:
#             self.conv3x3 = nn.Conv2d(channels_low, channels_high, kernel_size=3, padding=0, stride=4, bias=False)
#         self.bn_low = nn.BatchNorm2d(channels_high)

#         self.conv1x1 = nn.Conv2d(channels_high, channels_high, kernel_size=1, padding=0, bias=False)
#         self.bn_high = nn.BatchNorm2d(channels_high)

#         self.conv_reduction = nn.Conv2d(channels_high, channels_high, kernel_size=1, padding=0, bias=False)
#         self.bn_reduction = nn.BatchNorm2d(channels_high)
#         self.relu = nn.ReLU(inplace=True)

#     def forward(self, fms_high, fms_low, fm_mask=None):
#         """
#         Use the high level features with abundant catagory information to weight the low level features with pixel
#         localization information. In the meantime, we further use mask feature maps with catagory-specific information
#         to localize the mask position.
#         :param fms_high: Features of high level. Tensor.
#         :param fms_low: Features of low level.  Tensor.
#         :param fm_mask:
#         :return: fms_att_upsample
#         """
#         b, c, h, w = fms_high.shape

#         fms_high_gp = nn.AvgPool2d(fms_high.shape[2:])(fms_high).view(len(fms_high), c, 1, 1)
#         fms_high_gp = self.conv1x1(fms_high_gp)
#         fms_high_gp = self.bn_high(fms_high_gp)
#         fms_high_gp = self.relu(fms_high_gp)

#         # fms_low_mask = torch.cat([fms_low, fm_mask], dim=1)
#         fms_low_mask = self.conv3x3(fms_low)
#         fms_low_mask = self.bn_low(fms_low_mask)

#         fms_att = fms_low_mask * fms_high_gp

#         out = self.relu(
#             self.bn_reduction(self.conv_reduction(fms_high)) + fms_att)

#         return out


# class CAF(nn.Module):
#     def __init__(self, channels_high, channels_low, id):
#         super(CAF, self).__init__()
#         # Global Attention Upsample
#         if id == 0:
#             self.conv3x3 = nn.Conv2d(channels_low, channels_high, kernel_size=3, padding=1, bias=False)
#         elif id == 1:
#             self.conv3x3 = nn.Conv2d(channels_low, channels_high, kernel_size=3, padding=1, stride=2, bias=False)
#         elif id == 2:
#             self.conv3x3 = nn.Conv2d(channels_low, channels_high, kernel_size=3, padding=0, stride=4, bias=False)
#         self.bn_low = nn.BatchNorm2d(channels_high)

#         self.conv1x1 = nn.Conv2d(channels_high, channels_high, kernel_size=1, padding=0, bias=False)
#         self.bn_high = nn.BatchNorm2d(channels_high)

#         self.conv_reduction = nn.Conv2d(channels_high, channels_high, kernel_size=1, padding=0, bias=False)
#         self.bn_reduction = nn.BatchNorm2d(channels_high)
#         self.relu = nn.ReLU(inplace=True)
#         self.d_k = 2048/8
#         self.dropout=nn.Dropout(0.3)

#     def forward(self, fms_high, fms_low, fm_mask=None):
#         """
#         Use the high level features with abundant catagory information to weight the low level features with pixel
#         localization information. In the meantime, we further use mask feature maps with catagory-specific information
#         to localize the mask position.
#         :param fms_high: Features of high level. Tensor.
#         :param fms_low: Features of low level.  Tensor.
#         :param fm_mask:
#         :return: fms_att_upsample
#         """
#         b, c, h, w = fms_high.shape

#         # fms_high_gp = nn.AvgPool2d(fms_high.shape[2:])(fms_high)
#         # fms_high_gp = self.relu(fms_high_gp)
#         # fms_high = fms_high + fms_high*torch.sigmoid(fms_high_gp)

#         fms_high = self.conv1x1(fms_high)
#         fms_high = self.bn_high(fms_high)
#         fms_high = self.relu(fms_high)

#         fms_high_cn = fms_high.reshape(b, c, h * w)
#         # fms_high_cn = self.bn_high(fms_high_cn)
#         # fms_low_mask = torch.cat([fms_low, fm_mask], dim=1)
#         fms_low_cn = self.conv3x3(fms_low).reshape(b, c, h*w)
#         # fms_low_cn = self.bn_low(fms_low_cn)
#         mask = torch.transpose(fms_high_cn, dim0=1, dim1=2)@fms_low_cn
#         mask = mask/np.sqrt(self.d_k)
#         att = torch.softmax(mask, -1)
#         att=self.dropout(att)
#         # mask = self.dropout(mask)
#         mask_out = (fms_high_cn@att).reshape(b, c, h, w)
    
#         # out = self.relu(self.bn_reduction(self.conv_reduction(fms_high) + mask_out))

#         # out = self.relu(
#         #     self.bn_reduction(self.conv_reduction(fms_high + mask_out)))

#         out = fms_high + mask_out
#         return out