import torch
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import math
from torch.autograd import Variable
import numpy as np

from resnet import resnet50  # note: resnet50 is used here, not resnet18
from vgg import vgg16


config_vgg = {'convert': [[128,256,512,512,512],
                          [64,128,256,512,512]],
                'psfem': [[128, 256, 128, 3,1], 
                            [256, 512, 256, 3, 1], 
                            [512, 0, 512, 5, 2], 
                            [512, 0, 512, 5, 2],
                            [512, 0, 512, 7, 3]], 
                    'o2ogm': [[128], [256, 512, 512, 512]]}  
# VGG variant: no convert layer and no conv6, as stated in the paper.

config_resnet = {'convert': [[64,256,512,1024,2048],[128,256,512,512,512]],
                'deep_pool': [[512, 512, 256, 256, 128],
                                [512, 256, 256, 128, 128],
                                [False, True, True, True, False],
                                    [True, True, True, True, False]],
                'score': 256, 
                'edgeinfo':[[16, 16, 16, 16], 128, [16,8,4,2]],
                'edgeinfoc':[64,128], 
                'block': [[512, [16]], [256, [16]], [256, [16]], [128, [16]]], 
                'fuse': [[16, 16, 16, 16], True], 
                'fuse_ratio': [[16,1], [8,1], [4,1], [2,1]], 
                    'psfem': [[128, 256, 128, 3,1],
                                [256, 512, 256, 3, 1], 
                                [512, 0, 512, 5, 2],
                                [512, 0, 512, 5, 2],
                                [512, 0, 512, 7, 3]],
                                    'o2ogm': [[128], [256, 512, 512, 512]]}
# Layer hyper-parameters defined separately for the VGG and ResNet backbones.
# NOTE(review): only 'convert', 'psfem' and 'o2ogm' are referenced in this
# file; the other keys (e.g. 'deep_pool', 'edgeinfo') are presumably used
# elsewhere — confirm before removing.

class ConvertLayer(nn.Module):
    """Channel-conversion layer.

    Maps each backbone feature map to a target channel count using a 1x1
    convolution followed by ReLU, one conv per feature map.
    """

    def __init__(self, list_k):
        """
        Args:
            list_k: pair of lists — list_k[0] holds the input channel counts,
                list_k[1] the matching output channel counts.
        """
        super(ConvertLayer, self).__init__()
        converters = [
            nn.Sequential(
                nn.Conv2d(cin, cout, 1, 1, bias=False),
                nn.ReLU(inplace=True),
            )
            for cin, cout in zip(list_k[0], list_k[1])
        ]
        self.convert0 = nn.ModuleList(converters)

    def forward(self, list_x):
        """Apply the i-th conv+ReLU to the i-th feature map; return the list."""
        return [conv(x) for conv, x in zip(self.convert0, list_x)]


        

class PSFEM(nn.Module): 
    """Progressive saliency feature extraction module.

    Refines the backbone feature maps top-down: each deeper feature is
    processed by a 3-conv stack, upsampled, and fused (by addition) with the
    next shallower feature.  Each stage also emits a single-channel score map
    for deep supervision; the shallowest stage forms the edge branch.
    """
    def __init__(self, list_k):
        # list_k is the 'psfem' config list; each entry sets one stage's
        # convolution channels / kernel (field meanings in the loop below).
        super(PSFEM, self).__init__()
        self.list_k = list_k
        trans, up, score = [], [], [] 
        for ik in list_k:
            # ik is a 5-item list such as [128, 256, 128, 3, 1]:
            # ik[0]: input channels
            # ik[1]: intermediate transform channels (0 = no transform layer)
            # ik[2]: output channels
            # ik[3]: kernel size
            # ik[4]: padding
            if ik[1] > 0:
                # 1x1 conv mapping a deeper feature (ik[1] channels) down to
                # this stage's channel count (ik[0]) before fusion.
                trans.append(nn.Sequential(
                    nn.Conv2d(ik[1], ik[0], 1, 1, bias=False),
                      nn.ReLU(inplace=True)))

            # Three stacked convolutions, each followed by ReLU.
            up.append(nn.Sequential(
                nn.Conv2d(ik[0], ik[2], ik[3], 1, ik[4]),
                  nn.ReLU(inplace=True),
                    nn.Conv2d(ik[2], ik[2], ik[3], 1, ik[4]),
                      nn.ReLU(inplace=True),
                        nn.Conv2d(ik[2], ik[2], ik[3], 1, ik[4]),
                          nn.ReLU(inplace=True))
                          )
            # Deep supervision: a single conv maps each stage's output to a
            # 1-channel prediction mask.
            score.append(nn.Conv2d(ik[2], 1, 3, 1, 1))
        
        # Extra 512->128 transform appended last; used as trans[-1] for the
        # edge branch in forward().
        trans.append(nn.Sequential(
            nn.Conv2d(512, 128, 1, 1, bias=False),
              nn.ReLU(inplace=True)))

        self.trans, self.up, self.score = nn.ModuleList(trans), nn.ModuleList(up), nn.ModuleList(score)
        self.relu =nn.ReLU()

    def forward(self, list_x, x_size):
        # list_x: list of feature tensors, shallow -> deep.
        # x_size: spatial size (H, W) of the input image for score upsampling.
        up_edge, up_sal, edge_feature, sal_feature = [], [], [], []
        
        num_f = len(list_x)
        tmp = self.up[num_f - 1](list_x[num_f-1]) # refine the deepest feature map
        sal_feature.append(tmp)
        U_tmp = tmp

        up_sal.append(
            F.interpolate( # upsample the score map to the input image size
              self.score[num_f - 1](tmp), # 1-channel score for this stage
                x_size, mode='bilinear', align_corners=True) # bilinear upsampling
            )
        
        for j in range(2, num_f ):
            i = num_f - j
            # Top-down fusion over the remaining stages (deep -> shallow),
            # skipping index 0, which is handled below as the edge branch.
            if list_x[i].size()[1] < U_tmp.size()[1]:
                # Channel mismatch: convert channels via trans[i], then upsample.
                U_tmp = list_x[i] + F.interpolate(
                    (self.trans[i](U_tmp)), 
                    list_x[i].size()[2:], mode='bilinear', align_corners=True)
            else: # channels already match: upsample only
                U_tmp = list_x[i] + F.interpolate(
                    (U_tmp), list_x[i].size()[2:], mode='bilinear', align_corners=True)
                       
            tmp = self.up[i](U_tmp) # refine the fused feature into a saliency feature
            U_tmp = tmp
            sal_feature.append(tmp)
            up_sal.append(F.interpolate(
                self.score[i](tmp),  # score the fused feature, upsample to image size
                x_size, mode='bilinear', align_corners=True))

        # Edge branch: fuse the shallowest feature with the transformed,
        # upsampled deepest saliency feature (sal_feature[0]).
        U_tmp = list_x[0] + F.interpolate(
            (self.trans[-1](sal_feature[0])), 
            list_x[0].size()[2:], mode='bilinear', align_corners=True)

        tmp = self.up[0](U_tmp) 
        edge_feature.append(tmp)
       
        up_edge.append(F.interpolate(
            self.score[0](tmp), # score the edge feature and upsample
              x_size, mode='bilinear', align_corners=True)) 
        # Returns: upsampled edge score maps, edge features,
        # upsampled saliency score maps, saliency features.
        return up_edge, edge_feature, up_sal, sal_feature     
        
class O2OGM(nn.Module): 
    """One-to-one guidance module.

    Fuses each edge feature with every saliency feature through dedicated
    sub-side paths (1-vs-1 guidance), scores each fused feature, and finally
    merges all fused features into one prediction via final_score.
    """
    def __init__(self, list_k):
        super(O2OGM, self).__init__()
        self.list_k = list_k # e.g. [[128], [256, 512, 512, 512]]
        trans, up, score = [], [], []
        for i in list_k[0]: # i = edge-feature channel count (128)
            tmp = []
            tmp_up = []
            tmp_score = []
            # Per-path (kernel size, padding) pairs for the refinement convs.
            feature_k = [[3,1],[5,2], [5,2], [7,3]]
            for idx, j in enumerate(list_k[1]):
                # 1x1 conv: map saliency channels j down to edge channels i.
                tmp.append(nn.Sequential(
                    nn.Conv2d(j, i, 1, 1, bias=False), 
                    nn.ReLU(inplace=True)))

                # Three stacked convs refining the fused feature.
                tmp_up.append(nn.Sequential(
                    nn.Conv2d(i , i, feature_k[idx][0], 1, feature_k[idx][1]),
                      nn.ReLU(inplace=True),
                        nn.Conv2d(i, i,  feature_k[idx][0],1 , feature_k[idx][1]), 
                        nn.ReLU(inplace=True), 
                        nn.Conv2d(i, i, feature_k[idx][0], 1, feature_k[idx][1]), 
                        nn.ReLU(inplace=True)))
                # Per-path 1-channel score head.
                tmp_score.append(nn.Conv2d(i, 1, 3, 1, 1))
                
            trans.append(nn.ModuleList(tmp))
            up.append(nn.ModuleList(tmp_up)) # top-down location propagation
            score.append(nn.ModuleList(tmp_score))
            

        self.trans, self.up, self.score = nn.ModuleList(trans), nn.ModuleList(up), nn.ModuleList(score)       
        # Final fusion head: 5x5 conv + ReLU + 3x3 conv -> 1-channel prediction.
        self.final_score = nn.Sequential(
            nn.Conv2d(list_k[0][0], list_k[0][0], 5, 1, 2),
              nn.ReLU(inplace=True),
                nn.Conv2d(list_k[0][0], 1, 3, 1, 1))
        self.relu =nn.ReLU()

    def forward(self, list_x, list_y, x_size):
        # One-to-one guidance: list_x = edge features, list_y = saliency
        # features, x_size = input image size for the final upsampling.
        up_score, tmp_feature = [], []
        list_y = list_y[::-1] # reverse so the deepest feature comes first

        
        for i, i_x in enumerate(list_x):
            for j, j_x in enumerate(list_y):                              
                # Transform the saliency feature, upsample it to the edge
                # feature's spatial size, and fuse by element-wise addition.
                tmp = F.interpolate(
                    self.trans[i][j](j_x),
                      i_x.size()[2:], mode='bilinear', align_corners=True) + i_x                
                
                tmp_f = self.up[i][j](tmp)             
                
                up_score.append(F.interpolate(
                    self.score[i][j](tmp_f),
                      x_size, mode='bilinear', align_corners=True))                  
                tmp_feature.append(tmp_f)
       
        tmp_fea = tmp_feature[0]
        for i_fea in range(len(tmp_feature) - 1):
            # Merge all sub-side path outputs by pixel-wise addition
            # (upsampled to the first path's spatial size), with ReLU.
            tmp_fea = self.relu(
                torch.add(tmp_fea, F.interpolate(
                              (tmp_feature[i_fea+1]),
                                tmp_feature[0].size()[2:], mode='bilinear', align_corners=True)))
        
        # Final fused prediction appended after the per-path scores.
        up_score.append(
            F.interpolate(
                self.final_score(tmp_fea),
                  x_size, mode='bilinear', align_corners=True))
    

        return up_score
       


# extra part: build the non-backbone modules for the chosen base network.
def extra_layer(base_model_cfg, vgg):
    """Create the extra layers (PSFEM and O2OGM) for the chosen backbone.

    Args:
        base_model_cfg: 'vgg' or 'resnet' — selects the layer configuration.
        vgg: the backbone network instance (despite the name, this may be
            either the VGG or the ResNet backbone).

    Returns:
        Tuple of (backbone, PSFEM module, O2OGM module).

    Raises:
        ValueError: if base_model_cfg is not a known backbone name.
            (Previously an unknown value fell through to an opaque
            UnboundLocalError on `config`.)
    """
    if base_model_cfg == 'vgg':
        config = config_vgg
    elif base_model_cfg == 'resnet':
        config = config_resnet
    else:
        raise ValueError(
            "base_model_cfg must be 'vgg' or 'resnet', got %r" % (base_model_cfg,))
    PSFEM_layer = PSFEM(config['psfem'])
    O2OGM_layer = O2OGM(config['o2ogm'])

    return vgg, PSFEM_layer, O2OGM_layer


# TUN network
class TUN_bone(nn.Module):
    """Top-level network: backbone + PSFEM + O2OGM.

    For a ResNet backbone, an extra ConvertLayer first maps the backbone
    feature channels to the counts PSFEM expects; VGG features are used
    directly.
    """

    def __init__(self, base_model_cfg, base, PSFEM_layer, O2OGM_layer):
        """
        Args:
            base_model_cfg: 'vgg' or 'resnet'.
            base: backbone network returning a list of feature maps.
            PSFEM_layer: a PSFEM instance.
            O2OGM_layer: an O2OGM instance.

        Raises:
            ValueError: for an unknown base_model_cfg.  (Previously unknown
                values were accepted silently, leaving `base`/`psfem`/`o2ogm`
                unset so forward() failed later with AttributeError.)
        """
        super(TUN_bone, self).__init__()
        if base_model_cfg not in ('vgg', 'resnet'):
            raise ValueError(
                "base_model_cfg must be 'vgg' or 'resnet', got %r" % (base_model_cfg,))
        self.base_model_cfg = base_model_cfg
        # Common sub-modules (previously duplicated in both branches).
        self.base = base
        self.psfem = PSFEM_layer
        self.o2ogm = O2OGM_layer
        if base_model_cfg == 'resnet':
            # ResNet channel counts differ from what PSFEM expects, so add
            # the 1x1 channel-conversion layer.
            self.convert = ConvertLayer(config_resnet['convert'])

    def forward(self, x):
        """Run a full forward pass.

        Returns:
            (up_edge, up_sal, up_sal_final): upsampled edge score maps,
            per-stage saliency score maps, and the O2OGM fused predictions.
        """
        x_size = x.size()[2:]
        conv2merge = self.base(x)
        if self.base_model_cfg == 'resnet':
            conv2merge = self.convert(conv2merge)  # channel conversion
        up_edge, edge_feature, up_sal, sal_feature = self.psfem(conv2merge, x_size)
        up_sal_final = self.o2ogm(edge_feature, sal_feature, x_size)
        return up_edge, up_sal, up_sal_final


# build the whole network
def build_model(base_model_cfg='vgg'):
    """Assemble the whole TUN network for the given backbone.

    Args:
        base_model_cfg: 'vgg' (default) or 'resnet'.

    Returns:
        A TUN_bone module wrapping the backbone, PSFEM and O2OGM.

    Raises:
        ValueError: for an unknown backbone name.  (Previously the function
            had no else branch and silently returned None.)
    """
    if base_model_cfg == 'vgg':
        return TUN_bone(base_model_cfg, *extra_layer(base_model_cfg, vgg16()))
    if base_model_cfg == 'resnet':
        return TUN_bone(base_model_cfg, *extra_layer(base_model_cfg, resnet50()))
    raise ValueError(
        "base_model_cfg must be 'vgg' or 'resnet', got %r" % (base_model_cfg,))


# weight init
def xavier(param):
    """Fill ``param`` in place with Xavier (Glorot) uniform values."""
    init.xavier_uniform_(param)


def weights_init(m):
    """Per-module initializer for ``net.apply``.

    Conv2d weights are drawn from N(0, 0.01) and biases (if present) are
    zeroed; every other module type is left untouched.
    """
    if not isinstance(m, nn.Conv2d):
        return
    m.weight.data.normal_(0, 0.01)
    if m.bias is not None:
        m.bias.data.zero_()

if __name__ == '__main__':
    # Smoke test: build the network and run one forward pass.
    # (The previous version referenced undefined names — TUN, vgg, base,
    # config — called forward with an unsupported `mode` argument, used the
    # deprecated Variable wrapper, and unconditionally required CUDA.)
    net = build_model('resnet')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    net = net.to(device)
    net.eval()
    img = torch.randn(1, 3, 256, 256, device=device)
    with torch.no_grad():
        up_edge, up_sal, up_sal_final = net(img)
    print('up_edge maps:', len(up_edge), 'shape:', tuple(up_edge[0].shape))
    print('up_sal maps:', len(up_sal))
    print('up_sal_final maps:', len(up_sal_final))
