import torch.nn.functional as  F
import torch.nn as nn
import numpy as np
from torchvision.transforms import ToTensor
from torch.autograd import Variable
import torch

def function_residual2_edge2(im):
    """Apply a 3x3 Laplacian (edge-detection) filter across 96 channels.

    Note: the original variable was named ``sobel_kernel`` but the
    coefficients are an 8-neighbour Laplacian, not a Sobel kernel.

    Args:
        im: float32 tensor of shape (N, 96, H, W) — assumed from the
            hard-coded channel count; confirm against callers.

    Returns:
        Edge-response tensor of shape (N, 96, H, W); each output channel
        sums the filtered responses of all 96 input channels
        (stride 1, padding 1 preserves the spatial size).
    """
    laplacian = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], dtype='float32')
    # Build the (out=96, in=96, 3, 3) weight in one tile instead of two repeats.
    weight = np.tile(laplacian.reshape(1, 1, 3, 3), (96, 96, 1, 1))
    # Variable() has been a no-op since PyTorch 0.4; plain tensors autograd fine.
    return F.conv2d(im, torch.from_numpy(weight), stride=1, padding=1)

def function_residual3_edge3(im):
    """Apply a 3x3 Laplacian (edge-detection) filter across 96 channels.

    Duplicate of ``function_residual2_edge2``; kept as a separate entry
    point because the model references the three residual filters by name.
    The kernel is an 8-neighbour Laplacian despite the original
    ``sobel_kernel`` naming.

    Args:
        im: float32 tensor of shape (N, 96, H, W) — assumed from the
            hard-coded channel count; confirm against callers.

    Returns:
        Edge-response tensor of shape (N, 96, H, W) (stride 1, padding 1).
    """
    laplacian = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], dtype='float32')
    # (out=96, in=96, 3, 3) weight built in one step instead of two repeats.
    weight = np.tile(laplacian.reshape(1, 1, 3, 3), (96, 96, 1, 1))
    # Variable() has been a no-op since PyTorch 0.4; drop it.
    return F.conv2d(im, torch.from_numpy(weight), stride=1, padding=1)

def function_residual1_edge1(im):
    """Apply a single-channel 3x3 Laplacian (edge-detection) filter.

    The kernel is an 8-neighbour Laplacian despite the original
    ``sobel_kernel`` naming. The original also called
    ``np.repeat(..., 1, axis=...)`` twice, which is a no-op (and its
    comments wrongly said "96"); removed here.

    Args:
        im: float32 tensor of shape (N, 1, H, W).

    Returns:
        Edge-response tensor of shape (N, 1, H, W) (stride 1, padding 1).
    """
    laplacian = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], dtype='float32')
    weight = torch.from_numpy(laplacian.reshape(1, 1, 3, 3))
    # Variable() has been a no-op since PyTorch 0.4; drop it.
    return F.conv2d(im, weight, stride=1, padding=1)

#==================================================================
# Custom filter layer (convolution), 2020-11-17
class self_define_filterlayer(nn.Module):
    """Convolution layer with a hand-managed learnable kernel bank.

    Weight shape (16, 1, 5, 5): maps a 1-channel input to 16 channels
    with 5x5 kernels (no padding, so each spatial dim shrinks by 4).
    """

    def __init__(self):
        super(self_define_filterlayer, self).__init__()
        # 16 output channels, 1 input channel, 5x5 kernels.
        self.weight = nn.Parameter(torch.randn(16, 1, 5, 5))

    def forward(self, x):
        """Convolve a (N, 1, H, W) input with the learned filter bank.

        Bug fix: the original flattened ``x`` to 2-D with
        ``x.view(x.size(0), -1)`` — which makes ``F.conv2d`` raise, since
        it requires a 4-D input — and then never returned ``out``.
        """
        return F.conv2d(x, self.weight)
#===================================================================

class My_model(nn.Module):
    """Symmetric conv/deconv network with three skip connections.

    Encoder: stacked 3x3 convolutions extract features; decoder: stacked
    3x3 transposed convolutions restore detail. Every layer uses stride 1
    and padding 1, so spatial size is preserved end to end:
    input (N, 1, H, W) -> output (N, 1, H, W).

    NOTE(review): ``self.conv`` and ``self.conv_reverse`` are each reused
    at several depths, so those stages share one set of weights — confirm
    this weight sharing is intentional.
    """

    def __init__(self, out_ch=96):
        """Build the encoder/decoder layers.

        Args:
            out_ch: number of feature channels in the hidden layers.
        """
        super(My_model, self).__init__()
        # Convolution layers: feature extraction (encoder).
        self.conv1 = nn.Conv2d(1, out_ch, kernel_size=3, stride=1, padding=1)
        self.conv = nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1)
        # Transposed convolutions: detail recovery (decoder).
        self.conv_reverse = nn.ConvTranspose2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1)
        self.conv_reverse_last = nn.ConvTranspose2d(out_ch, 1, kernel_size=3, stride=1, padding=1)
        self.relu = nn.ReLU()
        self._initialize_weights()

    def _initialize_weights(self):
        """Initialize all conv weights with small Gaussians, biases with zero."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight.data, mean=0.0, std=0.001)
                nn.init.zeros_(m.bias.data)
            elif isinstance(m, nn.ConvTranspose2d):
                # Bug fix: nn.init.normal is deprecated — use in-place normal_.
                nn.init.normal_(m.weight.data, mean=0.0, std=0.002)
                nn.init.zeros_(m.bias.data)

    def forward(self, x):
        """Run encode -> decode with skip connections.

        Args:
            x: input tensor of shape (N, 1, H, W).

        Returns:
            Tensor of shape (N, 1, H, W), non-negative (final ReLU).
        """
        # --- Encoding; residuals are saved at three depths for the skips.
        residual_1 = x.clone()
        #residual_1 = function_residual1_edge1(residual_1)

        out = self.relu(self.conv1(x))
        out = self.relu(self.conv(out))
        residual_2 = out.clone()
        #residual_2 = function_residual2_edge2(residual_2)

        out = self.relu(self.conv(out))
        out = self.relu(self.conv(out))
        residual_3 = out.clone()
        #residual_3 = function_residual3_edge3(residual_3)

        out = self.relu(self.conv(out))

        # --- Decoding with skip connections (deepest residual first).
        out = self.conv_reverse(out)
        out += residual_3

        out = self.relu(out)
        out = self.conv_reverse(out)
        out = self.conv_reverse(self.relu(out))

        out += residual_2
        out = self.relu(out)
        out = self.conv_reverse(out)

        # Last deconv collapses back to 1 channel before the input skip.
        out = self.conv_reverse_last(self.relu(out))
        out += residual_1
        out = self.relu(out)
        return out





















