import torch
import torch.nn as nn
import numpy as np
import math
from collections import OrderedDict
import torch.nn.functional as F
from myNet.kspace_pytorch_lxj import *

def get_upsample_filter(size):
    """Build a 2-D bilinear upsampling kernel of shape (size, size).

    Args:
        size: side length of the square kernel.

    Returns:
        A float32 torch tensor whose entries are the outer product of a
        1-D triangular (bilinear) profile with itself.
    """
    factor = (size + 1) // 2
    # The triangle's peak sits on a pixel for odd sizes and between two
    # pixels for even sizes.
    if size % 2 == 1:
        center = factor - 1
    else:
        center = factor - 0.5
    rows, cols = np.ogrid[:size, :size]
    # Named 'kernel' (was 'filter') so the builtin filter() is not shadowed.
    kernel = (1 - abs(rows - center) / factor) * \
             (1 - abs(cols - center) / factor)
    return torch.from_numpy(kernel).float()

class ConvBlock(nn.Module):
    """Two stacked (3x3 Conv -> BatchNorm -> ReLU) stages.

    The first conv maps in_channels -> out_channels; the second keeps
    out_channels. Spatial size is preserved (kernel 3, padding 1).
    """

    def __init__(self, in_channels, out_channels, affine=True):
        super(ConvBlock, self).__init__()
        # Sub-module names ('conv1', ...) and the historical attribute
        # spelling 'blcok' are kept as-is so existing state_dicts still load.
        stages = OrderedDict([
            ('conv1', nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, bias=True)),
            ('norm1', nn.BatchNorm2d(out_channels, affine=affine)),
            ('relu1', nn.ReLU()),
            ('conv2', nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1, bias=True)),
            ('norm2', nn.BatchNorm2d(out_channels, affine=affine)),
            ('relu2', nn.ReLU()),
        ])
        self.blcok = nn.Sequential(stages)

    def forward(self, x):
        # Straight pass-through of the sequential stack.
        return self.blcok(x)
    
class UNet(nn.Module):
    """A 2-level U-Net: two max-pool encoder steps, two transposed-conv
    decoder steps with skip connections, plus optional residual add and
    output filtering (ReLU / zero-mask / clamp).
    """

    # The input passes through two 2x max-poolings, so the input width and
    # height must both be multiples of 4.
    dn_steps = 4

    def __init__(self, c_in=6, c_out=3, b_res=True, b_interleaved=True,
                 b_out_relu=False, b_out_mask=True, b_out_clip=False):
        """
        Args:
            c_in: number of input channels.
            c_out: number of output channels.
            b_res: add matching input channels to the output (residual).
            b_interleaved: residual/mask use the odd (resp. even) interleaved
                channels of the input instead of a leading channel slice.
            b_out_relu: apply ReLU to the output.
            b_out_mask: zero the output where the reference input is zero.
            b_out_clip: clamp the output to [0, 1].
        """
        super(UNet, self).__init__()
        print("Unet.init()")
        self.b_interleaved = b_interleaved
        self.b_out_relu = b_out_relu
        self.b_out_mask = b_out_mask
        self.b_out_clip = b_out_clip
        self.b_res = b_res
        self.c_out = c_out
        self.c_in = c_in

        affine = True
        # NOTE: attribute names keep the historical 'blcok' spelling so that
        # previously saved state_dicts still load.
        self.blcok1 = ConvBlock(c_in, 64, affine)
        self.blcok2 = ConvBlock(64, 128, affine)
        self.blcok3 = ConvBlock(128, 256, affine)

        self.deconv1 = nn.ConvTranspose2d(256, 128, 4, stride=2, padding=1, bias=False)
        self.blcok4 = ConvBlock(256, 128, affine)
        self.deconv2 = nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1, bias=False)
        self.blcok5 = ConvBlock(128, 64, affine)

        self.conv_out = nn.Conv2d(64, c_out, kernel_size=3, padding=1, bias=True)

    def forward(self, x):
        """Run the encoder/decoder; x is (B, c_in, H, W) with H, W
        multiples of 4 (see dn_steps)."""
        x1 = self.blcok1(x)
        x2 = self.blcok2(F.max_pool2d(x1, 2))
        x3 = self.blcok3(F.max_pool2d(x2, 2))
        # Decoder with skip connections (concat along channels).
        x4 = F.relu(self.deconv1(x3))
        x5 = self.blcok4(torch.cat((x4, x2), 1))
        x6 = F.relu(self.deconv2(x5))
        x7 = self.blcok5(torch.cat((x6, x1), 1))

        y = self.conv_out(x7)
        if self.b_res:
            y = self.sum_out(x, y)

        return self.filter_out(x, y)

    def sum_out(self, x, y):
        """Residual add: y += the matching channels of the input x."""
        if self.b_interleaved:
            # Odd-indexed input channels carry the residual reference.
            y += x[:, 1::2, :]
        else:
            # Last y.shape[1] channels of x align with the output channels.
            y += x[:, -y.shape[1]:, :]
        return y

    def filter_out(self, x, y):
        """Optionally ReLU, zero-mask, and clamp the network output."""
        if self.b_out_relu:
            y = F.relu(y)

        # Fix: honor b_out_mask — the flag was stored but previously ignored
        # (the mask was applied unconditionally and without the interleaved
        # channel selection).
        if self.b_out_mask:
            if self.b_interleaved:
                y[x[:, ::2, :] == 0] = 0
            else:
                y[x[:, :self.c_out, :] == 0] = 0

        if self.b_out_clip:
            y = torch.clamp(y, 0, 1.0)
        return y

    def _initialize_weights(self):
        """Seeded N(0, 0.02) init for conv weights; zero biases; BN scale 1."""
        # Fix: 'seed' is never assigned on this class, so reading self.seed
        # directly raised AttributeError whenever this method was called.
        seed = getattr(self, 'seed', None)
        if seed is not None:
            torch.manual_seed(seed)
            torch.cuda.manual_seed(seed)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.data.normal_(0, 0.02)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
            elif isinstance(m, nn.ConvTranspose2d):
                m.weight.data.normal_(0, 0.02)
                if m.bias is not None:
                    m.bias.data.zero_()

class Net(nn.Module):
    """Two cascaded UNet stages, each followed by a k-space
    data-consistency projection (DataConsistencyInKspace)."""

    def __init__(self):
        super(Net, self).__init__()
        print("Net.init()")

        self.net1 = UNet(b_interleaved=False)
        self.net2 = UNet(b_interleaved=False)
        self.dc1 = DataConsistencyInKspace(norm='ortho')
        self.dc2 = DataConsistencyInKspace(norm='ortho')

        # Init every conv / transposed-conv weight from N(0, 0.02) and zero
        # its bias; batch-norm scale starts at 1, linear biases at 0.
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                m.weight.data.normal_(0, 0.02)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def make_layer(self, block):
        # Wrap a freshly constructed block in a Sequential container.
        return nn.Sequential(block())

    def forward(self, t1, t2Dn, mask):
        """Stage 1 reconstructs from (t1, t2Dn); stage 2 refines the
        data-consistent stage-1 output. Returns both stage outputs."""
        stage1_in = torch.cat((t1, t2Dn), 1)
        stage1_out = self.net1(stage1_in)
        HR_2x = self.dc1.perform(stage1_out, t2Dn, mask)

        stage2_in = torch.cat((t1, HR_2x), 1)
        stage2_out = self.net2(stage2_in)
        HR_4x = self.dc2.perform(stage2_out, t2Dn, mask)

        return HR_2x, HR_4x
    
    

        
class L1_Charbonnier_loss(nn.Module):
    """Charbonnier (smoothed L1) loss: sum(sqrt((X - Y)^2 + eps))."""

    def __init__(self):
        super(L1_Charbonnier_loss, self).__init__()
        # Smoothing term: keeps the gradient finite where X == Y.
        self.eps = 1e-6

    def forward(self, X, Y):
        diff = X - Y
        return torch.sqrt(diff.pow(2) + self.eps).sum()