import sys
import torch
import torch.nn as nn

# from utils import *
# from option import args
from collections import OrderedDict


import functools

class img_encoder(nn.Module):
    """Three-layer convolutional encoder: a 3-channel image in, a 128-channel
    feature map out. All convs are 3x3 / stride 1 / pad 1, so the spatial
    size is preserved; each conv is followed by InstanceNorm + ReLU.
    """

    def __init__(self):
        super(img_encoder, self).__init__()
        norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
        relu = nn.ReLU(inplace=True)
        layers = [
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
            norm_layer(64),
            relu,
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
            norm_layer(64),
            relu,
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            norm_layer(128),
            relu,
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, image):
        """Encode *image* (N, 3, H, W) into a (N, 128, H, W) feature map."""
        return self.model(image)

class img_construction(nn.Module):
    """Decoder head: reconstructs a 3-channel image from a 128-channel
    feature map. Channels shrink 128 -> 64 -> 32 -> 3 through 3x3 / stride 1 /
    pad 1 convs (spatial size unchanged); the final Tanh bounds the output
    to [-1, 1].
    """

    def __init__(self):
        super(img_construction, self).__init__()
        norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
        relu = nn.ReLU(inplace=True)
        layers = [
            nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1),
            norm_layer(64),
            relu,
            nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1),
            norm_layer(32),
            relu,
            nn.Conv2d(32, 3, kernel_size=3, stride=1, padding=1),
            nn.Tanh(),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, image):
        """Map a (N, 128, H, W) feature map to a (N, 3, H, W) image in [-1, 1]."""
        return self.model(image)

def activation(act_type='prelu', slope=0.2, n_prelu=1):
    """Build an activation layer by name (case-insensitive).

    'prelu' -> nn.PReLU(num_parameters=n_prelu, init=slope)
    'lrelu' -> nn.LeakyReLU(negative_slope=slope, inplace=True)
    Anything else raises NotImplementedError.
    """
    kind = act_type.lower()
    if kind == 'lrelu':
        return nn.LeakyReLU(negative_slope=slope, inplace=True)
    if kind == 'prelu':
        return nn.PReLU(num_parameters=n_prelu, init=slope)
    raise NotImplementedError('[ERROR] Activation layer [%s] is not implemented!' % kind)

def pad(pad_type, padding):
    """Build an explicit padding layer, or None when no padding is needed.

    Fix: the original body only handled ``padding == 0`` and silently fell
    through to an implicit ``return None`` for every other case, so asking
    for a 'reflect'/'replicate' pad in ConvBlock had no effect. Now the
    supported types are constructed and unknown types raise, matching the
    error convention of activation()/norm().

    Args:
        pad_type: 'reflect' or 'replicate' (case-insensitive).
        padding: pad width in pixels; 0 returns None (no layer needed).

    Raises:
        NotImplementedError: for any unsupported pad type.
    """
    pad_type = pad_type.lower()
    if padding == 0:
        return None
    if pad_type == 'reflect':
        layer = nn.ReflectionPad2d(padding)
    elif pad_type == 'replicate':
        layer = nn.ReplicationPad2d(padding)
    else:
        raise NotImplementedError('[ERROR] Padding layer [%s] is not implemented!' % pad_type)
    return layer

def norm(n_feature, norm_type='bn'):
    """Build a normalization layer for *n_feature* channels.

    Fixes:
    - The error message was copy-pasted from sequential() ("does not support
      OrderedDict") and did not describe the actual failure; it now reports
      the unsupported norm type.
    - Added 'in' (InstanceNorm2d, affine=False) since the rest of the file
      already uses that normalization; previously it raised. Backward
      compatible: 'bn' behaves exactly as before.

    Args:
        n_feature: number of channels to normalize.
        norm_type: 'bn' or 'in' (case-insensitive).

    Raises:
        NotImplementedError: for any unsupported norm type.
    """
    norm_type = norm_type.lower()
    if norm_type == 'bn':
        layer = nn.BatchNorm2d(n_feature)
    elif norm_type == 'in':
        layer = nn.InstanceNorm2d(n_feature, affine=False)
    else:
        raise NotImplementedError('[ERROR] Normalization layer [%s] is not implemented!' % norm_type)
    return layer

def get_valid_padding(kernel_size, dilation):
    """Padding that keeps the spatial size unchanged for a stride-1 conv.

    The effective receptive width of a dilated kernel is
    ``dilation * (kernel_size - 1) + 1``; half of (that - 1) is the
    "same" padding.
    """
    effective = dilation * (kernel_size - 1) + 1
    return (effective - 1) // 2

def sequential(*args):
    """Assemble modules into one nn.Sequential.

    Nested nn.Sequential arguments are flattened into their children and
    non-module entries (e.g. None placeholders from pad()/norm()) are
    dropped. A single argument is returned unchanged, except that an
    OrderedDict is rejected.
    """
    if len(args) == 1:
        if isinstance(args[0], OrderedDict):
            raise NotImplementedError('[ERROR] %s.sequential() does not support OrderedDict' % sys.modules[__name__])
        return args[0]
    flat = []
    for item in args:
        if isinstance(item, nn.Sequential):
            flat.extend(item)
        elif isinstance(item, nn.Module):
            flat.append(item)
    return nn.Sequential(*flat)



def ConvBlock(in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, valid_padding=True, padding=0,
              act_type='prelu', norm_type='bn', pad_type='zero'):
    """Assemble [explicit pad] -> conv -> [norm] -> [activation] via sequential().

    When ``valid_padding`` is True the ``padding`` argument is overridden by
    get_valid_padding() so a stride-1 conv preserves the spatial size. An
    explicit pad layer is requested only for non-'zero' pad types; norm and
    activation are skipped when their type argument is falsy.
    """
    if valid_padding:
        padding = get_valid_padding(kernel_size, dilation)

    explicit_pad = None
    if pad_type and pad_type != 'zero':
        explicit_pad = pad(pad_type, padding)

    conv_layer = nn.Conv2d(in_channels, out_channels, kernel_size,
                           stride=stride, padding=padding, dilation=dilation, bias=bias)
    norm_layer = norm(out_channels, norm_type) if norm_type else None
    act_layer = activation(act_type) if act_type else None

    return sequential(explicit_pad, conv_layer, norm_layer, act_layer)

class DenseLayer(nn.Module):
    """One dense-connectivity step: convolve the input and concatenate the
    result back onto it along the channel axis, so the channel count grows
    by *growth* (from *num_channels* to num_channels + growth).
    """

    def __init__(self, num_channels, growth):
        super(DenseLayer, self).__init__()
        self.conv = ConvBlock(num_channels, growth, kernel_size=3, act_type='lrelu', norm_type=None)

    def forward(self, x):
        """Return cat([x, conv(x)], dim=1)."""
        return torch.cat((x, self.conv(x)), 1)


class DenseNet(nn.Module):
    """Two-stream infrared / visible image fusion network.

    Each modality is encoded by its own img_encoder (3 -> 128 channels), the
    two feature maps are concatenated (256 channels) and compressed back to
    128 by ``fine1``. From that shared fused representation three heads run:
    ``fine2`` decodes the fused image, while ``vis_construction`` /
    ``ir_construction`` reconstruct each source image (used as auxiliary
    reconstruction targets during training).

    Changes vs. the original: removed ~40 lines of dead commented-out code
    (an earlier dense-block architecture and encoder variants) and added
    documentation. Runtime behavior is unchanged.
    """

    def __init__(self):
        super(DenseNet, self).__init__()
        norm_layer = nn.BatchNorm2d
        activation = nn.ReLU(inplace=True)
        # Fuse the concatenated 128+128 channel encoder features down to 128.
        fine1 = [nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1), norm_layer(128), activation]
        # Decode fused features to a 3-channel image; ReflectionPad2d(3) +
        # 7x7 conv with padding=0 keeps the spatial size, Tanh bounds output
        # to [-1, 1].
        fine2 = [nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1), norm_layer(64), activation,
                 nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1), norm_layer(32), activation,
                 nn.ReflectionPad2d(3), nn.Conv2d(32, 3, kernel_size=7, padding=0), nn.Tanh()]

        self.fine1 = nn.Sequential(*fine1)
        self.fine2 = nn.Sequential(*fine2)
        self.ir_encoder = img_encoder()
        self.vis_encoder = img_encoder()
        self.vis_construction = img_construction()
        self.ir_construction = img_construction()

    def forward(self, vis, ir):
        """Fuse a visible and an infrared image.

        Args:
            vis, ir: HWC arrays (the 'bhwc->bchw' einsum fixes that layout)
                with values in [0, 255] — presumably uint8 numpy images;
                verify against the caller.

        Returns:
            (fusion, pre_vis, pre_ir): fused image and the two per-modality
            reconstructions, each (1, 3, H, W) in [-1, 1].
        """
        # Scale to [0, 1], add a batch dim, and move channels first.
        vis = torch.tensor(vis / 255).unsqueeze(0)
        ir = torch.tensor(ir / 255).unsqueeze(0)
        vis = torch.einsum('bhwc->bchw', vis).float()
        ir = torch.einsum('bhwc->bchw', ir).float()

        ir_feature = self.ir_encoder(ir)
        vis_feature = self.vis_encoder(vis)
        # Channel-concatenate the two streams, then compress to the shared
        # 128-channel fused representation all three heads consume.
        fusion_feature = self.fine1(torch.cat([vis_feature, ir_feature], dim=1))
        pre_vis = self.vis_construction(fusion_feature)
        pre_ir = self.ir_construction(fusion_feature)
        fusion = self.fine2(fusion_feature)
        return fusion, pre_vis, pre_ir


# if __name__ == '__main__':
#     Net = DenseNet()
#     print(Net)

