import os
import cv2
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms

def mkfiledir(filedirlist):
    """Create the nested directory './a/b/...' named by *filedirlist*.

    Each element of ``filedirlist`` is one path component, created (if
    missing) under the current working directory.

    Args:
        filedirlist: sequence of directory-name strings, outermost first.

    Returns:
        The relative path ``'./a/b/...'`` of the deepest directory.
    """
    path = os.path.join('.', *filedirlist)
    # makedirs(..., exist_ok=True) creates all intermediate levels in one
    # call and avoids the check-then-create race of the original
    # os.path.exists()/os.mkdir() loop.
    os.makedirs(path, exist_ok=True)
    return path

from models.cyclegan.networks import init_net

def rgb2gray(img):
    """Convert an (N, 3, H, W) RGB batch to (N, 1, H, W) luminance
    using ITU-R BT.601 weights (0.299, 0.587, 0.114)."""
    red = img[:, 0:1]
    green = img[:, 1:2]
    blue = img[:, 2:3]
    return red * 0.299 + green * 0.587 + blue * 0.114

def assign_adain_params(adain_params, model):
    """Distribute a flat (B, P) AdaIN parameter tensor over every
    AdaptiveInstanceNorm2d layer of *model*, in module-traversal order.

    For each AdaIN layer the first ``num_features`` columns of the
    remaining tensor become its bias (mean) and the next ``num_features``
    its weight (std); the consumed columns are then stripped off for the
    next layer.
    """
    remaining = adain_params
    for module in model.modules():
        if module.__class__.__name__ != "AdaptiveInstanceNorm2d":
            continue
        nf = module.num_features
        module.bias = remaining[:, :nf].contiguous().view(-1)
        module.weight = remaining[:, nf:2 * nf].contiguous().view(-1)
        # Advance only while columns remain beyond this layer's share.
        if remaining.size(1) > 2 * nf:
            remaining = remaining[:, 2 * nf:]

def define_G(input_dim=3, output_dim=3, ndf=32):
    """Build the shared decoder plus one encoder per modality.

    The decoder is created first so its AdaIN parameter count can size
    the style head (``h_dim``) of each encoder.

    Returns:
        (net_encoder_polar, net_encoder_vis, net_decoder)
    """
    net_decoder = decoder(output_dim, ndf)
    n_style = get_num_adain_params(net_decoder)
    net_encoder_polar = encoder(input_dim, ndf, n_style)
    net_encoder_vis = encoder(input_dim, ndf, n_style)
    return net_encoder_polar, net_encoder_vis, net_decoder


class encoder(nn.Module):
    """Modality-specific encoder with two modes (see ``forward``):

    * ``state='enc'``   — image -> L2-normalised 256-d content code.
    * ``state='style'`` — 256-d code -> ``h_dim``-d AdaIN style vector.

    Args:
        input_dim: number of input image channels.
        ndf: base channel width of the conv trunk.
        h_dim: size of the style output; ``define_G`` passes the
            decoder's AdaIN parameter count here.
    """
    def __init__(self, input_dim, ndf=32, h_dim=256):
        super(encoder, self).__init__()

        # Spatial sizes below assume a 256x256 input, since fc_enc expects
        # an 8x8 feature map after five stride-2 blocks (256 -> 8).
        # NOTE(review): the original comments claimed 128 -> 4, which
        # contradicts fc_enc's 8*ndf*8*8 input — confirm the actual input
        # resolution against the training pipeline.
        self.conv = nn.Sequential(
            convblock(input_dim, ndf, 5, 1, 2),   # 256 (stride 1)
            convblock(ndf, 2 * ndf, 3, 2, 1),     # 128
            convblock(2 * ndf, 4 * ndf, 3, 2, 1), # 64
            convblock(4 * ndf, 8 * ndf, 3, 2, 1), # 32
            convblock(8 * ndf, 8 * ndf, 3, 2, 1), # 16
            convblock(8 * ndf, 8 * ndf, 3, 2, 1)  # 8
        )

        # Flattened (8*ndf) x 8 x 8 feature map -> 256-d content code.
        self.fc_enc = nn.Linear(8 * ndf * 8 * 8, 256)

        # MLP mapping a 256-d code to the h_dim AdaIN style parameters.
        self.fc_style = nn.Sequential(
            nn.Linear(256, 1024),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(1024, 2048),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(2048, h_dim)
        )

    def forward(self, x, state='enc'):
        """Run one of the two heads.

        Args:
            x: image batch when ``state='enc'``; a 256-d code batch when
                ``state='style'``.
            state: 'enc' or 'style'. Any other value falls through and
                implicitly returns None.
        """
        if state == 'style':
            # Re-normalise the incoming code before the style MLP.
            x = F.normalize(x, p=2, dim=1)
            style = self.fc_style(x)
            return style

        elif state == 'enc':
            x = self.conv(x)
            x = x.view(x.size(0), -1)
            x = self.fc_enc(x)
            # Content codes live on the unit hypersphere.
            return F.normalize(x, p=2, dim=1)


class decoder(nn.Module):
    """Shared image decoder with AdaIN-conditioned upper layers.

    ``forward`` expects one concatenated vector per sample: columns
    [0:512] feed the fc stem (per ``define_G`` usage this is a 256-d
    identity code plus a 256-d content code — TODO confirm), and columns
    [512:] are AdaIN parameters distributed over the 'adain' norm layers
    of ``self.conv`` via ``assign_adain_params``.

    Args:
        output_dim: channels of the generated image.
        ndf: base channel width of the conv trunk.
    """
    def __init__(self, output_dim=3, ndf=32):
        super(decoder, self).__init__()

        # 512-d vector -> (4*ndf) x 8 x 8 feature map (after view in forward).
        self.fc = nn.Sequential(
            nn.Linear(256+256, 4 * ndf * 8 * 8),
            nn.LeakyReLU(0.2, inplace=True)
        )

        # Upsampling trunk, 8 -> 256 spatial; the final blocks use AdaIN
        # so the style vector controls low-level appearance.
        self.conv = nn.Sequential(
            deconvblock(4 * ndf, 4 * ndf, 2, 2, 0),  # 16
            G_resblock(4 * ndf, 4 * ndf),
            deconvblock(4 * ndf, 4 * ndf, 2, 2, 0),  # 32
            G_resblock(4 * ndf, 4 * ndf),
            deconvblock(4 * ndf, 2 * ndf, 2, 2, 0),  # 64
            G_resblock(2 * ndf, 2 * ndf),
            deconvblock(2 * ndf, 2 * ndf, 2, 2, 0),  # 128
            G_resblock(2 * ndf, 2 * ndf),
            deconvblock(2 * ndf, ndf, 2, 2, 0, norm='adain'),  # 256
            G_resblock(ndf, ndf, norm='adain'),
            convblock(ndf, ndf, 3, 1, 1, norm='adain'),
            G_resblock(ndf, ndf, norm='adain'),
            convblock(ndf, ndf, 3, 1, 1, norm='adain')
        )

        # Separate 1x1 output heads, one per modality.
        self.polar_output = nn.Conv2d(ndf, output_dim, 1, 1, 0)
        self.vis_output = nn.Conv2d(ndf, output_dim, 1, 1, 0)

    def forward(self, x, modality='polar'):
        """Decode x of shape (B, 512 + n_adain) to an image in [0, 1].

        ``modality`` selects the output head ('polar' or 'vis'); any
        other value skips both heads and sigmoids the raw trunk features.
        """
        # Install the per-sample AdaIN weights BEFORE running the trunk.
        assign_adain_params(x[:,512:],self)
        x = x[:,:512]
        x = self.fc(x)
        x = x.view(x.size(0), -1, 8, 8)
        x = self.conv(x)

        if modality == 'polar':
            x = self.polar_output(x)
        elif modality == 'vis':
            x = self.vis_output(x)
        return torch.sigmoid(x)


# basic module
class G_resblock(nn.Module):
    """Residual unit of two 3x3 convblocks: out = x + conv2(conv1(x)).

    Note: the identity skip requires input_dim == output_dim to add.
    """
    def __init__(self, input_dim, output_dim, norm='in'):
        super(G_resblock, self).__init__()
        self.conv1 = convblock(input_dim, output_dim, 3, 1, 1, norm)
        self.conv2 = convblock(output_dim, output_dim, 3, 1, 1, norm)

    def forward(self, x):
        residual = self.conv2(self.conv1(x))
        return x + residual

class convblock(nn.Module):
    """Conv2d -> normalisation -> LeakyReLU(0.2).

    Args:
        input_dim: input channels.
        output_dim: output channels.
        kernel_size, stride, padding: forwarded to nn.Conv2d.
        norm: 'bn' (BatchNorm2d), 'in' (InstanceNorm2d, default) or
            'adain' (AdaptiveInstanceNorm2d whose weight/bias are
            assigned externally via assign_adain_params).

    Raises:
        ValueError: if *norm* is not a supported mode. (The original
            silently left ``self.norm`` unset, deferring failure to an
            opaque AttributeError in forward().)
    """
    def __init__(self, input_dim, output_dim, kernel_size, stride, padding, norm='in'):
        super(convblock, self).__init__()

        self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, padding)

        if norm == 'bn':
            self.norm = nn.BatchNorm2d(output_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(output_dim)
        elif norm == 'adain':
            self.norm = AdaptiveInstanceNorm2d(output_dim)
        else:
            # Fail fast at construction rather than at first forward().
            raise ValueError("convblock: unknown norm '%s' (expected 'bn', 'in' or 'adain')" % norm)

        self.activation = nn.LeakyReLU(0.2, inplace=True)

    def forward(self, x):
        x = self.conv(x)
        x = self.norm(x)
        x = self.activation(x)
        return x

class deconvblock(nn.Module):
    """ConvTranspose2d -> normalisation -> LeakyReLU(0.2) (upsampling twin
    of ``convblock``).

    Args:
        input_dim: input channels.
        output_dim: output channels.
        kernel_size, stride, padding: forwarded to nn.ConvTranspose2d.
        norm: 'bn' (BatchNorm2d), 'in' (InstanceNorm2d, default) or
            'adain' (AdaptiveInstanceNorm2d whose weight/bias are
            assigned externally via assign_adain_params).

    Raises:
        ValueError: if *norm* is not a supported mode. (The original
            silently left ``self.norm`` unset, deferring failure to an
            opaque AttributeError in forward().)
    """
    def __init__(self, input_dim, output_dim, kernel_size, stride, padding, norm='in'):
        super(deconvblock, self).__init__()

        self.conv = nn.ConvTranspose2d(input_dim, output_dim, kernel_size, stride, padding)

        if norm == 'bn':
            self.norm = nn.BatchNorm2d(output_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(output_dim)
        elif norm == 'adain':
            self.norm = AdaptiveInstanceNorm2d(output_dim)
        else:
            # Fail fast at construction rather than at first forward().
            raise ValueError("deconvblock: unknown norm '%s' (expected 'bn', 'in' or 'adain')" % norm)

        self.activation = nn.LeakyReLU(0.2, inplace=True)

    def forward(self, x):
        x = self.conv(x)
        x = self.norm(x)
        x = self.activation(x)
        return x

# AdaIN from: https://github.com/NVlabs/MUNIT
# AdaIN bookkeeping adapted from: https://github.com/NVlabs/MUNIT
def get_num_adain_params(model):
    """Total number of AdaIN parameters needed by *model*: each
    AdaptiveInstanceNorm2d layer consumes 2 * num_features (mean + std)."""
    return sum(
        2 * m.num_features
        for m in model.modules()
        if m.__class__.__name__ == "AdaptiveInstanceNorm2d"
    )

class AdaptiveInstanceNorm2d(nn.Module):
    """Instance norm whose affine weight/bias are supplied externally
    (flattened to length batch*num_features by assign_adain_params)
    instead of being learned. Adapted from NVlabs/MUNIT.
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        super(AdaptiveInstanceNorm2d, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        # Filled in externally before every forward pass.
        self.weight = None
        self.bias = None
        # Dummy running stats: batch_norm needs buffers, values unused.
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))

    def forward(self, x):
        assert self.weight is not None and self.bias is not None, "Please assign weight and bias before calling AdaIN!"
        batch, channels = x.size(0), x.size(1)

        # Fold (B, C) into a single channel axis so batch_norm computes
        # per-instance statistics, then restore the original layout.
        folded = x.contiguous().view(1, batch * channels, *x.size()[2:])

        normed = F.batch_norm(
            folded,
            self.running_mean.repeat(batch),
            self.running_var.repeat(batch),
            self.weight, self.bias,
            True, self.momentum, self.eps)

        return normed.view(batch, channels, *x.size()[2:])

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, self.num_features)



def define_ID():
    """Build the identity-code decoder (see class ``dec``)."""
    return dec()


class dec(nn.Module):
    """MLP mapping a 256-d vector (e.g. sampled noise) to a unit-norm
    256-d identity code: 256 -> 1024 -> 1024 -> 512 -> 256."""
    def __init__(self):
        super(dec, self).__init__()
        # Build the trunk programmatically; layer order (and therefore
        # state-dict keys fc.0 .. fc.6) matches the hand-written version.
        widths = [256, 1024, 1024, 512]
        layers = []
        for w_in, w_out in zip(widths[:-1], widths[1:]):
            layers.append(nn.Linear(w_in, w_out))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
        layers.append(nn.Linear(512, 256))
        self.fc = nn.Sequential(*layers)

    def forward(self, x):
        # Project, then constrain the code to the unit hypersphere.
        return F.normalize(self.fc(x), p=2, dim=1)

def LightCNN_29v2(opt):
    """Build a 29-layer LightCNN v2 classifier from the options object
    *opt* and initialise it on ``opt.gpu_ids`` via ``init_net``."""
    net = network_29layers_v2(
        resblock, [1, 2, 3, 4],
        opt.input_nc, opt.ndf, opt.output_nc,
        opt.isTrain,
        num_classes=opt.num_classes,
        cm=opt.cm,
    )
    return init_net(net, gpu_ids=opt.gpu_ids)


class mfm(nn.Module):
    """Max-Feature-Map layer (LightCNN).

    Produces 2*out_channels features via a conv (``type == 1``) or a
    linear layer (any other ``type``), then keeps the element-wise max
    of the two halves along dim 1.
    """
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, type=1):
        super(mfm, self).__init__()
        self.out_channels = out_channels
        if type == 1:
            self.filter = nn.Conv2d(
                in_channels, 2 * out_channels,
                kernel_size=kernel_size, stride=stride, padding=padding)
        else:
            self.filter = nn.Linear(in_channels, 2 * out_channels)

    def forward(self, x):
        first_half, second_half = torch.split(self.filter(x), self.out_channels, 1)
        return torch.max(first_half, second_half)


class group(nn.Module):
    """LightCNN 'group' unit: a 1x1 mfm bottleneck followed by the main
    mfm convolution."""
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(group, self).__init__()
        self.conv_a = mfm(in_channels, in_channels, 1, 1, 0)
        self.conv = mfm(in_channels, out_channels, kernel_size, stride, padding)

    def forward(self, x):
        return self.conv(self.conv_a(x))


class resblock(nn.Module):
    """Residual unit of two 3x3 mfm convs: out = x + mfm2(mfm1(x)).

    Note: the identity skip requires in_channels == out_channels to add.
    """
    def __init__(self, in_channels, out_channels):
        super(resblock, self).__init__()
        self.conv1 = mfm(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.conv2 = mfm(out_channels, out_channels, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        return x + self.conv2(self.conv1(x))


class network_29layers_v2(nn.Module):
    """29-layer LightCNN v2 feature extractor + classifier.

    ``forward`` always returns ``(logits, fc_out)`` where
    ``fc_out['fc']`` is the L2-normalised embedding and, when
    ``cm=True``, ``fc_out['fc_CM']`` a linearly remapped (cross-modal)
    embedding. The fc layer's 16*16*ndf input implies a 256x256 input
    image (four /2 poolings from stride-1 convs) — TODO confirm.

    Args:
        block: residual block class (callers pass ``resblock``).
        layers: number of blocks per stage, e.g. [1, 2, 3, 4].
        input_nc: input image channels.
        ndf: channel width of the final group / embedding trunk.
        output_nc: embedding dimension.
        is_train: stored but currently unused (see NOTE in forward).
        num_classes: size of the classifier head.
        cm: if True, add the cross-modal mapping head fcCM_map.
    """
    def __init__(self, block, layers, input_nc=1,ndf=128,output_nc=256,is_train=False,num_classes=80013,cm = False):
        super(network_29layers_v2, self).__init__()
        self.is_train = is_train
        self.conv1 = mfm(input_nc, 48, 5, 1, 2)
        self.block1 = self._make_layer(block, layers[0], 48, 48)
        self.group1 = group(48, 96, 3, 1, 1)
        self.block2 = self._make_layer(block, layers[1], 96, 96)
        self.group2 = group(96, 192, 3, 1, 1)
        self.block3 = self._make_layer(block, layers[2], 192, 192)
        self.group3 = group(192, 128, 3, 1, 1)
        self.block4 = self._make_layer(block, layers[3], 128, 128)
        self.group4 = group(128, ndf, 3, 1, 1)
        # Flattened ndf x 16 x 16 map -> output_nc-d embedding.
        self.fc = nn.Linear(16 * 16 * ndf, output_nc)

        # NOTE(review): classifier head is built unconditionally; the
        # original is_train guard is commented out.
        # if self.is_train:
        self.fc2_ = nn.Linear(output_nc, num_classes, bias=False)
        # Cross modal mapping
        self.cm = cm
        if cm:
            self.fcCM_map = nn.Linear(output_nc, output_nc, bias=False)

    def _make_layer(self, block, num_blocks, in_channels, out_channels):
        """Stack *num_blocks* instances of *block* into a Sequential."""
        layers = []
        for i in range(0, num_blocks):
            layers.append(block(in_channels, out_channels))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return (logits, fc_out) for an image batch x.

        Each pooling stage sums max- and average-pooling (a LightCNN
        detail), halving the spatial size.
        """
        x = self.conv1(x)
        x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)

        x = self.block1(x)
        x = self.group1(x)
        x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)

        x = self.block2(x)
        x = self.group2(x)
        x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)

        x = self.block3(x)
        x = self.group3(x)
        x = self.block4(x)
        x = self.group4(x)
        x = F.max_pool2d(x, 2) + F.avg_pool2d(x, 2)

        x = x.view(x.size(0), -1)
        fc = self.fc(x)

        # NOTE(review): the is_train branch is commented out — logits are
        # computed even in eval mode (dropout is a no-op there).
        # if self.is_train:
        x = F.dropout(fc, training=self.training)
        out = self.fc2_(x)
        fc_out = {}
        fc_out['fc'] = F.normalize(fc, p=2, dim=1)
        if self.cm:
            fc_CM = self.fcCM_map(fc)
            fc_out['fc_CM'] = F.normalize(fc_CM, p=2, dim=1)
        
        
        
        return out, fc_out
        # else:
        #     return F.normalize(fc, p=2, dim=1)
        # else:
        #     return F.normalize(fc, p=2, dim=1)
        
# def load_model(model, pretrained):
#     weights = torch.load(pretrained)
#     pretrained_dict = weights["state_dict"]
#     model_dict = model.state_dict()
#     # 1. filter out unnecessary keys
#     pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
#     # 2. overwrite entries in the existing state dict
#     model_dict.update(pretrained_dict)
#     # 3. load the new state dict
#     model.load_state_dict(model_dict)

def set_requires_grad(nets, requires_grad=False):
    """Enable/disable gradient tracking for every parameter of one
    network or a list of networks; ``None`` entries are skipped."""
    nets = nets if isinstance(nets, list) else [nets]
    for net in nets:
        if net is None:
            continue
        for param in net.parameters():
            param.requires_grad = requires_grad

# def get_model():
#     # lightCNN = LightCNN_29v2(is_train=False)
#     # if os.path.exists('datasets/datasetp/pre_train/dvg/LightCNN_29Layers_V2_checkpoint.pth.tar'):
#     #     # load_model(lightCNN, 'datasets/datasetp/dvg/checkpoints/pre_train/LightCNN_29Layers_V2_checkpoint.pth.tar')
#     #     checkpoint = torch.load('datasets/datasetp/pre_train/dvg/LightCNN_29Layers_V2_checkpoint.pth.tar')
#     #     lightCNN.load_state_dict(checkpoint["state_dict"])
#     # set_requires_grad([lightCNN], False)
#     # lightCNN.eval()
#     s0lightCNN = LightCNN_29v2(num_classes=76,is_train=True)
#     s02labelpth = 'datasets/datasetp/multi_gen/checkpoints/LightCNN_pretrain/s02label/best.pth'
#     if os.path.exists(s02labelpth):
#         checkpoint = torch.load(s02labelpth,map_location='cpu')
#         s0lightCNN.load_state_dict(checkpoint["state_dict"])
#         # load_model(dolplightCNN, )
#     set_requires_grad([s0lightCNN], False)
#     s0lightCNN.eval()
    
#     dolp2labelpth='datasets/datasetp/multi_gen/checkpoints/LightCNN_pretrain/dolp2label/best.pth'
#     dolplightCNN = LightCNN_29v2(num_classes=76,is_train=True)
#     if os.path.exists(dolp2labelpth):
#         checkpoint = torch.load(dolp2labelpth,map_location='cpu')
#         dolplightCNN.load_state_dict(checkpoint["state_dict"])
#         # load_model(dolplightCNN, )
#     set_requires_grad([dolplightCNN], False)
#     dolplightCNN.eval()
    
#     dec = define_ID()
#     if os.path.exists('datasets/datasetp/pre_train/dvg/dec_epoch_45.pth.tar'):
#         # load_model(dec, 'datasets/datasetp/dvg/checkpoints/pre_train/dec_epoch_45.pth.tar')
#         checkpoint = torch.load('datasets/datasetp/pre_train/dvg/dec_epoch_45.pth.tar',map_location='cpu')
#     set_requires_grad([dec], False)
#     dec.eval()
    
#     encoder_polar, encoder_vis, decoder = define_G(input_dim=3, output_dim=3, ndf=32)
#     return {
#         "s0lightCNN":s0lightCNN,
#         "dolplightCNN":dolplightCNN,
#         # "lightCNN":lightCNN,
#         "dec":dec,
#         "encoder_polar":encoder_polar,
#         "encoder_vis":encoder_vis,
#         "decoder":decoder,
#     }

# def load_model(model,pth_path):
#     checkpoint=None
#     if os.path.exists(pth_path):
#         checkpoint = torch.load(os.path.join(pth_path),map_location='cpu')
#         model['encoder_polar'].load_state_dict(checkpoint['encoder_polar_state_dict'])
#         model['encoder_vis'].load_state_dict(checkpoint['encoder_vis_state_dict'])
#         model['decoder'].load_state_dict(checkpoint['decoder_state_dict'])
#     return checkpoint
# def get_loss():
#     criterionPix = torch.nn.L1Loss()
#     return criterionPix
# def get_npoints():
#     return None
# def predeal(data,config=None):
#     T = transforms.ToTensor()
#     for key in data.keys():
#         if key == 'label':
#             continue
#         data[key] = T(data[key]).unsqueeze(0)
#     return data
# def resdeal(data,config=None):
#     T = transforms.ToPILImage()
#     # data = data.permute(0,2,3,1)[0].detach().cpu().numpy()[:,:,0]
#     # data = (255*(data-data.min())/(data.max()-data.min())).astype(np.uint8)[:,:,0]
#     for key in data.keys():
#         data[key] = cv2.cvtColor(np.asarray(T(data[key].cpu()[0,:,:,:])),cv2.COLOR_RGB2BGR)
#         filedirlist = ['outputs',config.datatsetstype[0],config.arch,config.category]
#         path = mkfiledir(filedirlist)
#         cv2.imwrite(os.path.join(path,key)+'.jpg',data[key])
#     return data['fake_polar']
# def execdeal(model,data,config=None):
        
#     for key in data.keys():
#         if key == 'label':
#             continue
#         data[key] = data[key].to(config.device)
    
#     id_vis = model['s0lightCNN'](rgb2gray(data[config.input]))[1]
#     noise = torch.zeros(data[config.input].shape[0], 256).normal_(0, 1).to(config.device)
#     id_noise = model['dec'](noise)
    
#     # arange = torch.arange(data[config.input].shape[0]).to(config.device)
#     # idx = torch.randperm(data[config.input].shape[0]).to(config.device)
#     # while 0.0 in (idx - arange):
#     #     idx = torch.randperm(data[config.input].shape[0]).to(config.device)
    
#     z_polar = model['encoder_polar'](data[config.output], "enc")
#     z_vis = model['encoder_vis'](data[config.input], "enc")
    
#     style_polar = model['encoder_polar'](z_polar, "style")
#     style_vis = model['encoder_vis'](z_vis, "style")
    
#     fake_polar = model['decoder'](torch.cat([id_noise, z_polar,style_polar], dim=1), "polar")
    
#     rec_polar = model['decoder'](torch.cat([id_vis, z_polar,style_polar], dim=1), "polar")
#     # rec_polar_idx = model['decoder'](torch.cat([id_vis[idx, :], z_polar,style_polar], dim=1), "polar")
#     fake_polar = model['decoder'](torch.cat([id_noise, z_polar,style_polar], dim=1), "polar")

#     # assign_adain_params(style_vis, model['decoder'])
#     rec_vis = model['decoder'](torch.cat([id_vis, z_vis, style_vis], dim=1), "vis")
#     # rec_vis_idx = model['decoder'](torch.cat([id_vis[idx, :], z_vis, style_vis], dim=1), "vis")
#     fake_vis = model['decoder'](torch.cat([id_noise, z_vis, style_vis], dim=1), "vis")
    
#     data = {}
#     data['rec_polar'] = rec_polar
#     data['fake_polar'] = fake_polar
#     data['rec_vis'] = rec_vis
#     data['fake_vis'] = fake_vis
    
#     return data