import os
import pickle

import dnnlib
import legacy
import numpy as np
import torch
import torch.nn as nn
from segmentation.utils import eval_net_few_shot, init_get_representation
from segmentation.network import DownBlock, UpBlock, BiFPN
from torch.nn import Parameter


class Teacher(nn.Module):
    """Frozen teacher: a pretrained StyleGAN generator paired with a
    pretrained segmentation head operating on the generator's internal
    feature representations. All weights are loaded frozen (eval mode,
    no gradients)."""

    def __init__(self, batch_size, G_pkl_dir, SegNet_pkl_dir, return_repre=False, **kwargs):
        super(Teacher, self).__init__()
        self.batch_size = batch_size
        self.return_repre = return_repre
        # Generator resolutions whose feature maps are extracted.
        repre_layers = [4, 8, 16, 32, 64, 128, 256]

        # Load the frozen generator (EMA weights) from a StyleGAN pickle.
        with dnnlib.util.open_url(os.path.join(G_pkl_dir)) as f:
            data = legacy.load_network_pkl(f)
            self.G = data['G_ema'].eval().requires_grad_(False)
            del data

        self.get_representation = init_get_representation(self.G, repre_layers, 256, 'const')

        # Load the frozen segmentation head.
        # NOTE(review): pickle.load executes arbitrary code — only use
        # trusted checkpoint files here.
        with open(os.path.join(SegNet_pkl_dir), 'rb') as f:
            self.net = pickle.load(f)['net'].eval().requires_grad_(False)

    def forward(self):
        """Sample a batch of images and return (image, pred, repre-or-None)."""
        latents = torch.randn(self.batch_size, self.G.z_dim).cuda()
        ws = self.G.mapping(latents, None)  # [N, L, C]

        image, representation = self.get_representation(ws, False)
        pred = self.net(representation)

        repre_out = representation if self.return_repre else None
        return image, pred, repre_out


class Student(nn.Module):
    """U-Net-shaped student network whose decoder features are fused by a
    BiFPN head. forward() returns (BiFPN output, decoder-skips-or-None)."""

    def __init__(self, in_ch=1, out_ch=3, return_repre=False, **kwargs):
        super(Student, self).__init__()
        self.out_ch = out_ch
        self.return_repre = return_repre

        # Reference channel counts per generator resolution (kept for documentation).
        channels_of_layer = {4:512, 8:512, 16:512, 32:512, 64:256, 128:128, 256:64}
        # Encoder/decoder channel schedule; the comment row gives the
        # spatial resolution at each stage.
        in_chs=[in_ch, 64,  128, 256, 512, 512, 512, 1024, 1024, 1024, 768, 384, 192]
        out_chs = [64, 128, 256, 512, 512, 512, 512, 512,  512,  512,  256, 128, 64]
        #           256  128  64   32   16   8    4    8     16    32    64   128  256
        print('using UNET')

        n_down = (len(in_chs) + 1) // 2
        self.downBlocks = nn.ModuleList(
            DownBlock(in_chs[i], out_chs[i], is_first_block=(i == 0),
                      nConv=2, kernel_size=3, stride=1, padding=1)
            for i in range(n_down)
        )
        self.upBlocks = nn.ModuleList(
            UpBlock(in_chs[i], out_chs[i], nConv=2, kernel_size=3, stride=1, padding=1)
            for i in range(n_down, len(in_chs))
        )

        self.BiFPN = BiFPN(out_ch=out_ch, height=n_down, n_block=5, chns=out_chs[-n_down:])

        self.param_count = sum(p.numel() for p in self.parameters())
        print(f'Student param_count: {self.param_count}')

    def forward(self, x):
        # Encoder: collect one feature map per stage.
        skips = []
        for down in self.downBlocks:
            x = down(x)
            skips.append(x)

        # Decoder: overwrite each skip (deepest first) with the fused result.
        idx = len(skips)
        for up in self.upBlocks:
            skips[idx - 2] = up(skips[idx - 1], skips[idx - 2])
            idx -= 1

        # BiFPN expects finest-to-coarsest ordering.
        skips = skips[::-1]

        out = self.BiFPN(skips)
        return (out, skips) if self.return_repre else (out, None)
            

class DownBlock(nn.Module):
    """Encoder block: optional 2x max-pool, then nConv conv(+BN)+ReLU layers.

    The first block of a network skips the pooling so the input resolution
    is preserved at the top level.
    """

    def __init__(self, in_ch, out_ch, is_first_block=False, nConv=2, kernel_size=3, stride=1, padding=1, do_batchnorm=True):
        super(DownBlock, self).__init__()

        layers = []
        if not is_first_block:
            # Halve spatial resolution before convolving.
            layers.append(nn.MaxPool2d(kernel_size=2))
        ch = in_ch
        for _ in range(nConv):
            layers.append(nn.Conv2d(ch, out_ch, kernel_size, stride, padding))
            if do_batchnorm:
                layers.append(nn.BatchNorm2d(out_ch))
            layers.append(nn.ReLU(inplace=True))
            ch = out_ch

        self.block = nn.Sequential(*layers)

    def forward(self, x):
        return self.block(x)


class UpBlock(nn.Module):
    """Decoder block: bilinear 2x upsample, concat with the skip tensor,
    then nConv conv(+BN)+ReLU layers.

    in_ch is the channel count AFTER concatenation; each conv halves the
    width, clamped from below at out_ch.
    """

    def __init__(self, in_ch, out_ch, nConv=2, kernel_size=3, stride=1, padding=1, do_batchnorm=True):
        super(UpBlock, self).__init__()

        # Channel schedule: keep halving from in_ch, never dropping below out_ch.
        widths = []
        w = in_ch
        for _ in range(nConv):
            w = w // 2
            widths.append(max(w, out_ch))

        layers = []
        ch = in_ch
        for width in widths:
            layers.append(nn.Conv2d(ch, width, kernel_size, stride, padding))
            if do_batchnorm:
                layers.append(nn.BatchNorm2d(width))
            layers.append(nn.ReLU(inplace=True))
            ch = width

        self.block = nn.Sequential(*layers)

    def forward(self, x, x_skip):
        # Upsample to the skip tensor's resolution, then fuse along channels.
        upsampled = nn.UpsamplingBilinear2d(scale_factor=2)(x)
        fused = torch.cat([upsampled, x_skip], dim=1)
        return self.block(fused)


class UNet(nn.Module):
    """Standard U-Net: nBlock encoder stages, nBlock-1 decoder stages with
    skip connections, and a final 1x1 conv projecting to out_ch channels.

    forward() returns (logits, None); the None keeps the return shape
    aligned with Student.forward.
    """

    def __init__(self, in_ch=3, out_ch=1, nConv=2, nBlock=5, ch=64, kernel_size=3, stride=1, padding=1, **kwargs):
        super(UNet, self).__init__()
        self.nBlock = nBlock
        self.out_ch = out_ch

        # Channel schedule: widths double on the way down, halve on the way up.
        # e.g. nBlock=5, ch=64, in_ch=3:
        #   in_chs  = [3,  64,  128, 256, 512, 1024, 512, 256, 128, 64]
        #   out_chs = [64, 128, 256, 512, 512, 256,  128, 64,  64,  out_ch]
        down_in = [in_ch] + [ch * 2 ** n for n in range(nBlock - 1)]
        up_in = [ch * 2 ** n for n in range(nBlock - 1, 0, -1)] + [ch]
        in_chs = down_in + up_in
        down_out = [ch * 2 ** n for n in range(nBlock - 1)]
        up_out = [ch * 2 ** n for n in range(nBlock - 2, -1, -1)] + [ch, out_ch]
        out_chs = down_out + up_out

        down_blocks = [
            DownBlock(in_chs[i], out_chs[i], is_first_block=(i == 0),
                      nConv=nConv, kernel_size=kernel_size, stride=stride, padding=padding)
            for i in range(nBlock)
        ]
        up_blocks = [
            UpBlock(in_chs[nBlock + i], out_chs[nBlock + i],
                    nConv=nConv, kernel_size=kernel_size, stride=stride, padding=padding)
            for i in range(nBlock - 1)
        ]
        self.downBlocks = nn.ModuleList(down_blocks)
        self.upBlocks = nn.ModuleList(up_blocks)
        self.last_conv = nn.Conv2d(in_chs[-1], out_chs[-1], 1)

        self.param_count = sum(p.numel() for p in self.parameters())
        print(f'U-Net param_count: {self.param_count}')

    def forward(self, x):
        # Encoder: save every stage's output except the bottleneck as a skip.
        skips = []
        for idx, down in enumerate(self.downBlocks):
            x = down(x)
            if idx < self.nBlock - 1:
                skips.append(x)
        # Decoder: consume skips deepest-first.
        for up in self.upBlocks:
            x = up(x, skips.pop())

        return self.last_conv(x), None

# =========================================================

def l2normalize(v, eps=1e-12):
    """Return v scaled to unit L2 norm; eps guards against division by zero."""
    norm = v.norm() + eps
    return v / norm

class SpectralNorm(nn.Module):
    """Spectral-normalisation wrapper (SN-GAN style).

    Re-parameterises the wrapped module's parameter `name` as
    w = w_bar / sigma(w_bar), where sigma is the largest singular value of
    the weight viewed as a 2-D matrix, estimated by power iteration. The
    iteration vectors and raw weight are stored on the wrapped module as
    `<name>_u`, `<name>_v` and `<name>_bar`.
    """

    def __init__(self, module, name='weight', power_iterations=1):
        # module: layer whose parameter `name` is normalised (e.g. nn.Conv2d).
        # power_iterations: power-iteration steps run per forward pass.
        super(SpectralNorm, self).__init__()
        self.module = module
        self.name = name
        self.power_iterations = power_iterations
        if not self._made_params():
            self._make_params()

    def _update_u_v(self):
        """Run the power iteration, then set module.<name> = w_bar / sigma."""
        u = getattr(self.module, self.name + "_u")
        v = getattr(self.module, self.name + "_v")
        w = getattr(self.module, self.name + "_bar")

        # Power iteration on w flattened to a (height, -1) matrix; updates
        # go through .data so they stay outside the autograd graph.
        height = w.data.shape[0]
        for _ in range(self.power_iterations):
            v.data = l2normalize(torch.mv(torch.t(w.view(height,-1).data), u.data))
            u.data = l2normalize(torch.mv(w.view(height,-1).data, v.data))

        # sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data))
        # sigma approximates the largest singular value of w; dividing by it
        # gives the weight unit spectral norm.
        sigma = u.dot(w.view(height, -1).mv(v))
        setattr(self.module, self.name, w / sigma.expand_as(w))

    def _made_params(self):
        """Return True if the u/v/bar parameters have already been created."""
        try:
            u = getattr(self.module, self.name + "_u")
            v = getattr(self.module, self.name + "_v")
            w = getattr(self.module, self.name + "_bar")
            return True
        except AttributeError:
            return False

    def _make_params(self):
        """Replace module.<name> with w_bar and register the u, v vectors."""
        w = getattr(self.module, self.name)

        height = w.data.shape[0]
        width = w.view(height, -1).data.shape[1]

        # u and v are frozen (requires_grad=False): they are updated in-place
        # by the power iteration, never by the optimiser.
        u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
        v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
        u.data = l2normalize(u.data)
        v.data = l2normalize(v.data)
        w_bar = Parameter(w.data)

        # Remove the original parameter so the plain-attribute setattr in
        # _update_u_v does not clash with a registered Parameter.
        del self.module._parameters[self.name]

        self.module.register_parameter(self.name + "_u", u)
        self.module.register_parameter(self.name + "_v", v)
        self.module.register_parameter(self.name + "_bar", w_bar)

    def forward(self, *args):
        # Refresh the normalised weight, then delegate to the wrapped module.
        self._update_u_v()
        return self.module.forward(*args)

class Self_Attn(nn.Module):
    """SAGAN-style self-attention layer over 2-D feature maps.

    Computes attention across all spatial positions and adds the attended
    features back to the input, gated by a learned scalar `gamma` that
    starts at zero (so the layer begins as the identity).
    """

    def __init__(self, in_dim, activation):
        super(Self_Attn, self).__init__()
        self.chanel_in = in_dim
        self.activation = activation

        # Query/key project to in_dim//8 channels; value keeps full width.
        self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        # Residual gate, initialised to 0.
        self.gamma = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        """
        Args:
            x: feature map of shape (B, C, W, H).
        Returns:
            out: gamma * attended(x) + x, same shape as x.
            attention: (B, N, N) attention map with N = W * H.
        """
        batch, ch, w, h = x.size()
        n = w * h

        q = self.query_conv(x).view(batch, -1, n).permute(0, 2, 1)  # B x N x C'
        k = self.key_conv(x).view(batch, -1, n)                     # B x C' x N
        attention = self.softmax(torch.bmm(q, k))                   # B x N x N
        v = self.value_conv(x).view(batch, -1, n)                   # B x C x N

        attended = torch.bmm(v, attention.permute(0, 2, 1)).view(batch, ch, w, h)
        out = self.gamma * attended + x
        return out, attention

class Generator(nn.Module):
    """Self-attention generator: spectral-normalised ConvTranspose2d stages
    upsampling a latent vector to an RGB image in [-1, 1], with two
    self-attention layers interleaved.

    NOTE(review): forward() unconditionally applies self.l4 and attn1
    (which expects 128 channels), but self.l4 is only created when
    image_size == 64 — other sizes would fail. Confirm the intended usage
    is image_size=64 only.
    """

    def __init__(self, batch_size, image_size=64, z_dim=100, conv_dim=64):
        # batch_size is accepted but not used in construction.
        super(Generator, self).__init__()
        self.imsize = image_size
        layer1 = []
        layer2 = []
        layer3 = []
        last = []

        # Number of 2x upsampling stages needed from a 4x4 base map.
        repeat_num = int(np.log2(self.imsize)) - 3
        mult = 2 ** repeat_num # 8
        # Stage 1: project z (reshaped to 1x1) up to a 4x4 feature map.
        layer1.append(SpectralNorm(nn.ConvTranspose2d(z_dim, conv_dim * mult, 4)))
        layer1.append(nn.BatchNorm2d(conv_dim * mult))
        layer1.append(nn.ReLU())

        curr_dim = conv_dim * mult

        # Stage 2: 4x4 -> 8x8, channels halved.
        layer2.append(SpectralNorm(nn.ConvTranspose2d(curr_dim, int(curr_dim / 2), 4, 2, 1)))
        layer2.append(nn.BatchNorm2d(int(curr_dim / 2)))
        layer2.append(nn.ReLU())

        curr_dim = int(curr_dim / 2)

        # Stage 3: 8x8 -> 16x16, channels halved.
        layer3.append(SpectralNorm(nn.ConvTranspose2d(curr_dim, int(curr_dim / 2), 4, 2, 1)))
        layer3.append(nn.BatchNorm2d(int(curr_dim / 2)))
        layer3.append(nn.ReLU())

        if self.imsize == 64:
            # Extra stage for 64x64 output: 16x16 -> 32x32.
            layer4 = []
            curr_dim = int(curr_dim / 2)
            layer4.append(SpectralNorm(nn.ConvTranspose2d(curr_dim, int(curr_dim / 2), 4, 2, 1)))
            layer4.append(nn.BatchNorm2d(int(curr_dim / 2)))
            layer4.append(nn.ReLU())
            self.l4 = nn.Sequential(*layer4)
            curr_dim = int(curr_dim / 2)

        self.l1 = nn.Sequential(*layer1)
        self.l2 = nn.Sequential(*layer2)
        self.l3 = nn.Sequential(*layer3)

        # Final 2x upsample to 3-channel RGB squashed into [-1, 1].
        last.append(nn.ConvTranspose2d(curr_dim, 3, 4, 2, 1))
        last.append(nn.Tanh())
        self.last = nn.Sequential(*last)

        # Attention at the 128- and 64-channel stages (matches the
        # imsize==64 channel schedule).
        self.attn1 = Self_Attn( 128, 'relu')
        self.attn2 = Self_Attn( 64,  'relu')

    def forward(self, z):
        """Map latent z of shape (B, z_dim) to an image.

        Returns (image, attn1_map, attn2_map).
        """
        z = z.view(z.size(0), z.size(1), 1, 1)
        out=self.l1(z)
        out=self.l2(out)
        out=self.l3(out)
        out,p1 = self.attn1(out)
        out=self.l4(out)
        out,p2 = self.attn2(out)
        out=self.last(out)

        return out, p1, p2


class Discriminator(nn.Module):
    """Discriminator, Auxiliary Classifier.

    Spectral-normalised strided-conv stack with two self-attention layers.
    An input-preprocessing step is selected by `preprocess_GAN_mode`:
    1 = BatchNorm, 2 = Tanh, 3 = rescale [0, 255] to [-1, 1].

    forward() returns [score_map, attn1_map, attn2_map].
    """

    def __init__(self, preprocess_GAN_mode, input_channel, batch_size=64, image_size=64, conv_dim=64):
        # batch_size is accepted but not used in construction.
        super(Discriminator, self).__init__()
        self.imsize = image_size

        def down_stage(in_c, out_c):
            # Spectral-normalised conv (k=4, s=2, p=1) + LeakyReLU:
            # halves the spatial resolution.
            return nn.Sequential(
                SpectralNorm(nn.Conv2d(in_c, out_c, 4, 2, 1)),
                nn.LeakyReLU(0.1),
            )

        self.l1 = down_stage(input_channel, conv_dim)
        self.l2 = down_stage(conv_dim, conv_dim * 2)
        self.l3 = down_stage(conv_dim * 2, conv_dim * 4)
        self.l4 = down_stage(conv_dim * 4, conv_dim * 8)

        top = conv_dim * 8
        self.last = nn.Sequential(
            SpectralNorm(nn.Conv2d(top, top, 4, 2, 1)),
            nn.LeakyReLU(0.1),
            SpectralNorm(nn.Conv2d(top, top, 4, 2, 1)),
            nn.LeakyReLU(0.1),
            nn.Conv2d(top, 1, 4),
        )

        # Attention channel counts are hard-coded to the conv_dim=64 schedule.
        self.attn1 = Self_Attn(256, 'relu')
        self.attn2 = Self_Attn(512, 'relu')

        if preprocess_GAN_mode == 1:  # batch-normalise the raw input
            self.preprocess_additional = nn.BatchNorm2d(input_channel)
        elif preprocess_GAN_mode == 2:  # squash into (-1, 1)
            self.preprocess_additional = nn.Tanh()
        elif preprocess_GAN_mode == 3:  # rescale [0, 255] -> [-1, 1]
            self.preprocess_additional = lambda x: 2*(x/255 - 0.5)
        else:
            raise ValueError('preprocess_GAN_mode should be 1:bn or 2:tanh or 3:-1 - 1')

        # parameters count
        self.param_count = sum(p.numel() for p in self.parameters())
        print(f'D param_count: {self.param_count}')

    def forward(self, x):
        """Score a batch of images; returns [score, attn1_map, attn2_map]."""
        h = self.preprocess_additional(x)
        h = self.l1(h)
        h = self.l2(h)
        h = self.l3(h)
        h, p1 = self.attn1(h)
        h = self.l4(h)
        h, p2 = self.attn2(h)
        h = self.last(h)

        return [h, p1, p2]

if __name__ == '__main__':
    # Smoke test for the Discriminator. Fixes vs. the previous version:
    #  * preprocess_GAN_mode must be 1, 2 or 3 (4 raised ValueError);
    #  * torch.tensor.zeros does not exist -> torch.zeros;
    #  * the conv stack downsamples by 2 seven times before a final
    #    kernel-4 conv, so a 256x256 input yields the required 4x4 map
    #    (65x65 crashed) and channels must match input_channel;
    #  * forward returns [score, attn1, attn2], not a bare tensor.
    D_model = Discriminator(2, 3, batch_size=4, image_size=256)
    img = torch.zeros((4, 3, 256, 256))
    out, p1, p2 = D_model(img)
    print(out.shape, p1.shape, p2.shape)
