from time import time

import torch
import torch.nn as nn
import torch.optim
import numpy as np
import utils
from model_tools import *
import torch.distributions as dists
# from tqdm import tqdm

import FrEIA.framework as Ff
import FrEIA.modules as Fm

# Fix RNG seeds at import time so weight init and latent sampling are reproducible.
torch.manual_seed(1234)
np.random.seed(42)


class CondNet2(nn.Module):
    '''conditioning network

    A small convolutional feature pyramid: forward() returns the feature
    maps of all four resolution levels (full res, 1/2, 1/4, and a pooled
    global vector), which serve as conditioning inputs for the cINN.
    '''
    def __init__(self, cond_shape, cond_layer_thicknesses=[64, 128, 128, 512]):
        '''
        Args:
            cond_shape: (C, H, W) of the conditioning image. C is assumed to
                be 3 (the first conv has 3 input channels).
            cond_layer_thicknesses: output channel counts of the four levels.
        '''
        super().__init__()

        ct = cond_layer_thicknesses

        class Flatten(nn.Module):
            # flattens everything except the batch dimension
            def __init__(self, *args):
                super().__init__()
            def forward(self, x):
                return x.view(x.shape[0], -1)

        def _pooled_extent(n):
            # Spatial extent after the two stride-2 convs (kernel 3, pad 1:
            # out = ceil(n/2) each) followed by AvgPool2d(4) (out = floor(n/4)).
            # BUGFIX: the previous in-features expression
            # `ct[2] * cond_shape[1] // 16 * cond_shape[2] // 16` evaluated
            # left-to-right and only matched the true flattened size when
            # H and W were multiples of 16.
            n = -(-n // 2)   # ceil division
            n = -(-n // 2)
            return n // 4

        # Resolution levels: full resolution -> 1/2 -> 1/4 -> pooled global vector.
        self.resolution_levels = nn.ModuleList([
                           nn.Sequential(nn.Conv2d(3,  ct[0], 3, padding=1),
                                         nn.LeakyReLU(),
                                         nn.Conv2d(ct[0], ct[0], 3, padding=1)),

                           nn.Sequential(nn.LeakyReLU(),
                                         nn.Conv2d(ct[0], ct[1], 3, padding=1),
                                         nn.LeakyReLU(),
                                         nn.Conv2d(ct[1], ct[1], 3, padding=1, stride=2)),

                           nn.Sequential(nn.LeakyReLU(),
                                         nn.Conv2d(ct[1], ct[2], 3, padding=1, stride=2)),

                           nn.Sequential(nn.LeakyReLU(),
                                         nn.AvgPool2d(4),
                                         Flatten(),
                                         nn.Linear(ct[2] * _pooled_extent(cond_shape[1]) * _pooled_extent(cond_shape[2]), ct[3]))])

    def forward(self, c):
        '''Run c through all levels; returns the list of the four level outputs
        (the raw input is not included).'''
        outputs = [c]
        for m in self.resolution_levels:
            outputs.append(m(outputs[-1]))
        return outputs[1:]


class CondConvINN2(nn.Module):
    '''cINN, including the ocnditioning network'''
    def __init__(self, 
        input_shape             = [3,64,64], 
        cond_shape              = [3,64,64], 
        num_conv_layers         = [2,4,4], 
        num_fc_layers           = [4], 
        cond_layer_thicknesses  = [64, 128, 128, 512],
        device                  = None):

        super().__init__()

        self.device = device if device != None else DEVICE
        self.input_shape = input_shape
        self.cond_shape = cond_shape
        self.num_conv_layers = num_conv_layers
        self.num_fc_layers = num_fc_layers
        self.cond_layer_thicknesses = cond_layer_thicknesses
        # nodes = [Ff.InputNode(*input_shape, name='cond_input')]
        # ndim_x = np.prod(input_shape)

        self.model = self.build_inn().to(device)
        self.trainable_parameters = [p for p in self.model.parameters() if p.requires_grad]
        for p in self.trainable_parameters: p.data = 0.02 * torch.randn_like(p)
        self.cond_net = CondNet2(cond_shape=self.cond_shape, cond_layer_thicknesses=cond_layer_thicknesses).to(self.device)
        self.trainable_parameters += list(self.cond_net.parameters())
        self.identifier = "CondConvINN2-" + '-'.join(['{}']*(len(num_fc_layers) + len(num_conv_layers))).format(*num_conv_layers, *num_fc_layers)

        # define the latent variable distribution 
        self.latent_dist = dists.normal.Normal(loc=torch.tensor(0, dtype=torch.float32).to(self.device), scale=torch.tensor(1, dtype=torch.float32).to(self.device))

    def build_inn(self):

        input_shape = self.input_shape
        ct = self.cond_layer_thicknesses

        def sub_conv(ch_hidden, kernel):
            pad = kernel // 2
            return lambda ch_in, ch_out: nn.Sequential(
                                            nn.Conv2d(ch_in, ch_hidden, kernel, padding=pad),
                                            nn.ReLU(),
                                            nn.Conv2d(ch_hidden, ch_out, kernel, padding=pad))

        def sub_fc(ch_hidden):
            return lambda ch_in, ch_out: nn.Sequential(
                                            nn.Linear(ch_in, ch_hidden),
                                            nn.ReLU(),
                                            nn.Linear(ch_hidden, ch_out))

        nodes = [Ff.InputNode(*input_shape)]
        # outputs of the cond. net at different resolution levels
        conditions = [Ff.ConditionNode(ct[0], input_shape[1], input_shape[2]),
                      Ff.ConditionNode(ct[1], input_shape[1] // 2, input_shape[2] // 2),
                      Ff.ConditionNode(ct[2], input_shape[1] // 4, input_shape[1] // 4),
                      Ff.ConditionNode(ct[3])]

        split_nodes = []

        subnet = sub_conv(32, 3)
        for k in range(self.num_conv_layers[0]):
            nodes.append(Ff.Node(nodes[-1], Fm.GLOWCouplingBlock,
                                 {'subnet_constructor':subnet, 'clamp':1.0},
                                 conditions=conditions[0]))

        nodes.append(Ff.Node(nodes[-1], Fm.HaarDownsampling, {'order_by_wavelet': True, 'rebalance':0.5}))

        for k in range(self.num_conv_layers[1]):
            subnet = sub_conv(64, 3 if k%2 else 1)

            nodes.append(Ff.Node(nodes[-1], Fm.GLOWCouplingBlock,
                                 {'subnet_constructor':subnet, 'clamp':1.0},
                                 conditions=conditions[1]))
            nodes.append(Ff.Node(nodes[-1], Fm.PermuteRandom, {'seed':k}))

        #split off 75% of the channels
        section_sizes = [ input_shape[0], 3*input_shape[0] ]
        nodes.append(Ff.Node(nodes[-1], Fm.Split,
                             {'section_sizes':section_sizes, 'dim':0}))
        split_nodes.append(Ff.Node(nodes[-1].out1, Fm.Flatten, {}))

        nodes.append(Ff.Node(nodes[-1], Fm.HaarDownsampling, {'order_by_wavelet': True, 'rebalance':0.5}))

        for k in range(self.num_conv_layers[2]):
            subnet = sub_conv(128, 3 if k%2 else 1)

            nodes.append(Ff.Node(nodes[-1], Fm.GLOWCouplingBlock,
                                 {'subnet_constructor':subnet, 'clamp':0.6},
                                 conditions=conditions[2]))
            nodes.append(Ff.Node(nodes[-1], Fm.PermuteRandom, {'seed':k}))

        #split off 50% ch
        section_sizes = [2*input_shape[0], 2*input_shape[0]]
        nodes.append(Ff.Node(nodes[-1], Fm.Split,
                             {'section_sizes':section_sizes, 'dim':0}))
        split_nodes.append(Ff.Node(nodes[-1].out1, Fm.Flatten, {}))
        nodes.append(Ff.Node(nodes[-1], Fm.Flatten, {}, name='flatten'))

        # fully_connected part
        subnet = sub_fc(512)
        for k in range(self.num_fc_layers[0]):
            nodes.append(Ff.Node(nodes[-1], Fm.GLOWCouplingBlock,
                                 {'subnet_constructor':subnet, 'clamp':0.6},
                                 conditions=conditions[3]))
            nodes.append(Ff.Node(nodes[-1], Fm.PermuteRandom, {'seed':k}))

        # concat everything
        nodes.append(Ff.Node([s.out0 for s in split_nodes] + [nodes[-1].out0],
                             Fm.Concat1d, {'dim':0}))
        nodes.append(Ff.OutputNode(nodes[-1]))

        return Ff.ReversibleGraphNet(nodes + split_nodes + conditions, verbose=False)

    def forward(self, input, cond):
        z, jac = self.model([input], self.cond_net(cond), jac=True)
        return z, jac

    def sample(self, num_samples, cond, temp=1.):
        # z = torch.randn(num_samples, np.prod(self.input_shape)).to(self.device) * temp
        z = self.latent_dist.sample([num_samples, np.prod(self.input_shape)]) * temp
        cond = torch.cat([cond]*num_samples)
        x,_ = self.model(z, self.cond_net(cond), rev=True)
        x = x.cpu().detach().numpy()
        return x

    def save(self, *args, **kwargs):
        torch.save(self.model.state_dict(), *args, **kwargs)


