from time import time

import torch
import torch.nn as nn
import torch.optim
import numpy as np
import utils
from model_tools import *
import torch.distributions as dists
# from tqdm import tqdm

import FrEIA.framework as Ff
import FrEIA.modules as Fm

# Fix the global RNG seeds so weight initialization (torch) and any
# numpy-based sampling are reproducible across runs.
torch.manual_seed(1234)
np.random.seed(42)

class ConvINN2(nn.Module):
    '''Multi-scale convolutional invertible neural network (INN) built with FrEIA.

    The graph stacks GLOW coupling blocks at three resolution levels
    separated by Haar downsamplings, splits channels off to the (flattened)
    latent output at each scale, and ends with a fully-connected section.
    '''
    def __init__(self, 
        input_shape             = [3,64,64], 
        num_conv_layers         = [2,4,4], 
        num_fc_layers           = [4], 
        device                  = None):
        '''
        Args:
            input_shape: [channels, height, width] of the input tensors.
            num_conv_layers: coupling-block counts for the three
                convolutional resolution levels.
            num_fc_layers: coupling-block count for the fully-connected
                section (only the first entry is used).
            device: torch device; falls back to the module-level DEVICE.
        '''
        super().__init__()

        self.device = device if device is not None else DEVICE
        self.input_shape = input_shape
        self.num_conv_layers = num_conv_layers
        self.num_fc_layers = num_fc_layers

        # Use the resolved self.device here: `.to(device)` with device=None
        # is a no-op, so the model would silently stay on its default device
        # whenever the caller relied on the DEVICE fallback.
        self.model = self.build_inn().to(self.device)
        self.trainable_parameters = [p for p in self.model.parameters() if p.requires_grad]
        # Small random re-initialization of every trainable parameter.
        for p in self.trainable_parameters: p.data = 0.02 * torch.randn_like(p)
        self.identifier = "ConvINN2-" + '-'.join(['{}']*(len(num_fc_layers) + len(num_conv_layers))).format(*num_conv_layers, *num_fc_layers)

        # Standard-normal latent distribution, with parameters on self.device
        # so sampled latents land on the right device.
        self.latent_dist = dists.normal.Normal(loc=torch.tensor(0, dtype=torch.float32).to(self.device), scale=torch.tensor(1, dtype=torch.float32).to(self.device))

    def build_inn(self):
        '''Construct and return the FrEIA ReversibleGraphNet for this model.'''

        input_shape = self.input_shape

        def sub_conv(ch_hidden, kernel):
            # Two-layer conv subnetwork factory for the coupling blocks;
            # "same" padding keeps the spatial size unchanged.
            pad = kernel // 2
            return lambda ch_in, ch_out: nn.Sequential(
                                            nn.Conv2d(ch_in, ch_hidden, kernel, padding=pad),
                                            nn.ReLU(),
                                            nn.Conv2d(ch_hidden, ch_out, kernel, padding=pad))

        def sub_fc(ch_hidden):
            # Two-layer fully-connected subnetwork factory.
            return lambda ch_in, ch_out: nn.Sequential(
                                            nn.Linear(ch_in, ch_hidden),
                                            nn.ReLU(),
                                            nn.Linear(ch_hidden, ch_out))

        nodes = [Ff.InputNode(*input_shape)]
        # Flattened side-outputs split off at intermediate resolutions.
        split_nodes = []

        # Level 1: full resolution.
        subnet = sub_conv(32, 3)
        for k in range(self.num_conv_layers[0]):
            nodes.append(Ff.Node(nodes[-1], Fm.GLOWCouplingBlock,
                                 {'subnet_constructor':subnet, 'clamp':1.0}))

        nodes.append(Ff.Node(nodes[-1], Fm.HaarDownsampling, {'order_by_wavelet': True, 'rebalance':0.5}))

        # Level 2: half resolution; alternate 3x3 and 1x1 subnetworks.
        for k in range(self.num_conv_layers[1]):
            subnet = sub_conv(64, 3 if k%2 else 1)

            nodes.append(Ff.Node(nodes[-1], Fm.GLOWCouplingBlock,
                                 {'subnet_constructor':subnet, 'clamp':1.0}))
            nodes.append(Ff.Node(nodes[-1], Fm.PermuteRandom, {'seed':k}))

        # Split off 75% of the channels to the latent output.
        section_sizes = [ input_shape[0], 3*input_shape[0] ]
        nodes.append(Ff.Node(nodes[-1], Fm.Split,
                             {'section_sizes':section_sizes, 'dim':0}))
        split_nodes.append(Ff.Node(nodes[-1].out1, Fm.Flatten, {}))

        nodes.append(Ff.Node(nodes[-1], Fm.HaarDownsampling, {'order_by_wavelet': True, 'rebalance':0.5}))

        # Level 3: quarter resolution.
        for k in range(self.num_conv_layers[2]):
            subnet = sub_conv(128, 3 if k%2 else 1)

            nodes.append(Ff.Node(nodes[-1], Fm.GLOWCouplingBlock,
                                 {'subnet_constructor':subnet, 'clamp':0.6}))
            nodes.append(Ff.Node(nodes[-1], Fm.PermuteRandom, {'seed':k}))

        # Split off 50% of the channels to the latent output.
        section_sizes = [2*input_shape[0], 2*input_shape[0]]
        nodes.append(Ff.Node(nodes[-1], Fm.Split,
                             {'section_sizes':section_sizes, 'dim':0}))
        split_nodes.append(Ff.Node(nodes[-1].out1, Fm.Flatten, {}))
        nodes.append(Ff.Node(nodes[-1], Fm.Flatten, {}, name='flatten'))

        # Fully-connected section on the remaining (flattened) channels.
        subnet = sub_fc(512)
        for k in range(self.num_fc_layers[0]):
            nodes.append(Ff.Node(nodes[-1], Fm.GLOWCouplingBlock,
                                 {'subnet_constructor':subnet, 'clamp':0.6}))
            nodes.append(Ff.Node(nodes[-1], Fm.PermuteRandom, {'seed':k}))

        # Concatenate the split-off latents with the main branch.
        nodes.append(Ff.Node([s.out0 for s in split_nodes] + [nodes[-1].out0],
                             Fm.Concat1d, {'dim':0}))
        nodes.append(Ff.OutputNode(nodes[-1]))

        return Ff.ReversibleGraphNet(nodes + split_nodes, verbose=False)

    def forward(self, input):
        '''Map data to latent space; returns (z, log_jacobian).'''
        z, jac = self.model([input], jac=True)
        return z, jac

    def sample(self, num_samples, temp=1.):
        '''Draw `num_samples` images by inverting the INN on scaled
        standard-normal latents; returns a numpy array.'''
        z = self.latent_dist.sample([num_samples, np.prod(self.input_shape)]) * temp
        # NOTE(review): forward wraps its input in a list but the reverse
        # pass gets the bare tensor — confirm against the FrEIA version used.
        x,_ = self.model(z, rev=True)
        x = x.cpu().detach().numpy()
        return x

    def save(self, *args, **kwargs):
        '''Persist the underlying INN weights via torch.save.'''
        torch.save(self.model.state_dict(), *args, **kwargs)


class Glow(nn.Module):
    '''Multi-scale Glow normalizing flow built from InvBlock stages.

    Each of the first (num_block - 1) blocks halves the spatial resolution
    and splits half of its channels off into the latent output; the last
    block keeps all channels (split=False).
    '''
    def __init__(
        self, input_shape, num_flow, num_block, 
        filter_size     = 512, 
        affine          = True, 
        conv_lu         = True, 
        device          = None,
    ):
        '''
        Args:
            input_shape: [channels, height, width] of the input tensors.
            num_flow: number of flow steps per block.
            num_block: number of multi-scale blocks.
            filter_size: hidden width of the coupling subnetworks.
            affine: use affine (vs additive) coupling.
            conv_lu: use LU-decomposed invertible 1x1 convolutions.
            device: torch device; falls back to the module-level DEVICE.
        '''
        super().__init__()

        self.device = device if device is not None else DEVICE
        self.blocks = nn.ModuleList()
        self.input_shape = input_shape
        self.num_flow = num_flow
        self.num_block = num_block
        n_channel = input_shape[0]
        for i in range(num_block - 1):
            self.blocks.append(InvBlock(n_channel, num_flow, affine=affine, filter_size=filter_size, conv_lu=conv_lu))
            n_channel *= 2
        self.blocks.append(InvBlock(n_channel, num_flow, split=False, filter_size=filter_size, affine=affine))

        self.z_shapes, self.cum_idxs = self.calc_z_shapes()

        self.identifier = f"Glow-flow{num_flow}-block{num_block}"
        # Materialize the parameters as a list: the bare generator returned
        # by self.parameters() would be exhausted after a single iteration
        # (e.g. the first optimizer construction), leaving this attribute
        # empty for any later use. ConvINN2 stores a list for the same field.
        self.trainable_parameters = list(self.parameters())

        # Standard-normal latent distribution, with parameters on self.device
        # so sampled latents land on the right device.
        self.latent_dist = dists.normal.Normal(loc=torch.tensor(0, dtype=torch.float32).to(self.device), scale=torch.tensor(1, dtype=torch.float32).to(self.device))

    def calc_z_shapes(self):
        '''Compute the per-block latent shapes and their cumulative flat sizes.

        Returns:
            (z_shapes, cum_idxs): list of (C, H, W) tuples, one per block,
            and the cumulative element counts used to slice a flat z array.
        '''
        z_shapes = []
        input_size = self.input_shape[1]
        n_channel = self.input_shape[0]

        # Each intermediate block halves the spatial size and, after the
        # squeeze+split, leaves twice the channels in the latent output.
        for i in range(self.num_block - 1):
            input_size //= 2
            n_channel *= 2

            z_shapes.append((n_channel, input_size, input_size))

        # Final block: no split, so all 4x squeezed channels go to z.
        input_size //= 2
        z_shapes.append((n_channel * 4, input_size, input_size))

        z_inds = [np.prod(zz) for zz in z_shapes]
        z_inds_c = [np.sum(z_inds[:i+1]) for i in range(len(z_shapes))]
        return z_shapes, z_inds_c

    def initialize_actnorm(self, data_batch):
        '''Run one forward pass so ActNorm layers data-initialize.'''
        print(self.device)
        with torch.no_grad():
            _,_,_ = self(data_batch.to(self.device))

    def list2array_z(self, z):
        '''Flatten a per-block list of latents into one (batch, D) array.

        Accepts numpy arrays or torch tensors; the batch size must agree
        across all entries.
        '''
        bs = z[0].shape[0]
        assert all(zz.shape[0] == bs for zz in z), "Wrong shapes in z"

        if isinstance(z[0], np.ndarray):
            return np.concatenate([zz.reshape(bs, -1) for zz in z], axis=1)
        elif isinstance(z[0], torch.Tensor):
            return torch.cat( [torch.reshape(zz, [bs, -1]) for zz in z], dim=1)
    
    def array2list_z(self, z):
        '''Inverse of list2array_z: slice a flat (batch, D) array back into
        the per-block latent shapes.'''
        bs = z.shape[0]
        z_shapes = [(0,)] + self.z_shapes
        z_idxs = [0] + self.cum_idxs
        z_list = []
        for i in range(1,len(z_shapes)):
            z_list.append( z[:,z_idxs[i-1]:z_idxs[i]].reshape(bs, *z_shapes[i]) )
        return z_list

    def forward(self, input):
        '''Map data to latent space.

        Returns:
            (z, log_p_sum, logdet): flattened latents, summed prior
            log-likelihood, and summed log-determinant of the Jacobian.
        '''
        log_p_sum = 0
        logdet = 0
        out = input
        z_outs = []

        for block in self.blocks:
            out, det, log_p, z_new = block(out)
            z_outs.append(z_new)
            logdet = logdet + det

            if log_p is not None:
                log_p_sum = log_p_sum + log_p

        z_outs = self.list2array_z(z_outs)
        return z_outs, log_p_sum, logdet

    def reverse(self, z, reconstruct=False):
        '''Invert the flow: map a flat latent array back to data space.'''
        z_list = self.array2list_z(z)
        for i, block in enumerate(self.blocks[::-1]):
            if i == 0:
                # The deepest block has no separate split output, so its
                # latent serves as both the input and the eps argument.
                input = block.reverse(z_list[-1], z_list[-1], reconstruct=reconstruct)

            else:
                input = block.reverse(input, z_list[-(i + 1)], reconstruct=reconstruct)

        return input, None

    def sample(self, num_samples, temp=1.):
        '''Draw `num_samples` images by inverting the flow on scaled
        standard-normal latents; returns a numpy array.'''
        z = self.latent_dist.sample([num_samples, np.prod(self.input_shape)]) * temp
        x,_ = self.reverse(z)
        x = x.cpu().detach().numpy()
        return x

    def get_loss(self, data_batch, num_bits=0):
        '''Negative log-likelihood in bits per pixel.

        When num_bits > 0, uniform dequantization noise is added and the
        discretization term log(num_bins) per pixel is subtracted.
        '''
        npixel = np.prod(self.input_shape)
        if num_bits == 0:
            _,log_p,logdet = self(data_batch.to(self.device))
            loss = log_p + logdet
        else:
            num_bins = 2 ** num_bits
            dbatch = data_batch + torch.rand_like(data_batch) / num_bins
            _,log_p,logdet = self(dbatch.to(self.device))
            loss = log_p + logdet - np.log(num_bins) * npixel
        # Convert nats to bits and normalize per pixel.
        return - torch.mean( loss / np.log(2) / npixel )