import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

def GetOuterExpIndex(x, d=2):
    """Return the smallest non-negative integer q with d**q >= x.

    Used to find how many halvings/doublings by *d* cover a spatial
    size of *x* (e.g. a 28-pixel edge needs 2**5 = 32).

    Args:
        x: size to cover; any value <= 1 yields 0.
        d: growth base; must be >= 2.

    Returns:
        int: the exponent q (ceil of log_d(x) for x > 1).

    Raises:
        ValueError: if d < 2 — with the original code such a base made
            ``p *= d`` a no-op and the loop below never terminated.
    """
    if d < 2:
        raise ValueError("d must be >= 2, got %r" % (d,))
    if x <= 1:
        return 0
    p: int = 1
    q: int = 0
    while p < x:
        p *= d
        q += 1
    return q


class ImageEncoder(nn.Module):
    """Encode an image into a flat latent vector.

    The input is zero-padded up to the nearest power-of-two square,
    lifted by a head conv, then repeatedly refined by groups of
    densely-connected convs and halved with PixelUnshuffle until the
    feature map is 1x1, and finally projected by a linear layer.
    """

    def __init__(self, input_size, input_channel, output_channel, num_feats=16, convs_in_group=2):
        super(ImageEncoder, self).__init__()
        # Smallest N with 2**N >= max(H, W): number of downscale stages.
        self.num_group = GetOuterExpIndex(max(input_size[:]))
        target = 2 ** self.num_group
        # Centered zero-padding amounts for each spatial dimension.
        self.pad_x0 = (target - input_size[0]) // 2
        self.pad_x1 = target - input_size[0] - self.pad_x0
        self.pad_y0 = (target - input_size[1]) // 2
        self.pad_y1 = target - input_size[1] - self.pad_y0
        self.convs_in_group = convs_in_group
        self.relu = nn.LeakyReLU(0.3)
        self.down = nn.PixelUnshuffle(2)
        self.flatten = nn.Flatten()
        self.groups = []
        self.input_channel = input_channel
        self.output_channel = output_channel
        # Head conv lifts the image into num_feats*4 channels.
        self.head = nn.Conv2d(input_channel, num_feats * 4, 3, 1, 1)
        for g in range(self.num_group):
            stage = []
            for c in range(convs_in_group):
                # First conv of a group sees the num_feats*4 stage input;
                # later convs see cat(x, last_x, init_x) = num_feats*6.
                in_ch = num_feats * 4 if c == 0 else num_feats * 6
                conv = nn.Conv2d(in_ch, num_feats, 3, 1, 1)
                # Register under the original naming scheme so saved
                # state dicts stay compatible.
                self.add_module('group%d-conv%d' % (g + 1, c + 1), conv)
                stage.append(conv)
            self.groups.append(stage)
        # Final projection from the 1x1, num_feats*4-channel map.
        self.fc = nn.Linear(num_feats * 4, output_channel)
        self.dropout = nn.Dropout(0.3)

    def forward(self, x0):
        # Center-pad the input up to the power-of-two square.
        feat = F.pad(x0, (self.pad_y0, self.pad_y1, self.pad_x0, self.pad_x1))
        feat = self.head(feat)
        prev = None
        for stage in self.groups:
            group_in = feat
            for idx, conv in enumerate(stage):
                if idx == 0:
                    feat = conv(feat)
                else:
                    # Dense connection: current, previous and group input.
                    feat = conv(torch.cat((feat, prev, group_in), 1))
                feat = self.relu(feat)
                prev = feat
            # Halve spatially, with a downscaled residual of the group input.
            feat = self.down(feat) + F.interpolate(group_in, scale_factor=0.5)
        return self.dropout(self.fc(self.flatten(feat)))
    
class ImageDecoder(nn.Module):
    """Decode a latent vector back into an image.

    The latent is projected to a 1x1 feature map, repeatedly doubled
    with PixelShuffle and refined by groups of densely-connected convs,
    then center-cropped to the requested output size.
    """

    def __init__(self, output_size, input_channel, output_channel, num_feats=16, convs_in_group=2):
        super(ImageDecoder, self).__init__()
        self.num_feats = num_feats
        self.fc = nn.Linear(input_channel, num_feats * 4)
        self.up = nn.PixelShuffle(2)

        # Smallest N with 2**N >= max(H, W): number of upscale stages.
        self.num_group = GetOuterExpIndex(max(output_size[:]))
        target = 2 ** self.num_group
        # Centered crop window inside the power-of-two canvas.
        self.crop_x0 = (target - output_size[0]) // 2
        self.crop_x1 = output_size[0] + self.crop_x0
        self.crop_y0 = (target - output_size[1]) // 2
        self.crop_y1 = output_size[1] + self.crop_y0
        self.convs_in_group = convs_in_group
        self.relu = nn.LeakyReLU(0.3)

        self.groups = []
        self.input_channel = input_channel
        self.output_channel = output_channel
        for g in range(self.num_group):
            stage = []
            for c in range(convs_in_group):
                # First conv sees the num_feats stage input; later convs
                # see cat(x, last_x, init_x) = num_feats*3. The last conv
                # widens back to num_feats*4 to feed PixelShuffle.
                in_ch = num_feats if c == 0 else num_feats * 3
                out_ch = num_feats * 4 if c == convs_in_group - 1 else num_feats
                conv = nn.Conv2d(in_ch, out_ch, 3, 1, 1)
                # Keep the original module names for state-dict compat.
                self.add_module('group%d-conv%d' % (g + 1, c + 1), conv)
                stage.append(conv)
            self.groups.append(stage)
        # Last conv restores the image channel count.
        self.last_conv = nn.Conv2d(num_feats * 4, output_channel, 3, 1, 1)

    def forward(self, x):
        feat = self.fc(x)
        feat = torch.reshape(feat, [-1, self.num_feats * 4, 1, 1])
        group_in = None
        prev = None
        for g, stage in enumerate(self.groups):
            if g == 0:
                feat = self.up(feat)
            else:
                # Upscale, with an upsampled residual of the previous stage input.
                feat = self.up(feat) + F.interpolate(group_in, scale_factor=2)
            group_in = feat
            for idx, conv in enumerate(stage):
                if idx == 0:
                    feat = conv(feat)
                else:
                    # Dense connection: current, previous and stage input.
                    feat = conv(torch.cat((feat, prev, group_in), 1))
                feat = self.relu(feat)
                prev = feat
        feat = self.last_conv(feat)
        # Center-crop back to the requested output size.
        # NOTE(review): dim 2 (H) is cropped with the y bounds and dim 3
        # (W) with the x bounds; harmless for square sizes, but for
        # non-square outputs verify this matches the encoder's padding.
        return feat[:, :, self.crop_y0:self.crop_y1, self.crop_x0:self.crop_x1]



class UnionModelResNet(nn.Module):
    """Joint autoencoder over a 28x28 single-channel image and a label
    feature vector.

    Both modalities are encoded, fused through three densely-connected
    mixing layers, and then each modality is reconstructed from the
    shared fused representation.
    """

    def __init__(self, n_class):
        super(UnionModelResNet, self).__init__()
        im_size = (28, 28)
        im_channel = 1
        im_latent_dim = 16
        im_num_feats = 16
        im_convs_in_group = 2
        fusion_feats = 16

        self.im_encoder = ImageEncoder(im_size, im_channel, im_latent_dim, im_num_feats, im_convs_in_group)
        self.relu = nn.LeakyReLU(0.3)
        self.dropout = nn.Dropout(0.3)
        # Three fusion layers with dense skip connections: each later
        # layer also sees earlier fused (or raw) features.
        self.mix1 = nn.Linear(
            in_features=im_latent_dim + n_class,
            out_features=fusion_feats,
            bias=True)
        self.mix2 = nn.Linear(
            in_features=fusion_feats + im_latent_dim + n_class,
            out_features=fusion_feats,
            bias=True)
        self.mix3 = nn.Linear(
            in_features=fusion_feats + fusion_feats,
            out_features=fusion_feats,
            bias=True)
        self.im_decoder = ImageDecoder(im_size, im_latent_dim, im_channel, im_num_feats, im_convs_in_group)
        # Two-layer head reconstructing the label vector.
        self.fc1 = nn.Linear(fusion_feats, fusion_feats, bias=True)
        self.fc2 = nn.Linear(fusion_feats, n_class, bias=True)

    def forward(self, x1, x2):
        # x1: image batch; x2: label/class feature batch.
        image_code = self.im_encoder(x1)
        label_code = self.dropout(x2)
        joint = torch.cat((image_code, label_code), 1)
        mixed1 = self.relu(self.mix1(joint))
        mixed2 = self.relu(self.mix2(torch.cat((mixed1, joint), 1)))
        mixed3 = self.relu(self.mix3(torch.cat((mixed2, mixed1), 1)))
        # Reconstruct both modalities from the fused representation.
        x1_restored = self.im_decoder(mixed3)
        x2_restored = self.fc2(self.fc1(mixed3))
        return x1_restored, x2_restored
        
