import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as T
import torchvision.models as models
import matplotlib.pyplot as plt
from src.models.multiview_base import MultiviewBase
from src.models.mvselect import CamSelect
from PIL import Image
# Sentinel UV value: a pixel whose channels all equal INVALID_UV carries no
# valid texture coordinate (used to build validity masks in MVDNR.get_feat).
INVALID_UV = -1.0
class down(nn.Module):
    """Encoder stage: 4x4 stride-2 convolution (halves H and W), followed by
    InstanceNorm and LeakyReLU(0.2)."""

    def __init__(self, in_ch, out_ch):
        super(down, self).__init__()
        stages = [
            nn.Conv2d(in_ch, out_ch, kernel_size=4, stride=2, padding=1),
            nn.InstanceNorm2d(out_ch),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Keep the attribute name `conv` so existing checkpoints still load.
        self.conv = nn.Sequential(*stages)

    def forward(self, x):
        return self.conv(x)


class up(nn.Module):
    """Decoder stage: 4x4 stride-2 transposed convolution (doubles H and W),
    InstanceNorm, then Tanh when `final` else LeakyReLU(0.2).

    When `concat` is True, forward pads/crops x1 to x2's spatial size and
    concatenates [x2, x1] along channels before the convolution.
    """

    def __init__(self, in_ch, out_ch, output_pad=0, concat=True, final=False):
        super(up, self).__init__()
        self.concat = concat
        self.final = final
        # Only the activation differs between the final and intermediate stages.
        activation = nn.Tanh() if final else nn.LeakyReLU(0.2, inplace=True)
        self.conv = nn.Sequential(
            nn.ConvTranspose2d(in_ch, out_ch, kernel_size=4, stride=2,
                               padding=1, output_padding=output_pad),
            nn.InstanceNorm2d(out_ch),
            activation,
        )

    def forward(self, x1, x2):
        if self.concat:
            # Match x1's spatial dims to x2 (negative pads crop), then fuse.
            dh = x2.size(2) - x1.size(2)
            dw = x2.size(3) - x1.size(3)
            x1 = F.pad(x1, [dw // 2, dw - dw // 2, dh // 2, dh - dh // 2])
            x1 = torch.cat((x2, x1), dim=1)
        return self.conv(x1)


class UNet(nn.Module):
    """Five-level encoder-decoder with skip connections.

    Each `down` stage halves the spatial resolution; each `up` stage doubles
    it and (except the innermost, `up1`) concatenates the matching encoder
    feature map, which is why the decoder in-channels include the skip
    channels (1024 = 512+512, 768 = 512+256, ...). The last stage uses Tanh,
    so outputs lie in [-1, 1].
    """

    def __init__(self, input_channels, output_channels):
        super(UNet, self).__init__()
        # Encoder channel progression: input -> 64 -> 128 -> 256 -> 512 -> 512.
        encoder_spec = [(input_channels, 64), (64, 128), (128, 256),
                        (256, 512), (512, 512)]
        for idx, (c_in, c_out) in enumerate(encoder_spec, start=1):
            # setattr keeps the original attribute names (down1..down5) so
            # checkpoints and external references remain valid.
            setattr(self, 'down%d' % idx, down(c_in, c_out))
        self.up1 = up(512, 512, output_pad=1, concat=False)
        self.up2 = up(1024, 512)
        self.up3 = up(768, 256)
        self.up4 = up(384, 128)
        self.up5 = up(192, output_channels, final=True)

    def forward(self, x):
        # Run the encoder, remembering every stage's output for the skips.
        skips = []
        for stage in (self.down1, self.down2, self.down3, self.down4, self.down5):
            x = stage(x)
            skips.append(x)
        e1, e2, e3, e4, e5 = skips
        d = self.up1(e5, None)
        d = self.up2(d, e4)
        d = self.up3(d, e3)
        d = self.up4(d, e2)
        return self.up5(d, e1)

class HierarchicalStaticNeuralTexture(nn.Module):
    """Learnable multi-resolution (mip-pyramid) neural texture.

    A single parameter tensor ``data`` of shape
    (1, texture_features, 2 * texture_dimensions, texture_dimensions) packs
    four square levels stacked vertically: W, W/2, W/4 and W/8 (the remaining
    rows are unused). Sampling evaluates all four levels at the same UV
    coordinates and sums them, so coarser levels contribute low-frequency
    detail.
    """

    def __init__(self, texture_dimensions, texture_features, random_init=False):
        """
        Args:
            texture_dimensions: side length W of the finest level; should be
                divisible by 8 so every pyramid level has an integer size.
            texture_features: number of feature channels per texel.
            random_init: initialize texels from N(0, 1) instead of zeros.
        """
        super(HierarchicalStaticNeuralTexture, self).__init__()
        self.texture_dimensions = texture_dimensions
        self.out_ch = texture_features  # channels produced when sampling the texture
        init = torch.randn if random_init else torch.zeros
        self.register_parameter(
            'data',
            torch.nn.Parameter(init(1, self.out_ch, 2 * self.texture_dimensions,
                                    self.texture_dimensions)))

    def _slice_level(self, texture_id, offset_y, w):
        """Return the (1, C, w, w) view of pyramid level starting at row offset_y."""
        return self.data[texture_id:texture_id + 1, :, offset_y:offset_y + w, :w]

    @staticmethod
    def _sample(tex, uvs):
        # Bilinear lookup with border clamping. align_corners=False is the
        # framework default; passing it explicitly avoids the deprecation
        # warning without changing behavior.
        return F.grid_sample(tex, uvs, mode='bilinear', padding_mode='border',
                             align_corners=False)

    def forward_once(self, uvs, texture_id=0):
        """Sample and sum all four pyramid levels at `uvs`.

        Args:
            uvs: (1, H, W, 2) sampling grid with coordinates in [-1, 1].
            texture_id: index into the first dimension of `data` (0 here,
                since only one texture is allocated).
        Returns:
            (1, out_ch, H, W) summed feature samples.
        """
        w = self.texture_dimensions
        # The level slices are cached on self so regularizer() can reuse them.
        self.high_level_tex = self._slice_level(texture_id, 0, w)
        self.medium_level_tex = self._slice_level(texture_id, w, w // 2)
        self.low_level_tex = self._slice_level(texture_id, w + w // 2, w // 4)
        self.lowest_level_tex = self._slice_level(texture_id, w + w // 2 + w // 4, w // 8)
        return (self._sample(self.high_level_tex, uvs)
                + self._sample(self.medium_level_tex, uvs)
                + self._sample(self.low_level_tex, uvs)
                + self._sample(self.lowest_level_tex, uvs))

    def forward_once_3D(self, uvs):
        """Volumetric variant of forward_once.

        NOTE(review): `self.data` is 4-D (see __init__), but every slice below
        uses five indices, so this method raises IndexError as written. It
        presumably expects a 3-D texture layout that this class never
        allocates — confirm intended use before relying on it.
        """
        texture_id = 0
        offsetY = 0
        w = self.texture_dimensions
        self.high_level_tex = self.data[texture_id:texture_id + 1, :, offsetY:offsetY + w, :w, :w]
        high_level = self._sample(self.high_level_tex, uvs)
        offsetY += w
        w = w // 2
        self.medium_level_tex = self.data[texture_id:texture_id + 1, :, offsetY:offsetY + w, :w, :w]
        medium_level = self._sample(self.medium_level_tex, uvs)
        offsetY += w
        w = w // 2
        self.low_level_tex = self.data[texture_id:texture_id + 1, :, offsetY:offsetY + w, :w, :w]
        low_level = self._sample(self.low_level_tex, uvs)
        offsetY += w
        w = w // 2
        self.lowest_level_tex = self.data[texture_id:texture_id + 1, :, offsetY:offsetY + w, :w, :w]
        lowest_level = self._sample(self.lowest_level_tex, uvs)
        return high_level + medium_level + low_level + lowest_level

    def forward(self, uv_inputs):
        """Sample the texture at a dense UV map.

        Args:
            uv_inputs: (1, 2, H, W) tensor; channel 0 is u, channel 1 is v,
                both expected in [-1, 1] (grid_sample convention).
        Returns:
            (1, out_ch, H, W) sampled features.
        Raises:
            ValueError: if the batch size is not 1.
        """
        if uv_inputs.shape[0] != 1:
            # BUG FIX: was print(...) + exit(-1), which killed the whole
            # process; raise a catchable exception instead.
            raise ValueError(
                'HierarchicalStaticNeuralTexture forward only implemented for batchsize==1')
        uvs = torch.stack([uv_inputs[:, 0, :, :], uv_inputs[:, 1, :, :]], 3)
        return self.forward_once(uvs)

    def regularizer(self, high_weight=8.0, medium_weight=2.0, low_weight=1.0, lowest_weight=0.0):
        """Weighted L2 penalty on the four pyramid levels.

        Must be called after a forward pass: it reads the level slices cached
        by forward_once. Fine levels get larger weights, pushing detail into
        the coarse levels.
        """
        regularizerTex = torch.mean(torch.pow(self.high_level_tex, 2.0)) * high_weight
        regularizerTex += torch.mean(torch.pow(self.medium_level_tex, 2.0)) * medium_weight
        regularizerTex += torch.mean(torch.pow(self.low_level_tex, 2.0)) * low_weight
        regularizerTex += torch.mean(torch.pow(self.lowest_level_tex, 2.0)) * lowest_weight
        return regularizerTex

    def SaveToFile(self, filename):
        """Render the finest level's first three channels to an image file.

        Builds an identity UV grid spanning [-1, 1], samples the texture, and
        writes channels 0:3 (mapped from [-1, 1] to [0, 255]) via PIL.
        Requires out_ch >= 3.
        """
        dim_range = torch.arange(0, self.texture_dimensions, dtype=torch.float) \
            / (self.texture_dimensions - 1.0) * 2.0 - 1.0
        ones = torch.ones(self.texture_dimensions, dtype=torch.float)
        v = torch.ger(dim_range, ones)  # outer product: rows vary with v
        u = torch.ger(ones, dim_range)  # columns vary with u
        uv_id = torch.cat([u.unsqueeze(0).unsqueeze(0), v.unsqueeze(0).unsqueeze(0)], 1)
        uv_id = uv_id.to(self.data.device)

        image_tensor = self.forward(uv_id)[0, 0:3, :, :]
        image_numpy = image_tensor.detach().cpu().float().numpy()
        image_numpy = np.clip(image_numpy, -1.0, 1.0)
        image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
        image_numpy = image_numpy.astype(np.uint8)
        image_pil = Image.fromarray(image_numpy)
        image_pil.save(filename)

def define_Texture(tex_dim=2048, texture_features=16, random_init=False, init_type='normal', gain=0.02):
    """Build a HierarchicalStaticNeuralTexture and apply weight initialization.

    Args:
        tex_dim: side length of the finest texture level.
        texture_features: feature channels per texel.
        random_init: initialize texels randomly instead of zeros.
        init_type: one of 'normal', 'xavier', 'kaiming', 'orthogonal' —
            applied to any Conv/Linear submodules (the texture itself has
            none, so this mainly matters if the module gains such layers).
        gain: scale/std used by the chosen initializer.
    Returns:
        The initialized HierarchicalStaticNeuralTexture.
    Raises:
        NotImplementedError: for an unrecognized init_type.
    """
    net = HierarchicalStaticNeuralTexture(texture_dimensions=tex_dim, texture_features=texture_features, random_init=random_init)

    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                nn.init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                nn.init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'kaiming':
                nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            # BUG FIX: was `nn.init_type`, an attribute that does not exist on
            # torch.nn — selecting 'orthogonal' raised AttributeError.
            elif init_type == 'orthogonal':
                nn.init.orthogonal_(m.weight.data, gain=gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                nn.init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            nn.init.normal_(m.weight.data, 1.0, gain)
            nn.init.constant_(m.bias.data, 0.0)

    print('initialize network with %s' % init_type)
    net.apply(init_func)
    return net

class MVDNR(MultiviewBase):
    """Multiview deferred neural rendering: per-view UV maps are turned into
    neural-texture features, aggregated by the base class, and decoded to RGB
    by a U-Net."""

    def __init__(self, dataset, aggregation='max', tex_dim=2048, texture_features=16, random_init=False, init_type='normal', gain=0.02):
        super().__init__(dataset, aggregation)

        # Learnable hierarchical texture, evaluated at each view's UV map.
        self.texture = define_Texture(tex_dim=tex_dim, texture_features=texture_features, random_init=random_init, init_type=init_type, gain=gain)
        # Decoder from texture features to 3-channel output.
        self.unet = UNet(texture_features, 3)
        # Camera selection head operating on the same feature space.
        self.select_module = CamSelect(dataset.num_cam, texture_features, 1, aggregation)

    def get_feat(self, imgs):
        """Evaluate the neural texture for every (batch, camera) view.

        Args:
            imgs: (B, N, 3, H, W) per-camera maps; presumably channels 0/1
                hold UV coordinates and INVALID_UV marks empty pixels —
                confirm against the dataset.
        Returns:
            ((B, N, C, H, W) texture features, None). Also caches
            self.mask, a (B, N, H, W) bool validity mask, for get_output.
        """
        B, N, _, H, W = imgs.shape
        per_view_feats = []
        per_view_masks = []
        for b in range(B):
            for cam in range(N):
                # Texture forward only supports batch size 1, so sample
                # one view at a time.
                uv_map = imgs[b, cam].unsqueeze(0)
                per_view_feats.append(self.texture(uv_map).squeeze(0))
                valid = ((uv_map[:, 0] != INVALID_UV)
                         | (uv_map[:, 1] != INVALID_UV)
                         | (uv_map[:, 2] != INVALID_UV))
                per_view_masks.append(valid.squeeze(0))
        imgs_feat = torch.stack(per_view_feats).unflatten(0, [B, N])
        self.mask = torch.stack(per_view_masks).unflatten(0, [B, N])
        return imgs_feat, None

    def get_output(self, overall_feat, sel_ind):
        """Decode aggregated features into the final rendering.

        Returns a dict with the first three feature channels ("texture"),
        the U-Net output ("outputs"), and the validity masks of the selected
        cameras ("sel_masks", from the mask cached by get_feat).
        """
        rendered = self.unet(overall_feat)
        return {
            "texture": overall_feat[:, :3],
            "outputs": rendered,
            "sel_masks": self.mask[:, sel_ind],
        }


if __name__ == '__main__':
    # Smoke test: run one batch through the model with camera selection.
    from src.datasets import imgDataset
    from torch.utils.data import DataLoader
    from thop import profile
    import itertools

    dataset = imgDataset('/home/houyz/Data/modelnet/modelnet40_images_new_12x', 12)
    dataloader = DataLoader(dataset, 1, False, num_workers=0)
    imgs, tgt, keep_cams = next(iter(dataloader))
    # BUG FIX: was MVCNN, which is not defined/imported in this module
    # (NameError); the model defined here is MVDNR.
    model = MVDNR(dataset).cuda()
    init_prob = F.one_hot(torch.tensor([0, 1]), num_classes=dataset.num_cam)
    keep_cams[0, 3] = 0  # simulate one unavailable camera
    model.train()
    res = model(imgs.cuda(), None, 2, init_prob, 3, keep_cams)
    # macs, params = profile(model, inputs=(imgs[:, :2].cuda(),))
    # macs, params = profile(model.select_module, inputs=(torch.randn([1, 12, 512, 1, 1]).cuda(),
    #                                                     F.one_hot(torch.tensor([1]), num_classes=20).cuda()))
    # macs, params = profile(model, inputs=(torch.randn([1, 512, 1, 1]).cuda(),))
    pass
