import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as T
import torchvision.models as models
import matplotlib.pyplot as plt
from src.models.multiview_base import MultiviewBase
from src.models.mvselect import CamSelect
from PIL import Image
import math
# Sentinel value in UV maps marking background pixels (used by MVDNR.get_feat
# to build foreground masks).
INVALID_UV = -1.0
class down(nn.Module):
    """Residual downsampling block.

    A strided conv path plus a strided 1-conv skip projection are summed,
    then fused by one more 3x3 conv. Spatial size is halved (H/2, W/2).
    """

    def __init__(self, in_ch, out_ch):
        super(down, self).__init__()
        # Main path: stride-2 conv followed by a refining 3x3 conv.
        main_layers = [
            nn.Conv2d(in_ch, out_ch, 4, stride=2, padding=1),
            nn.InstanceNorm2d(out_ch),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
            nn.InstanceNorm2d(out_ch),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        self.conv = nn.Sequential(*main_layers)
        # Shortcut path: single stride-2 projection to out_ch.
        shortcut_layers = [
            nn.Conv2d(in_ch, out_ch, 4, stride=2, padding=1, bias=False),
            nn.InstanceNorm2d(out_ch),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        self.skip = nn.Sequential(*shortcut_layers)
        # Fuses the summed paths.
        fuse_layers = [
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
            nn.InstanceNorm2d(out_ch),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        self.out = nn.Sequential(*fuse_layers)

    def forward(self, x):
        merged = self.conv(x) + self.skip(x)
        return self.out(merged)


class up(nn.Module):
    """Upsampling decoder block.

    When ``concat`` is set, the skip tensor ``x2`` is concatenated (with
    padding to match spatial sizes) before the bilinear 2x upsample. The
    ``final`` variant ends in Tanh; otherwise a residual conv/skip pair is
    fused by an extra 3x3 conv.

    NOTE: ``output_pad`` is unused (left over from a ConvTranspose variant);
    kept for caller compatibility.
    """

    def __init__(self, in_ch, out_ch, output_pad=0, concat=True, final=False):
        super(up, self).__init__()
        self.concat = concat
        self.final = final
        self.up_sample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        mid_ch = in_ch // 2
        if self.final:
            # Output head: squeeze channels, then map to out_ch in [-1, 1].
            self.conv = nn.Sequential(
                nn.Conv2d(in_ch, mid_ch, 3, padding=1),
                nn.InstanceNorm2d(mid_ch),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(mid_ch, out_ch, 3, padding=1),
                nn.InstanceNorm2d(out_ch),
                nn.Tanh()
            )
        else:
            self.conv = nn.Sequential(
                nn.Conv2d(in_ch, mid_ch, 3, padding=1),
                nn.InstanceNorm2d(mid_ch),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(mid_ch, out_ch, 3, padding=1),
                nn.InstanceNorm2d(out_ch),
                nn.LeakyReLU(0.2, inplace=True),
            )
            # 1x1 projection so the residual sum has matching channels.
            self.skip = nn.Sequential(
                nn.Conv2d(in_ch, out_ch, 1, stride=1, padding=0, bias=False),
                nn.InstanceNorm2d(out_ch),
                nn.LeakyReLU(0.2, inplace=True),
            )
            self.out = nn.Sequential(
                nn.Conv2d(out_ch, out_ch, 3, padding=1),
                nn.InstanceNorm2d(out_ch),
                nn.LeakyReLU(0.2, inplace=True),
            )

    def forward(self, x1, x2):
        if self.concat:
            # Pad x1 so its spatial size matches x2 before channel concat.
            dy = x2.size(2) - x1.size(2)
            dx = x2.size(3) - x1.size(3)
            x1 = F.pad(x1, [dx // 2, dx - dx // 2, dy // 2, dy - dy // 2])
            x1 = torch.cat((x2, x1), dim=1)
        x1 = self.up_sample(x1)
        if self.final:
            return self.conv(x1)
        return self.out(self.conv(x1) + self.skip(x1))


class UNet(nn.Module):
    """Five-level encoder/decoder with skip connections.

    Input spatial size must be divisible by 32. The decoder blocks up2..up5
    concatenate the matching encoder feature, so their in_ch equals the sum
    of both inputs' channels. The final ``up`` ends in Tanh, so the output
    has ``output_channels`` channels in [-1, 1] at the input resolution.
    """

    def __init__(self, input_channels, output_channels):
        super(UNet, self).__init__()
        self.down1 = down(input_channels, 64)
        self.down2 = down(64, 128)
        self.down3 = down(128, 256)
        self.down4 = down(256, 512)
        self.down5 = down(512, 512)
        self.up1 = up(512, 512, output_pad=1, concat=False)
        self.up2 = up(1024, 512)
        self.up3 = up(768, 256)
        self.up4 = up(384, 128)
        self.up5 = up(192, output_channels, final=True)

    def forward(self, x):
        # Encoder: each stage halves the spatial resolution.
        enc1 = self.down1(x)
        enc2 = self.down2(enc1)
        enc3 = self.down3(enc2)
        enc4 = self.down4(enc3)
        enc5 = self.down5(enc4)
        # Decoder: first stage has no skip input, the rest consume encoder features.
        dec = self.up1(enc5, None)
        dec = self.up2(dec, enc4)
        dec = self.up3(dec, enc3)
        dec = self.up4(dec, enc2)
        return self.up5(dec, enc1)

class HierarchicalStaticNeuralTexture(nn.Module):
    """Learnable multi-resolution (mipmap-style) neural texture.

    The pyramid is stored in one parameter ``data`` of shape
    (1, texture_features, 2 * texture_dimensions, texture_dimensions).
    Five levels of side w, w/2, w/4, w/8, w/16 (w = texture_dimensions) are
    stacked vertically, each starting at the Y offset where the previous
    level ends (w + w/2 + w/4 + w/8 + w/16 < 2w, so the allocation fits).
    Sampling sums all five levels at the queried UV coordinates.
    """

    def __init__(self, texture_dimensions, texture_features, random_init=False):
        super(HierarchicalStaticNeuralTexture, self).__init__()
        self.texture_dimensions = texture_dimensions  # side length of the finest level
        self.out_ch = texture_features  # feature channels per texel
        init = torch.randn if random_init else torch.zeros
        self.register_parameter(
            'data',
            torch.nn.Parameter(init(1, self.out_ch, 2 * self.texture_dimensions,
                                    self.texture_dimensions, requires_grad=True)))

    def forward_once(self, uvs, texture_id=0):
        """Sample all five pyramid levels at ``uvs`` and return their sum.

        Args:
            uvs: sampling grid shaped (N, H, W, 2) with coordinates in [-1, 1]
                (grid_sample convention).
            texture_id: index into the first dimension of ``data``.

        Side effect: caches each level's slice on ``self``
        (``high_level_tex`` ... ``lowest_level_tex``) for ``regularizer()``.

        BUG FIX (review): the original version contained a large unreachable
        tail after the return that referenced an undefined ``Z_level``; it has
        been removed.
        """
        level_names = ['high_level_tex', 'higher_level_tex', 'medium_level_tex',
                       'low_level_tex', 'lowest_level_tex']
        offsetY = 0
        w = self.texture_dimensions
        total = None
        for name in level_names:
            tex = self.data[texture_id:texture_id + 1, :, offsetY:offsetY + w, :w]
            setattr(self, name, tex)  # cached for regularizer()
            sampled = torch.nn.functional.grid_sample(
                tex, uvs, mode='bilinear', padding_mode='border', align_corners=True)
            total = sampled if total is None else total + sampled
            offsetY += w
            w = w // 2
        return total

    def forward_once_3D(self, uvs):
        """Volumetric sampling variant (unused by ``forward``).

        NOTE(review): this indexes ``self.data`` with five dimensions, but the
        constructor allocates a 4-D texture, so calling it as-is raises. It
        only matches the (removed) 5-D allocation; kept for reference.
        """
        texture_id = 0
        offsetY = 0
        w = self.texture_dimensions
        self.high_level_tex = self.data[texture_id:texture_id+1, :, offsetY:offsetY+w, :w, :w]
        high_level = torch.nn.functional.grid_sample(self.high_level_tex, uvs, mode='bilinear', padding_mode='border')
        offsetY += w
        w = w // 2
        self.medium_level_tex = self.data[texture_id:texture_id+1, :, offsetY:offsetY+w, :w, :w]
        medium_level = torch.nn.functional.grid_sample(self.medium_level_tex, uvs, mode='bilinear', padding_mode='border')
        offsetY += w
        w = w // 2
        self.low_level_tex = self.data[texture_id:texture_id+1, :, offsetY:offsetY+w, :w, :w]
        low_level = torch.nn.functional.grid_sample(self.low_level_tex, uvs, mode='bilinear', padding_mode='border')
        offsetY += w
        w = w // 2
        self.lowest_level_tex = self.data[texture_id:texture_id+1, :, offsetY:offsetY+w, :w, :w]
        lowest_level = torch.nn.functional.grid_sample(self.lowest_level_tex, uvs, mode='bilinear', padding_mode='border')
        return high_level + medium_level + low_level + lowest_level

    def forward(self, uv_inputs):
        """Evaluate the texture at per-pixel UV coordinates.

        Args:
            uv_inputs: tensor (1, C>=2, H, W); channels 0 and 1 are the u/v
                coordinates in [-1, 1]. Only batch size 1 is supported.

        Returns:
            Sampled features shaped (1, out_ch, H, W).
        """
        b = uv_inputs.shape[0]  # batchsize
        if b != 1:
            print('ERROR: HierarchicalStaticNeuralTexture forward only implemented for batchsize==1')
            exit(-1)
        # Build a (1, H, W, 2) sampling grid from the first two UV channels.
        uvs_rg = torch.stack([uv_inputs[:, 0, :, :], uv_inputs[:, 1, :, :]], 3)
        return self.forward_once(uvs_rg, texture_id=0)

    def regularizer(self, high_weight=8.0, medium_weight=2.0, low_weight=1.0, lowest_weight=0.0):
        """Weighted L2 penalty on the cached level slices (finer levels
        penalized more, pushing detail into coarser levels).

        Requires a prior ``forward``/``forward_once`` call so the slices are
        cached. NOTE(review): ``higher_level_tex`` is not penalized here —
        presumably intentional, but worth confirming.
        """
        regularizerTex  = torch.mean(torch.pow(self.high_level_tex,   2.0)) * high_weight
        regularizerTex += torch.mean(torch.pow(self.medium_level_tex, 2.0)) * medium_weight
        regularizerTex += torch.mean(torch.pow(self.low_level_tex,    2.0)) * low_weight
        regularizerTex += torch.mean(torch.pow(self.lowest_level_tex, 2.0)) * lowest_weight
        return regularizerTex

    def SaveToFile(self, filename):
        """Render the first three feature channels over an identity UV grid
        and save the result as an 8-bit image at ``filename``."""
        dim_range = torch.arange(0, self.texture_dimensions, dtype=torch.float) / (self.texture_dimensions - 1.0) * 2.0 - 1.0
        ones = torch.ones(self.texture_dimensions, dtype=torch.float) * 1.0
        v = torch.ger(dim_range, ones)  # outer product (torch.outer in newer torch)
        u = torch.ger(ones, dim_range)
        uv_id = torch.cat([u.unsqueeze(0).unsqueeze(0), v.unsqueeze(0).unsqueeze(0)], 1)
        uv_id = uv_id.to(self.data.device)

        image_tensor = self.forward(uv_id)[0, 0:3, :, :]
        image_numpy = image_tensor.clone().cpu().float().detach().numpy()
        image_numpy = np.clip(image_numpy, -1.0, 1.0)
        # Map [-1, 1] -> [0, 255] and reorder CHW -> HWC.
        image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
        image_numpy = image_numpy.astype(np.uint8)
        image_pil = Image.fromarray(image_numpy)
        image_pil.save(filename)

def define_Texture(tex_dim=2048, texture_features=16, random_init=False, init_type='normal', gain=0.02):
    """Build a HierarchicalStaticNeuralTexture and initialize any Conv/Linear
    submodules it may contain according to ``init_type``.

    Args:
        tex_dim: side length of the finest texture level.
        texture_features: feature channels per texel.
        random_init: initialize the texture parameter randomly instead of zeros.
        init_type: one of 'normal', 'xavier', 'kaiming', 'orthogonal'.
        gain: scale passed to the weight initializers.

    Returns:
        The initialized texture module.

    Raises:
        NotImplementedError: for an unknown ``init_type`` (only if a
        Conv/Linear submodule is actually visited).
    """
    net = HierarchicalStaticNeuralTexture(texture_dimensions=tex_dim, texture_features=texture_features, random_init=random_init)

    def init_func(m):
        # Applied to every submodule by net.apply() below.
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                nn.init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                nn.init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'kaiming':
                nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':  # BUG FIX: was `nn.init_type`, which would raise AttributeError
                nn.init.orthogonal_(m.weight.data, gain=gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                nn.init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            nn.init.normal_(m.weight.data, 1.0, gain)
            nn.init.constant_(m.bias.data, 0.0)

    print('initialize network with %s' % init_type)
    net.apply(init_func)
    return net

class MVDNR(MultiviewBase):
    """Multi-view deferred neural rendering: a shared neural texture sampled
    per view, rendered by a UNet, with a camera-selection head."""

    def __init__(self, dataset, aggregation='max', tex_dim=2048, texture_features=16, random_init=False, init_type='kaiming', gain=0.02):
        super().__init__(dataset, aggregation)
        self.texture = define_Texture(tex_dim=tex_dim, texture_features=texture_features, random_init=random_init, init_type=init_type, gain=gain)
        self.unet = UNet(texture_features, 3)
        # Camera selection based on the sampled texture features.
        self.select_module = CamSelect(dataset.num_cam, texture_features, dataset.num_cam, 1, aggregation)

    def _spherical_harmonics_basis(self, extrinsics):
        """Evaluate the 9 real spherical-harmonics basis functions (bands
        l=0..2) at the direction given by the 3 components of ``extrinsics``.

        Returns a tensor of 9 coefficients.
        """
        c0 = 1 / (2.0 * math.sqrt(np.pi))
        c1 = math.sqrt(3.0) * c0
        c2 = math.sqrt(15.0) * c0
        c3 = math.sqrt(1.25) * c0
        x, y, z = extrinsics[0], extrinsics[1], extrinsics[2]
        sh = torch.ones((9), dtype=torch.float)
        sh[0] = c0                          # l=0
        sh[1] = y * c1                      # l=1
        sh[2] = z * c1
        sh[3] = x * c1
        sh[4] = x * y * c2                  # l=2
        sh[5] = y * z * c2
        sh[6] = (3.0 * z * z - 1.0) * c3
        sh[7] = z * x * c2
        sh[8] = (x * x - z * z) * c2
        return sh

    def get_feat(self, imgs, extrinsics=[]):
        """Sample the neural texture for every (batch, camera) UV map.

        Args:
            imgs: UV maps shaped (B, N, 3, H, W); a pixel equal to INVALID_UV
                in all three channels counts as background for the mask.
            extrinsics: optional (B, N, 4, 4) camera matrices; when given,
                texture channels 3:12 are modulated by the SH basis of the
                camera translation.

        Returns:
            (features shaped (B, N, C, H, W), None). Also caches per-view
            foreground masks in ``self.mask``.
        """
        B, N = imgs.shape[0], imgs.shape[1]
        feats = []
        masks = []
        for b in range(B):
            for n in range(N):
                uv = imgs[b, n, :, :, :].unsqueeze(0)
                tex = self.texture(uv).squeeze(0)
                if len(extrinsics) > 0:
                    direction = extrinsics[b, n, 0:3, 3]
                    basis = self._spherical_harmonics_basis(direction).cuda()
                    basis = basis.view(basis.shape[0], 1, 1)
                    # Modulate the SH feature channels by the view direction.
                    tex[3:12, :, :] = tex[3:12, :, :] * basis
                feats.append(tex)
                fg = ((uv[:, 0, :, :] != INVALID_UV) | (uv[:, 1, :, :] != INVALID_UV) | (uv[:, 2, :, :] != INVALID_UV))
                masks.append(fg.squeeze(0))
        feats = torch.stack(feats).unflatten(0, [B, N])
        self.mask = torch.stack(masks).unflatten(0, [B, N])
        return feats, None

    def get_crop_output(self, overall_feat, sel_ind, feat_crops):
        """Render cropped feature patches through the UNet; full-size outputs
        are a -1 placeholder in this variant."""
        B, pN, C, H, W = feat_crops.shape
        patches = self.unet(feat_crops.view(B * pN, C, H, W))
        patches = patches.view(B, pN, 3, H, W)
        return {
            "texture": overall_feat[:, :3, :, :],
            "outputs": -1.0 * torch.ones_like(overall_feat[:, :3, :, :]),
            "outputs_patches": patches,
            "sel_masks": self.mask[:, sel_ind, :, :],
        }

    def get_output(self, overall_feat, sel_ind):
        """Render the full aggregated feature map through the UNet."""
        return {
            "texture": overall_feat[:, :3, :, :],
            "outputs": self.unet(overall_feat),
            "sel_masks": self.mask[:, sel_ind, :, :],
        }


if __name__ == '__main__':
    # Smoke test: load one batch and run it through the model with
    # camera selection enabled.
    from src.datasets import imgDataset
    from torch.utils.data import DataLoader
    from thop import profile
    import itertools

    dataset = imgDataset('/home/houyz/Data/modelnet/modelnet40_images_new_12x', 12)
    dataloader = DataLoader(dataset, 1, False, num_workers=0)
    imgs, tgt, keep_cams = next(iter(dataloader))
    # BUG FIX: this module defines MVDNR; MVCNN is undefined here.
    model = MVDNR(dataset).cuda()
    init_prob = F.one_hot(torch.tensor([0, 1]), num_classes=dataset.num_cam)
    keep_cams[0, 3] = 0  # simulate an unavailable camera
    model.train()
    res = model(imgs.cuda(), None, 2, init_prob, 3, keep_cams)
    # macs, params = profile(model, inputs=(imgs[:, :2].cuda(),))
    # macs, params = profile(model.select_module, inputs=(torch.randn([1, 12, 512, 1, 1]).cuda(),
    #                                                     F.one_hot(torch.tensor([1]), num_classes=20).cuda()))
    # macs, params = profile(model, inputs=(torch.randn([1, 512, 1, 1]).cuda(),))
    pass
