import einops
import os
import torch
import torch as th
import torch.nn as nn
import torchvision.transforms as transforms
import sys
import numpy as np
import random
import json
import torch.nn.functional as F
from einops import rearrange, repeat,reduce
from torchvision import transforms
from PIL import Image

# from vit_pytorch.train_V1_sep_new import Column_trans_rot_lgn
from hypercolumn.vit_pytorch.train_V1_sep_new import Column_trans_rot_lgn

from torchvision.utils import make_grid


def disabled_train(self, mode=True):
    """No-op stand-in for ``nn.Module.train``.

    Assign this function to a module's ``train`` attribute so later
    ``model.train()`` / ``model.eval()`` calls leave the module's mode
    untouched (the standard trick for permanently frozen sub-modules).
    The ``mode`` flag is accepted for signature compatibility and ignored.
    """
    return self


class HyperColumnLGN(nn.Module):
    """Frozen LGN encoder/decoder that renders per-hypercolumn reconstructions.

    Loads a pretrained ``Column_trans_rot_lgn`` checkpoint, freezes its
    ``lgn_ende`` sub-module, and exposes a forward pass that masks the
    encoder activations down to the requested hypercolumn channels before
    decoding them back to image space.
    """

    def __init__(self, restore_ckpt='/home/bsliu/gitprojects/OmniGen/checkpoint/hypercolumn/imagenet/equ_nv16_vl4_rn1_Bipolar_norm.pth'):
        super().__init__()
        # map_location makes loading robust when the checkpoint was saved
        # on a device (e.g. a specific GPU) that is unavailable here.
        ckpt = torch.load(restore_ckpt, map_location='cpu')
        hc = Column_trans_rot_lgn(ckpt['arg'])
        hc.load_state_dict(ckpt['state_dict'], strict=False)
        self.lgn_ende = hc.lgn_ende[0].eval()
        # Overwrite .train so a later model.train() cannot flip this frozen
        # sub-module back into training mode (latent-diffusion-style freeze).
        self.lgn_ende.train = disabled_train
        for param in self.lgn_ende.parameters():
            param.requires_grad = False

        # Groupings of the 16 hypercolumn channels; semantics come from the
        # pretrained model — TODO confirm against the training code.
        self.groups = [[0, 1, 4, 8, 9, 15], [2, 3], [5, 6, 7, 10, 11, 12, 13, 14]]
        self.hyper_category = [i for i in range(16)]

        # Exponentially decaying sampling distribution over the 16 columns.
        temp = np.exp(-0.62 * np.arange(16))
        self.p = temp / np.sum(temp)

        # Normalization statistics expected by the encoder input.
        self.norm_mean = np.array([0.50705882, 0.48666667, 0.44078431])
        self.norm_std = np.array([0.26745098, 0.25568627, 0.27607843])
        self.norm = transforms.Normalize(self.norm_mean, self.norm_std)

        # Statistics for optional VAE-style (-1, 1) rescaling (see vae_norm).
        self.vn_mean = np.array([0.5, 0.5, 0.5])
        self.vn_std = np.array([0.5, 0.5, 0.5])

    def padding_feature(self, img, feature):
        """Zero-pad ``feature`` (n, 3, h', w') so its spatial size matches
        ``img`` (3, h, w). Padding is centered; any odd remainder goes to
        the bottom/right edge. Assumes h' <= h and w' <= w."""
        pad_height = img.shape[1] - feature.shape[2]
        pad_width = img.shape[2] - feature.shape[3]
        pad_top = pad_height // 2
        pad_bottom = pad_height - pad_top
        pad_left = pad_width // 2
        pad_right = pad_width - pad_left
        return F.pad(feature, (pad_left, pad_right, pad_top, pad_bottom), mode='constant', value=0)

    def single_max_min_norm(self, tensor):
        """Rescale a whole tensor to [0, 1] using its global min/max."""
        return (tensor - tensor.min()) / (tensor.max() - tensor.min())

    def max_min_norm(self, input_tensor):
        """Rescale each item of an (n, c, h, w) batch to [0, 1] independently.

        The denominator is clamped so a constant per-item input yields zeros
        instead of NaN (the original divided by zero in that case).
        """
        num_hc = input_tensor.shape[0]
        flat = input_tensor.view(num_hc, -1)
        max_vals = flat.max(dim=1, keepdim=True).values.view(num_hc, 1, 1, 1)
        min_vals = flat.min(dim=1, keepdim=True).values.view(num_hc, 1, 1, 1)
        return (input_tensor - min_vals) / (max_vals - min_vals).clamp_min(1e-12)

    def vae_norm(self, tensor):
        """Shift/scale a [0, 1] image tensor to roughly [-1, 1]: (x - 0.5) / 0.5."""
        vn_mean = torch.tensor(self.vn_mean, device=tensor.device, requires_grad=False, dtype=torch.float32)
        vn_std = torch.tensor(self.vn_std, device=tensor.device, requires_grad=False, dtype=torch.float32)
        return (tensor - vn_mean.view(1, -1, 1, 1)) / vn_std.view(1, -1, 1, 1)

    def forward(self, x, temp):
        """Decode one reconstruction of ``x`` per requested hypercolumn.

        Args:
            x: input image, shape (3, H, W), values in [0, 1] — TODO confirm
               against the encoder's expectations.
            temp: sequence of hypercolumn indices (0..15), one per output.

        Returns:
            (out, temp): ``out`` is (len(temp), 3, H, W), each item min-max
            normalized to [0, 1] and zero-padded back to the input size;
            ``temp`` is the indices as a tensor on ``x``'s device.
        """
        # One-hot mask over the 16 hypercolumns, one row per requested index.
        r = torch.zeros(len(temp), 16, 1, 1).to(x.device)
        for index, item in enumerate(temp):
            r[index, item, :, :] = 1
        temp = torch.tensor(temp).to(x.device)

        # Each hypercolumn spans 4 consecutive encoder channels, so expand
        # the 16-way mask to 64 channels (c*4 + rep ordering).
        r = repeat(r, 'n c h w -> n (c repeat) h w', repeat=4)
        out = self.lgn_ende(self.norm(x)) * r
        out = self.lgn_ende.deconv(out)
        out = self.max_min_norm(out)
        out = self.padding_feature(x, out)  # n 3 h w
        return out, temp

    def forward_infer(self, x, temp):
        """Inference-time entry point; identical to forward (the original
        duplicated the whole body — kept only for caller compatibility)."""
        return self.forward(x, temp)
    

if __name__ == "__main__":
    # Smoke test: reconstruct one ImageNet validation image through each of
    # the 16 hypercolumns and save the results as JPEGs.
    hypercolumn = HyperColumnLGN().cuda().eval()
    x = Image.open("/share/project/dataset_raw/imagenet/val/n07753592/ILSVRC2012_val_00002187.JPEG")
    piltotensor = transforms.Compose([
                transforms.ToTensor(),
            ])
    image_tensor = piltotensor(x).cuda()  # values in (0, 1)

    # BUG FIX: forward(x, temp) requires the hypercolumn indices as a second
    # argument; the original call omitted it and raised a TypeError.
    # Request all 16 columns so the loop below saves one image per column.
    img, temp = hypercolumn(image_tensor, list(range(16)))

    to_pil = transforms.ToPILImage()
    print(temp)
    for i in range(len(img)):
        print(img[i].min(), img[i].max())
        pil = to_pil(img[i])
        pil.save(f"./{temp[i]}.jpg")