import os
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import sys
import pickle


import torch
from torch.utils.data import DataLoader

# ./FloorplanGAN/
sys.path.append("./FloorplanGAN/")
from main import real_loss, fake_loss, setup
from dataset import generate_random_layout, wireframeDataset_Rplan
from models import Generator, WireframeDiscriminator, weight_init, renderer_g2v
from utils import bounds_check, get_figure, draw_table, negative_wh_check
# ./
from linear_classifier_probe import LinearClassifierProbe, train_probe_and_get_loss, get_probe_error
from saliency_map import fig2data


# --- Saliency-map constants ----------------------------------------------
# RGB color used to render each room category, keyed by room-type id
# (10 categories total, matching the last-dim one-hot slice in get_TLAR_table).
color_table = {
    0: (210, 121, 98),  # Living room
    1: (238, 216, 98),  # Master room
    2: (83, 103, 52),  # Kitchen
    3: (118, 142, 168),  # Bathroom
    4: (82, 79, 115),  # Dining room # Second room
    5: (227, 152, 68),  # Child room  # Balcony
    6: (145, 177, 101),  # Study room
    7: (59, 105, 138),  # Second room
    8: (36, 35, 42),  # Guest room
    9: (221, 209, 212),  # Balcony
}

# Human-readable names for the 10 room-type ids above (index == id).
room_types = [
    "Living room",
    "Master room",
    "Kitchen",
    "Bathroom",
    "Dining room",
    "Child room",
    "Study room",
    "Second room",
    "Guest room",
    "Balcony",
]

# Subset of room-type ids used for the 6-room tables drawn below
# (presumably matches the dataset's room filtering -- TODO confirm
# against wireframeDataset_Rplan).
room_ids = [0,1,2,3,7,9]
color_table_filtered = {k:color_table[k] for k in room_ids}
room_types_filtered = [room_types[i] for i in room_ids]

def get_TLAR_table(floorplan_vector, grads=None):  # B,6,10
    """
    Build a per-room saliency table over the four attribute groups
    Type, Location, Area, Ratio from gradient magnitudes.

    Parameters
    ----------
    floorplan_vector : torch.Tensor, shape (B, 6, 10)
        Layout vector whose ``.grad`` is read when `grads` is None
        (backward() must already have been called on a scalar of it).
    grads : torch.Tensor or None, shape (B, 6, 10)
        Pre-computed gradient magnitudes; when given, `floorplan_vector`
        is not touched.

    Returns
    -------
    torch.Tensor, shape (B, 6, 4)
        Columns are [type, position, area, ratio] saliency per room.
    """
    if grads is None:
        # BUGFIX: the original `if not grads:` raises "Boolean value of
        # Tensor with more than one element is ambiguous" for any real
        # gradient tensor; compare against None explicitly instead.
        grads = floorplan_vector.grad.data.abs()  # B,6,10
    # Last-dim layout: [:6] one-hot room type, [6:-2] position, [-2] area, [-1] ratio.
    saliency_type = grads[:, :, :6].max(dim=-1).values  # B,6
    saliency_position = grads[:, :, 6:-2].max(dim=-1).values  # B,6
    saliency_area = grads[:, :, -2]  # B,6
    saliency_ratio = grads[:, :, -1]  # B,6
    saliency_table = torch.stack(
        [saliency_type, saliency_position, saliency_area, saliency_ratio], dim=-1
    )  # B,6,4
    return saliency_table

def draw_TLAR_table(floorplan_vector, grads=None):  # B,6,10
    """
    Render one grayscale saliency-table image per batch item for the
    attribute groups Type, Location, Area, Ratio.

    Each table is min-max normalized per column across the 6 rooms so the
    most/least salient room maps to white/black.  Returns a list of images
    (as produced by fig2data), one per batch item.
    """
    # BUGFIX: x-axis label typo "postion" -> "position".
    xticks = ["type", "position", "area", "ratio"]
    saliency_table = get_TLAR_table(floorplan_vector, grads=grads)
    # Per-column min-max normalization over the room axis (dim=1).
    col_min = saliency_table.min(dim=1, keepdim=True)[0]
    col_max = saliency_table.max(dim=1, keepdim=True)[0]
    # BUGFIX: guard against a constant column (max == min), which previously
    # divided by zero and produced NaNs in the rendered table.
    denom = (col_max - col_min).clamp_min(1e-12)
    saliency_table_normalize = ((saliency_table - col_min) / denom).detach().cpu().numpy()  # B,6,4
    saliency_table_normalize_images = []
    for batch_i in range(saliency_table_normalize.shape[0]):
        table = saliency_table_normalize[batch_i]
        fig = plt.figure(figsize=(6, 6))
        plt.imshow(table, cmap="gray")
        plt.xticks(ticks=range(saliency_table_normalize.shape[-1]), labels=xticks, fontsize=18, rotation=30)
        plt.yticks(ticks=range(saliency_table_normalize.shape[-2]), labels=room_types_filtered, fontsize=16)
        plt.margins(0)
        saliency_table_normalize_images.append(fig2data(fig))
        fig.clf()
        plt.close()
    return saliency_table_normalize_images

def multichannel_layout_to_rgb_layout(layouts):  # B,W,H,C
    """
    Collapse a multi-channel layout stack (one channel per room type) into
    RGB images: each channel is tinted with its room color, then channels
    are composited by per-pixel maximum.

    Parameters
    ----------
    layouts : torch.Tensor or np.ndarray, shape (B, W, H, C)
        Per-room occupancy maps; channel index j is colored with color_table[j].

    Returns
    -------
    list[np.ndarray]
        B uint8 images of shape (W, H, 3).
    """
    # BUGFIX: the original used a bare `try/except: pass`, which silently
    # swallowed *every* error; only convert when we actually hold a tensor.
    if isinstance(layouts, torch.Tensor):
        layouts = layouts.detach().cpu().numpy()
    layouts = layouts.transpose(0, 3, 1, 2)  # B,C,W,H
    batch_size = layouts.shape[0]
    num_channel = layouts.shape[1]
    layouts_imgs = []
    for i in range(batch_size):
        img_stack = []
        for j in range(num_channel):
            # Broadcast the single channel to 3 color planes: W,H,3.
            img = np.tile(layouts[i, j, :, :, np.newaxis], (1, 1, 3))
            img *= np.array(color_table[j])  # tint with the channel's RGB color
            img = img.astype(np.uint8)
            img_stack.append(img)
        # Composite channels: per-pixel max keeps the brightest room color.
        img_stack = np.array(img_stack)
        layouts_imgs.append(np.max(img_stack, axis=0))
    return layouts_imgs

def show_saliency(layouts_imgs, layout_saliency, saliency_table, nrow=16):
    """
    Tile three panels per batch item -- rendered input, saliency map and the
    attribute saliency table -- into a single matplotlib figure with `nrow`
    rows, and return the figure.
    """
    panels_per_sample = 3
    batch_size = layout_saliency.shape[0]
    n_cols = batch_size // nrow * panels_per_sample

    titles = ("visualizaion of input", "saliency map", "attributions' saliency")
    panel_sources = (layouts_imgs, layout_saliency, saliency_table)

    fig = plt.figure(figsize=(n_cols * 4, 4 * nrow))
    for sample_idx in range(batch_size):
        for panel_idx, source in enumerate(panel_sources):
            ax = fig.add_subplot(nrow, n_cols, panels_per_sample * sample_idx + panel_idx + 1)
            ax.imshow(source[sample_idx])
            ax.axis('off')
            # Titles go on the first sample's panels only.
            if sample_idx == 0:
                ax.set_title(titles[panel_idx], fontsize=20)

    fig.tight_layout()
    return fig

# --- Linear classifier probes --------------------------------------------
# Flattened feature size of every probed intermediate layer of the
# discriminator, keyed by the names used throughout this module.  The order
# and sizes mirror the forward walk in extract_midlayers_features():
# the 6x64x64 rendered input, then conv/bn/relu of each of the 3 CNN stages,
# then the first three classifier layers.
DISCRIMINATOR_MIDFEATURE_SIZE = {
    "features_rendered":6*64*64,
    "features_cnn0_conv":64*32*32,
    "features_cnn0_bn":64*32*32,
    "features_cnn0_relu":64*32*32,
    "features_cnn1_conv":128*16*16,
    "features_cnn1_bn":128*16*16,
    "features_cnn1_relu":128*16*16,
    "features_cnn2_conv":256*8*8,
    "features_cnn2_bn":256*8*8,
    "features_cnn2_relu":256*8*8,
    "features_classifier_conv":1024*1*1,
    "features_classifier_bn":1024*1*1,
    "features_classifier_relu":1024*1*1,
}

def extract_midlayers_features(discriminator, input_data):
    """
    Forward `input_data` through the discriminator and capture the flattened,
    detached activation after each probed intermediate layer.

    Capture order must match DISCRIMINATOR_MIDFEATURE_SIZE: the rendered
    input first, then the first 3 sub-layers of each block in
    discriminator.cnn, then the first 3 layers of discriminator.classifier.

    Returns
    -------
    dict[str, torch.Tensor]
        feature name -> detached tensor of shape (batch, flattened_size).
    """
    discriminator.eval()  # freeze batch-norm / dropout during extraction
    feature_names = list(DISCRIMINATOR_MIDFEATURE_SIZE.keys())
    extracted_features = {}
    feature_id = 0

    # Rasterize the layout vector into an image the CNN can consume.
    x = discriminator.renderer.render(input_data)
    extracted_features[feature_names[feature_id]] = x.detach().view(x.shape[0], -1)
    feature_id += 1
    # Each CNN block is assumed to expose at least 3 probed sub-layers
    # (conv, bn, relu) -- TODO confirm against the Discriminator definition.
    for m in discriminator.cnn:
        for i in range(3):
            x = m[i](x)
            extracted_features[feature_names[feature_id]] = x.detach().view(x.shape[0], -1)
            feature_id += 1
    for i in range(3):
        x = discriminator.classifier[i](x)
        extracted_features[feature_names[feature_id]] = x.detach().view(x.shape[0], -1)
        feature_id += 1
    # BUGFIX: the original additionally ran classifier[3] and classifier[4]
    # and discarded both results -- dead computation removed (the final
    # discriminator score is never part of the returned features).
    return extracted_features

def train_probes(generator,discriminator,train_dataloader,valid_dataloader, \
    probes, probe_optimizers, n_epochs=1000,decrease_domain_length_in_valid=10):
    """
    Train one linear classifier probe per discriminator mid-layer to tell
    real from generated layouts, tracking per-epoch loss/error and a
    per-probe "stop" epoch.

    Parameters
    ----------
    generator, discriminator : trained GAN modules (kept in eval mode).
    train_dataloader, valid_dataloader : DataLoaders yielding real layouts.
    probes : presumably dict[str, LinearClassifierProbe] keyed by the names
        in DISCRIMINATOR_MIDFEATURE_SIZE -- TODO confirm.
    probe_optimizers : matching optimizers, same keys.
    n_epochs : maximum number of training epochs.
    decrease_domain_length_in_valid : window length (in epochs) used to decide
        a probe's loss has stopped decreasing.

    Returns
    -------
    (loss_epochs, error_epochs, probe_stop) : three dicts keyed by feature
        name -- per-epoch mean train loss, per-epoch mean validation error,
        and the epoch at which each probe's loss stopped improving
        (n_epochs if it never stopped).

    Side effects: writes pickled intermediate results every 50 epochs to
    ./FloorplanGAN_probe_training_result/.
    """
    generator.eval()
    discriminator.eval()
    device = next(generator.parameters()).device
    os.makedirs("FloorplanGAN_probe_training_result",exist_ok=True)
    loss_epochs = {feature_name:[] for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys()} # one value per epoch
    error_epochs = {feature_name:[] for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys()}
    probe_stop = {feature_name:n_epochs for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys()}
    
    for epoch in range(n_epochs):
        # Stop the whole loop only once *every* probe's stop epoch has passed.
        early_stop=True
        for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys():
            early_stop &= probe_stop[feature_name]<epoch
        if early_stop:
            print(f"early stop at {epoch}")
            break

        # Train all probes for one epoch and collect per-batch losses.
        print("training...")
        for k in probes:
            probes[k].train()
        loss_batchs = {feature_name:[] for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys()} # one value per batch
        for real_images in tqdm(train_dataloader):
            real_images = [x.to(device) for x in real_images]
            random_images = generate_random_layout(train_dataloader.dataset, batch_size=real_images[0].shape[0])
            random_images = [torch.tensor(x).to(device) for x in random_images]
            fake_images = generator(random_images[0], random_images[1])
            # Mid-layer features for the real batch and a same-size fake batch.
            midlayers_features = [extract_midlayers_features(discriminator, real_images[0]),extract_midlayers_features(discriminator, fake_images[0])]
            
            for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys():
                # NOTE(review): probes are still trained after their stop epoch;
                # the disabled alternative is kept below.
                loss = train_probe_and_get_loss(midlayers_features,probes,probe_optimizers,feature_name)
                """if probe_stop[feature_name]>=epoch:
                    loss = train_probe_and_get_loss(midlayers_features,probes,probe_optimizers,feature_name)
                else:
                    loss = loss_batchs[feature_name][-1] if len(loss_batchs[feature_name])>0 else loss_epochs[feature_name][-1]"""
                loss_batchs[feature_name].append(loss)
        # Record this epoch's mean training loss per probe.
        for k in loss_epochs:
            loss_epochs[k].append(np.array(loss_batchs[k]).mean())
            
        # Validate: compute this epoch's error and later decide early stop.
        print("validating...")
        for k in probes:
            probes[k].eval()
        #error_probes = validate_probes(generator,discriminator,valid_dataloader,probes)
        error_batchs = {feature_name:[] for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys()}
        for real_images in tqdm(valid_dataloader):
            real_images = [x.to(device) for x in real_images]
            random_images = generate_random_layout(valid_dataloader.dataset, batch_size=real_images[0].shape[0])
            random_images = [torch.tensor(x).to(device) for x in random_images]
            fake_images = generator(random_images[0], random_images[1])
            midlayers_features = [extract_midlayers_features(discriminator, real_images[0]),extract_midlayers_features(discriminator, fake_images[0])]
            
            for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys():
                error = get_probe_error(midlayers_features,probes,feature_name)
                error_batchs[feature_name].append(error) 
        # Record this epoch's mean validation error per probe.
        for k in error_epochs:
            error_epochs[k].append(np.array(error_batchs[k]).mean())

        # Mark a probe "stopped" the first time its loss from
        # `decrease_domain_length_in_valid` epochs ago is still <= every
        # loss since (i.e. no improvement over the window).
        for k in DISCRIMINATOR_MIDFEATURE_SIZE.keys():
            if (len(loss_epochs[k]) >= decrease_domain_length_in_valid \
                and loss_epochs[k][-1*decrease_domain_length_in_valid]<= min(loss_epochs[k][-1*decrease_domain_length_in_valid+1:]) \
                and probe_stop[k]==n_epochs):
                probe_stop[k]=epoch
        
        # Progress report for this epoch.
        print(f"epoch {epoch}:")
        template = "{0:18}\n\rmean loss: {1:16}\tmean error: {2:16}\tprob stop: {3:5}"
        for k in DISCRIMINATOR_MIDFEATURE_SIZE.keys():
            print(template.format(k.split("features_")[-1],loss_epochs[k][-1],error_epochs[k][-1],probe_stop[k]))

        # Periodic checkpoint of the bookkeeping dicts.
        if epoch%50==0:
            with open("./FloorplanGAN_probe_training_result/losses_and_errors_and_stops_before_epoch_{}.pkl".format(str(epoch)),"wb") as f:
                pickle.dump([loss_epochs, error_epochs, probe_stop],f)
    return loss_epochs, error_epochs, probe_stop
    