import argparse
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from tqdm import tqdm 
import networkx as nx
import cv2
import pickle

import torch
from torch.autograd import Variable
import torchvision.transforms as transforms
# ./
from linear_classifier_probe import LinearClassifierProbe,get_probe_error,train_probe_and_get_loss,valid_probe_and_get_loss
from saliency_map import fig2data
# ./housegan
sys.path.append("./housegan/")
from models import add_pool,Discriminator, Generator, compute_gradient_penalty
from floorplan_dataset_maps import FloorplanGraphDataset, floorplan_collate_fn
from utils import ID_COLOR, CLASS_ROM


#saliency
def batch_split_with_grad(mks, nds, eds, nd_to_sample, ed_to_sample):
    """Split flat batched tensors (and their gradients) into per-sample lists.

    Input:
        mks: n_nodes,32,32 room masks; .grad must be populated (backward() done)
        nds: n_nodes,10 one-hot room types; .grad must be populated
        eds: n_edges,3 (start, valid, end) triplets
        nd_to_sample: n_nodes tensor mapping each node to its sample index
        ed_to_sample: n_edges tensor mapping each edge to its sample index
    Output:
        layouts, layouts_grads, nodes_types_onehot, nodes_grads, edges:
        five lists of length batch_size, one tensor per sample
        (gradients are returned as absolute values)
    """
    n_samples = int(nd_to_sample.max().item()) + 1
    # hoist the |grad| computation out of the per-sample loop
    mks_grad_abs = mks.grad.data.abs()
    nds_grad_abs = nds.grad.data.abs()
    layouts, layouts_grads = [], []
    nodes_types_onehot, nodes_grads = [], []
    edges = []
    for sample_id in range(n_samples):
        node_mask = nd_to_sample == sample_id
        layouts.append(mks[node_mask])
        layouts_grads.append(mks_grad_abs[node_mask])
        nodes_types_onehot.append(nds[node_mask])
        nodes_grads.append(nds_grad_abs[node_mask])
        edge_mask = ed_to_sample == sample_id
        edges.append(eds[edge_mask])
    return layouts, layouts_grads, nodes_types_onehot, nodes_grads, edges

def multichannel_layout_to_rgb_layout(layouts,nodes_types_onehot,scale=2):
    """Render per-room mask channels into composited RGB floorplan images.

    Input:
        layouts: list of (n_rooms, w, h) tensors in domain (-1., 1.)
        nodes_types_onehot: list of (n_rooms, n_types) one-hot tensors
        scale: integer upscaling factor for the output images
    Output:
        imgs: list of (w*scale, h*scale, 3) uint8 numpy images

    Fix: the room-border drawing previously hard-coded "*2" so any
    scale != 2 produced misplaced borders; border offsets now use `scale`
    and are clamped at 0 so a room touching the top/left edge no longer
    wraps around via negative indexing.
    """
    imgs=[]
    for layout,room_type in zip(layouts,nodes_types_onehot):
        layout = (layout+1.)/2.*255. # rescale from (-1., 1.) into (0., 255.)
        type_id = torch.max(room_type,dim=1)[1]
        n_rooms = layout.shape[0]
        w,h = layout.shape[1:]
        # assumes square masks: cv2.resize dsize is (width, height) while the
        # array is (rows, cols) — TODO confirm for non-square inputs
        img = np.zeros((w*scale,h*scale,3), dtype=np.uint8)
        drawings =[]
        masks=[]
        areas = []
        # draw each room channel tinted with its room-type color
        for i in range(n_rooms):
            drawing = layout[i].unsqueeze(-1).repeat(1,1,3).detach().cpu().numpy() # 32x32x3
            color_string = ID_COLOR[type_id[i].item()+1]
            color_rgb = mcolors.to_rgb(color_string)
            drawing = drawing * np.array(color_rgb).reshape(1,1,-1)
            drawing_scaled = cv2.resize(drawing,(w*scale,h*scale))

            # bounding box of the room in the unscaled mask
            x_range = (layout[i].max(dim=1).values>0).nonzero(as_tuple=True)[0]
            x0,x1 = min(x_range),max(x_range)
            y_range = (layout[i].max(dim=0).values>0).nonzero(as_tuple=True)[0]
            y0,y1 = min(y_range),max(y_range)
            # border coordinates in the scaled image (dark gray value 32)
            xa, xb = max(int(x0)*scale-1, 0), int(x1)*scale+1
            ya, yb = max(int(y0)*scale-1, 0), int(y1)*scale+1
            drawing_scaled[xa,ya:yb,:]=32
            drawing_scaled[xb,ya:yb,:]=32
            drawing_scaled[xa:xb,ya,:]=32
            drawing_scaled[xa:xb,yb,:]=32
            drawings.append(drawing_scaled)
            layout_i_scaled = cv2.resize(layout[i].detach().cpu().numpy().astype(np.uint8),(w*scale,h*scale))
            mask = layout_i_scaled>1.
            masks.append(mask)
            areas.append(mask.sum())
        # composite channels in descending area order so small rooms end on top
        for drawing, mask, area in sorted(zip(drawings,masks,areas),key=lambda x:x[2],reverse=True):
            img[mask]=drawing[mask]
        imgs.append(img)
    return imgs

def draw_graph_with_saliency(layouts, nodes_types_onehot, nodes_grads, edges,figsize=6):
    """Draw each sample's room-adjacency graph, sizing nodes by gradient saliency.

    Input:
        layouts: list of (n_rooms, w, h) room-mask tensors, one per sample
        nodes_types_onehot: list of (n_rooms, n_types) one-hot type tensors
        nodes_grads: list of (n_rooms, n_types) abs-gradient tensors
        edges: list of (n_edges, 3) (start, valid, end) tensors; node indices
            are treated as global across the batch (they are matched against
            global_node_index) — TODO confirm against the collate function
        figsize: size in inches of each (square) figure
    Output:
        graph_drawings: list of RGB image arrays (via fig2data), one per sample

    Fix: removed a dead per-node statement that recomputed
    torch.max(nodes_types_onehot_sample, dim=1)[1] into unused locals on
    every loop iteration.
    """
    #draw graph with grad
    NODE_SIZE_RANGE=(200,5000)
    graph_drawings=[]
    global_node_index=0
    for batch_i in range(len(nodes_grads)): # batch
        layouts_sample = layouts[batch_i]
        w,h = layouts_sample.shape[-2:]

        # node size ~ max gradient over type channels, min-max scaled into
        # NODE_SIZE_RANGE. NOTE(review): divides by (max-min); all-equal
        # gradients in a sample would yield NaN sizes — confirm inputs vary.
        saliency_nodes_sample = nodes_grads[batch_i].max(dim=-1).values
        size_nodes_sample = (saliency_nodes_sample-saliency_nodes_sample.min())/(saliency_nodes_sample.max()-saliency_nodes_sample.min())*(NODE_SIZE_RANGE[1]-NODE_SIZE_RANGE[0])+NODE_SIZE_RANGE[0]
        size_nodes_sample = size_nodes_sample.detach().cpu().numpy()

        nodes_types_onehot_sample = nodes_types_onehot[batch_i]
        type_ids_sample = torch.max(nodes_types_onehot_sample,dim=1)[1]
        roomnames_sample = [CLASS_ROM[i.item()+1] for i in type_ids_sample]
        color_nodes_sample = [mcolors.to_rgb(ID_COLOR[type_id.item()+1]) for type_id in type_ids_sample]
        edges_sample = edges[batch_i]

        fig = plt.figure(figsize=(figsize,figsize))
        G=nx.Graph()
        pos={}
        for room_id_sample in range(saliency_nodes_sample.shape[0]):
            G.add_node(global_node_index,roomname=roomnames_sample[room_id_sample])

            # place the node at the room's bounding-box center, pulled halfway
            # toward the figure center to leave margin for labels
            layout = layouts_sample[room_id_sample]
            x_range = (layout.max(dim=0).values>0).nonzero(as_tuple=True)[0]
            xc = (min(x_range)+max(x_range))/2./w
            xc = (xc-0.5)*0.5+0.5
            xc = xc.detach().cpu().numpy()
            y_range = (layout.max(dim=1).values>0).nonzero(as_tuple=True)[0]
            yc = (min(y_range)+max(y_range))/2./h
            yc = (yc-0.5)*0.5+0.5
            yc = yc.detach().cpu().numpy()
            pos[global_node_index]=[xc,1-yc]

            global_node_index+=1

        for edge_id_sample in range(edges_sample.shape[0]):
            edge = edges_sample[edge_id_sample]
            edge_start = edge[0].item()
            edge_valid = edge[1].item()
            edge_end = edge[2].item()
            if edge_valid>0: # only a positive middle value denotes adjacency
                G.add_edge(edge_start,edge_end)

        options = {
                "node_color": color_nodes_sample,
                "node_size":size_nodes_sample,
                "edge_color": "#dddddd",
                "width": 4,
                "edge_cmap": plt.cm.Blues,
                "with_labels": False,
            }
        nx.draw(G,pos,**options)
        node_labels = nx.get_node_attributes(G, 'roomname')
        nx.draw_networkx_labels(G, pos,labels=node_labels,font_size=15)
        plt.margins(0.2)
        graph_drawings.append(fig2data(fig))
        plt.clf()
        plt.close()
    return graph_drawings


def show_saliency(layouts_imgs,layout_saliency,graph_drawings,nrow=32): # 20,3,128,128
    """Tile ground-truth layouts, saliency maps and saliency graphs side by side.

    Input:
        layouts_imgs: per-sample RGB floorplan images
        layout_saliency: per-sample grayscale saliency maps (first dim = batch)
        graph_drawings: per-sample rendered graph images
        nrow: number of grid rows
    Output:
        fig: the assembled matplotlib figure

    NOTE(review): the grid math uses batch_size // nrow, so it assumes the
    batch size is a multiple of nrow — confirm with callers.
    """
    cols_per_sample = 3
    n_samples = layout_saliency.shape[0]
    n_cols = n_samples // nrow * cols_per_sample
    fig = plt.figure(figsize=(n_cols * 4, 4 * nrow))
    titles = ('ground truth floorplan',
              'saliency map of pooled room layout',
              'saliency graph of room type')
    for i in range(n_samples):
        panels = (
            (layouts_imgs[i], None),
            (layout_saliency[i], 'gray'),
            (graph_drawings[i], None),
        )
        for j, (image, cmap) in enumerate(panels):
            ax = fig.add_subplot(nrow, n_cols, cols_per_sample * i + j + 1)
            ax.imshow(image, cmap=cmap)
            ax.axis('off')
            if i == 0:
                ax.set_title(titles[j])
    fig.tight_layout()
    return fig


# linear classifier probes
# Flattened (post add_pool) feature size of each probed discriminator layer.
# Keys must match the dict returned by extract_midlayers_features; sizes
# follow the shape comments there (channels * height * width).
DISCRIMINATOR_MIDFEATURE_SIZE = {
    #"features_l1":8192,
    "features_cat":9216,           # 9 * 32 * 32 (mask + embedded node types)
    "features_encoder":16384,      # 16 * 32 * 32
    "features_cmp_1":16384,        # 16 * 32 * 32
    "features_downsample_1":4096,  # 16 * 16 * 16
    "features_cmp_2":4096,         # 16 * 16 * 16
    "features_downsample_2":1024,  # 16 * 8 * 8
    "features_decoder":128,        # pooled decoder vector
    #"validity_global":1
}

def get_opt(args=None):
    """Build the HouseGAN hyper-parameter namespace.

    Input:
        args: optional list of CLI-style argument strings. When None (the
            default) the historical behavior is preserved: sys.argv is
            deliberately ignored and only ["--n_cpu", "0"] is parsed, which
            keeps this callable from notebooks and imported scripts.
    Output:
        opt: argparse.Namespace with all options below
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--n_epochs", type=int, default=1000000, help="number of epochs of training")
    parser.add_argument("--batch_size", type=int, default=32, help="size of the batches")
    parser.add_argument("--g_lr", type=float, default=0.0001, help="adam: learning rate")
    parser.add_argument("--d_lr", type=float, default=0.0001, help="adam: learning rate")
    parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
    parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
    parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
    parser.add_argument("--latent_dim", type=int, default=128, help="dimensionality of the latent space")
    parser.add_argument("--img_size", type=int, default=32, help="size of each image dimension")
    parser.add_argument("--sample_interval", type=int, default=50000, help="interval between image sampling")
    parser.add_argument("--exp_folder", type=str, default='exp', help="destination folder")
    parser.add_argument("--n_critic", type=int, default=1, help="number of training steps for discriminator per iter")
    parser.add_argument("--target_set", type=str, default='A', help="which split to remove")
    opt = parser.parse_args(args=["--n_cpu", "0"] if args is None else args)
    return opt

def graph_add_pool(features_graph, nd_to_sample):
    """Flatten each node's features, then sum-pool nodes into per-sample graph vectors."""
    flattened = features_graph.view(features_graph.shape[0], -1)
    return add_pool(flattened, nd_to_sample)  # [batch_size, feature_dim]

def extract_midlayers_features(discriminator, mks, given_nds, given_eds, nd_to_sample):
    """Run the discriminator layer by layer and pool every intermediate feature.

    Input:
        discriminator: HouseGAN discriminator whose sub-modules are invoked
            individually (l1, encoder, cmp_1/2, downsample_1/2, decoder,
            fc_layer_global)
        mks: room masks, reshaped to [n_nodes, 1, 32, 32]
        given_nds: one-hot node types
        given_eds: edge list, passed to the cmp_* message-passing layers
        nd_to_sample: node-to-sample index used for pooling
    Output:
        dict of detached, per-sample pooled features; keys match
        DISCRIMINATOR_MIDFEATURE_SIZE (l1 / validity entries intentionally
        omitted, mirroring that dict).
    """
    masks = mks.view(-1, 1, 32, 32)                            # [n_nodes, 1, 32, 32]
    # embed the one-hot node types and stack them with the masks
    embedded = discriminator.l1(given_nds).view(-1, 8, 32, 32)  # [n_nodes, 8, 32, 32]
    cat = torch.cat([masks, embedded], 1)                       # [n_nodes, 9, 32, 32]

    enc = discriminator.encoder(cat)                            # [n_nodes, 16, 32, 32]
    cmp_1 = discriminator.cmp_1(enc, given_eds).view(-1, *enc.shape[1:])
    down_1 = discriminator.downsample_1(cmp_1)                  # [n_nodes, 16, 16, 16]
    cmp_2 = discriminator.cmp_2(down_1, given_eds).view(-1, *down_1.shape[1:])
    down_2 = discriminator.downsample_2(cmp_2)                  # [n_nodes, 16, 8, 8]
    dec = discriminator.decoder(down_2.view(-1, down_2.shape[1], *down_2.shape[2:]))
    dec = dec.view(-1, dec.shape[1])                            # [n_nodes, 128]

    # global validity head: its output is not returned, but the forward pass
    # is kept so execution matches the original implementation
    pooled_dec = graph_add_pool(dec, nd_to_sample)              # [batch_size, 128]
    validity_global = discriminator.fc_layer_global(pooled_dec) # [batch_size, 1]

    per_node_features = {
        "features_cat": cat,
        "features_encoder": enc,
        "features_cmp_1": cmp_1,
        "features_downsample_1": down_1,
        "features_cmp_2": cmp_2,
        "features_downsample_2": down_2,
    }
    pooled = {name: graph_add_pool(t, nd_to_sample).detach()
              for name, t in per_node_features.items()}
    pooled["features_decoder"] = pooled_dec.detach()
    return pooled

def _batch_midlayer_features(opt, batch, generator, discriminator, device, Tensor):
    """Forward one dataloader batch through the GAN: return midlayer features
    for the real masks and for freshly generated masks (2-element list of dicts)."""
    mks, nds, eds, nd_to_sample, ed_to_sample = batch
    # Variable is a no-op wrapper on modern torch; kept for parity with the
    # rest of the codebase
    real_mks = Variable(mks.type(Tensor)).to(device)
    given_nds = Variable(nds.type(Tensor)).to(device)
    given_eds = eds
    z_shape = [real_mks.shape[0], opt.latent_dim]
    z = Variable(Tensor(np.random.normal(0, 1, tuple(z_shape)))).to(device)
    gen_mks = generator(z, given_nds, given_eds)
    return [
        extract_midlayers_features(discriminator, real_mks, given_nds, given_eds, nd_to_sample),
        extract_midlayers_features(discriminator, gen_mks, given_nds, given_eds, nd_to_sample),
    ]

def get_statistic(opt,dataloader,generator,discriminator,device):
    """Compute per-feature mean and (population) std of pooled discriminator
    midlayer features over real AND generated samples, in two streaming passes.

    Input:
        opt: options namespace (latent_dim is used for z sampling)
        dataloader: floorplan dataloader, iterated twice
        generator, discriminator: trained HouseGAN networks
        device: torch device the inputs are moved to
    Output:
        mean, std: dicts keyed by DISCRIMINATOR_MIDFEATURE_SIZE feature names

    NOTE: z is resampled in the second pass, so the std is measured on a
    different set of generated samples than the mean (as in the original).
    """
    Tensor = torch.cuda.FloatTensor
    feature_names = list(DISCRIMINATOR_MIDFEATURE_SIZE.keys())
    mean = {name: None for name in feature_names}
    std = {name: None for name in feature_names}

    # pass 1: streaming mean (incremental weighted average over batches)
    num_samples = {name: 0 for name in feature_names}
    for batch in tqdm(dataloader):
        feats = _batch_midlayer_features(opt, batch, generator, discriminator, device, Tensor)
        for name in feature_names:
            batch_size = feats[0][name].shape[0]
            batch_sum = feats[0][name].sum(axis=0) + feats[1][name].sum(axis=0)
            total = num_samples[name] + batch_size*2
            if mean[name] is None:
                mean[name] = batch_sum/(batch_size*2)
            else:
                mean[name] = batch_sum/total + mean[name]*(num_samples[name]/total)
            num_samples[name] += batch_size*2

    # pass 2: streaming variance around the pass-1 mean, then sqrt
    num_samples = {name: 0 for name in feature_names}
    for batch in tqdm(dataloader):
        feats = _batch_midlayer_features(opt, batch, generator, discriminator, device, Tensor)
        for name in feature_names:
            batch_size = feats[0][name].shape[0]
            sq_sum = ((feats[0][name]-mean[name])**2).sum(axis=0) \
                + ((feats[1][name]-mean[name])**2).sum(axis=0)
            total = num_samples[name] + batch_size*2
            if std[name] is None:
                std[name] = sq_sum/(batch_size*2)
            else:
                std[name] = sq_sum/total + std[name]*(num_samples[name]/total)
            num_samples[name] += batch_size*2
    for name in feature_names:
        std[name] = std[name]**0.5
    return mean, std

def train_probes(opt, generator, discriminator, fp_dataloader_train, fp_dataloader_val, \
    probes, probe_optimizers, n_epochs=1000,decrease_domain_length_in_valid = 10):
    """Train one linear classifier probe per discriminator midlayer feature.

    Each probe is trained on normalized pooled midlayer features of real and
    generated floorplans. Per-probe "stop" epochs are recorded when the
    validation loss has not decreased over the trailing
    `decrease_domain_length_in_valid`-epoch window; the loop ends only once
    every probe has passed its stop epoch (or n_epochs is reached).

    Input:
        opt: options namespace (see get_opt); latent_dim is used for sampling
        generator, discriminator: trained HouseGAN networks (expected on cuda:0)
        fp_dataloader_train, fp_dataloader_val: floorplan dataloaders
        probes: dict feature_name -> probe module
        probe_optimizers: dict feature_name -> optimizer for that probe
        n_epochs: maximum number of epochs
        decrease_domain_length_in_valid: early-stop patience window (epochs)
    Output:
        loss_epochs_train, loss_epochs_valid, error_epochs: per-epoch means,
            dicts keyed by feature name
        probe_stop: dict feature_name -> epoch the probe stopped improving
            (stays n_epochs if it never triggered)
    Side effects:
        pickles intermediate results every 50 epochs under
        ./housegan_probe_training_result/
    """
    os.makedirs("housegan_probe_training_result",exist_ok=True)
    device = torch.device("cuda:0")
    Tensor = torch.cuda.FloatTensor 
    loss_epochs_train = {feature_name:[] for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys()}
    loss_epochs_valid = {feature_name:[] for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys()}
    error_epochs = {feature_name:[] for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys()}
    probe_stop = {feature_name:n_epochs for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys()}
    # compute feature-wise mean and std used to normalize probe inputs
    print("calculating mean and std")
    mean,std = get_statistic(opt,fp_dataloader_train,generator,discriminator,device)
    
    for epoch in range(n_epochs):
        # global early stop: only once EVERY probe is past its own stop epoch
        early_stop=True
        for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys():
            early_stop &= probe_stop[feature_name]<epoch
        if early_stop:
            print(f"early stop at {epoch}")
            break

        # train every probe for one epoch and collect per-batch losses
        print("training...")
        for k in probes:
            probes[k].train()
        loss_batchs = {feature_name:[] for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys()} # one value per batch
        for batch in tqdm(fp_dataloader_train):
            # Unpack batch
            mks, nds, eds, nd_to_sample, ed_to_sample = batch
            # Configure input
            real_mks = Variable(mks.type(Tensor)).to(device)
            given_nds = Variable(nds.type(Tensor)).to(device)
            given_eds = eds
            # Generate a batch of images
            z_shape = [real_mks.shape[0], opt.latent_dim]
            z = Variable(Tensor(np.random.normal(0, 1, tuple(z_shape)))).to(device)
            gen_mks = generator(z, given_nds, given_eds)
            midlayers_features = [extract_midlayers_features(discriminator, real_mks, given_nds, given_eds, nd_to_sample),extract_midlayers_features(discriminator, gen_mks, given_nds, given_eds, nd_to_sample)]
            # normalize real ([0]) and generated ([1]) features with the
            # statistics from get_statistic
            midlayers_features[0] = {feature_name:(extracted_features-mean[feature_name])/std[feature_name] for feature_name, extracted_features in midlayers_features[0].items()}
            midlayers_features[1] = {feature_name:(extracted_features-mean[feature_name])/std[feature_name] for feature_name, extracted_features in midlayers_features[1].items()}
    
            # NOTE: every probe keeps training even past its stop epoch (the
            # per-probe freeze below is disabled); probe_stop only drives the
            # global early stop above
            for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys():
                loss = train_probe_and_get_loss(midlayers_features,probes,probe_optimizers,feature_name)
                """if probe_stop[feature_name]>=epoch:
                    loss = train_probe_and_get_loss(midlayers_features,probes,probe_optimizers,feature_name)
                else:
                    loss = loss_batchs[feature_name][-1] if len(loss_batchs[feature_name])>0 else loss_epochs[feature_name][-1]"""
                loss_batchs[feature_name].append(loss)
        # record the mean training loss of the current epoch
        for k in loss_epochs_train:
            loss_epochs_train[k].append(np.array(loss_batchs[k]).mean())

        # validation: compute this epoch's loss/error and update early stopping
        print("validating...")
        for k in probes:
            probes[k].eval()
        #error_probes = validate_probes(opt, generator,discriminator,fp_dataloader_val,probes)
        loss_batchs = {feature_name:[] for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys()}
        error_batchs = {feature_name:[] for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys()}
        for batch_val in tqdm(fp_dataloader_val):
            # Unpack batch
            mks, nds, eds, nd_to_sample, ed_to_sample = batch_val
            # Configure input
            real_mks = Variable(mks.type(Tensor)).to(device)
            given_nds = Variable(nds.type(Tensor)).to(device)
            given_eds = eds
            # Generate a batch of images
            z_shape = [real_mks.shape[0], opt.latent_dim]
            z = Variable(Tensor(np.random.normal(0, 1, tuple(z_shape)))).to(device)
            gen_mks = generator(z, given_nds, given_eds)
            midlayers_features = [extract_midlayers_features(discriminator, real_mks, given_nds, given_eds, nd_to_sample),extract_midlayers_features(discriminator, gen_mks, given_nds, given_eds, nd_to_sample)]
            midlayers_features[0] = {feature_name:(extracted_features-mean[feature_name])/std[feature_name] for feature_name, extracted_features in midlayers_features[0].items()}
            midlayers_features[1] = {feature_name:(extracted_features-mean[feature_name])/std[feature_name] for feature_name, extracted_features in midlayers_features[1].items()}
    
            for feature_name in DISCRIMINATOR_MIDFEATURE_SIZE.keys():
                loss = valid_probe_and_get_loss(midlayers_features,probes,feature_name)
                loss_batchs[feature_name].append(loss)
                error = get_probe_error(midlayers_features,probes,feature_name)
                error_batchs[feature_name].append(error)
        # record the mean validation error and loss of the current epoch
        for k in error_epochs:
            error_epochs[k].append(np.array(error_batchs[k]).mean())
        for k in loss_epochs_train:
            loss_epochs_valid[k].append(np.array(loss_batchs[k]).mean())

        # mark a probe's stop epoch the first time its validation loss from
        # `decrease_domain_length_in_valid` epochs ago is <= every later loss
        checking_term = loss_epochs_valid
        for k in DISCRIMINATOR_MIDFEATURE_SIZE.keys():
            if (len(checking_term[k]) >= decrease_domain_length_in_valid \
                and checking_term[k][-1*decrease_domain_length_in_valid]<= min(checking_term[k][-1*decrease_domain_length_in_valid+1:]) \
                and probe_stop[k]==n_epochs):
                probe_stop[k]=epoch

        # report this epoch's metrics for every probed feature
        print(f"epoch {epoch}:")
        template = "{0:18}\n\rmean loss train: {1:16}\tmean loss valid: {2:16}\tmean error: {3:16}\tprob stop: {4:5}"
        for k in DISCRIMINATOR_MIDFEATURE_SIZE.keys():
            print(template.format(k.split("features_")[-1],loss_epochs_train[k][-1],loss_epochs_valid[k][-1],error_epochs[k][-1],probe_stop[k]))

        if epoch%50==0:
            with open("./housegan_probe_training_result/probe_loss_and_error_epoch_{}.pkl".format(str(epoch)),"wb") as f:
                pickle.dump([loss_epochs_train, loss_epochs_valid, error_epochs, probe_stop],f)
    return loss_epochs_train, loss_epochs_valid, error_epochs, probe_stop


if __name__=="__main__": # python housegan_utils.py --n_cpu 8
    # Utility module: no standalone entry point is implemented here.
    pass