import sys

sys.path.append("..")
sys.path.append(".")
sys.path.append('..')

import numpy as np
import os
import torch
import torch.nn as nn
import torchvision
import torch.utils.data as Data
# import torchvision.datasets as dates
from torch.autograd import Variable
from torch.nn import functional as F
import utils.transforms as trans
import utils.utils as util
import layer.loss as ls
import utils.metric as mc
import shutil
# from net.deeplabv3plus import SiameseNet
from net.segnet import SiameseNet, FeatureResNet
from net.fusenet import fusenet
import cv2
import pickle

import torch
import layer.loss as ls
import layer.function as fun
from torch.autograd import Variable
import numpy as np



datasets = 'CD2014'
import cfgs.CD2014config as cfg
import dataset.CD2014 as dates



def check_dir(dir):
    """Create directory *dir* (including missing parents) if it does not exist."""
    # exist_ok avoids the check-then-create race of the old exists()/makedirs() pair.
    os.makedirs(dir, exist_ok=True)

def set_base_learning_rate_for_multi_layer(model):
    """Yield every non-embedding-layer *weight* parameter of *model*.

    Used to build the parameter group that receives the base learning rate.
    """
    for name, param in model.named_parameters():
        if 'embedding_layer' in name:
            continue
        if 'weight' in name:
            yield param

def set_2x_learning_rate_for_multi_layer(model):
    """Yield every non-embedding-layer *bias* parameter of *model*.

    Used to build the parameter group that receives twice the base
    learning rate.
    """
    for name, param in model.named_parameters():
        if 'embedding_layer' in name:
            continue
        if 'bias' in name:
            yield param

def set_10x_learning_rate_for_multi_layer(model):
    """Yield every embedding-layer *weight* parameter of *model*.

    Used to build the parameter group that receives ten times the base
    learning rate.
    """
    for name, param in model.named_parameters():
        if 'embedding_layer' in name and 'weight' in name:
            yield param

def set_20x_learning_rate_for_multi_layer(model):
    """Yield every embedding-layer *bias* parameter of *model*.

    Used to build the parameter group that receives twenty times the base
    learning rate.
    """
    for name, param in model.named_parameters():
        if 'embedding_layer' in name and 'bias' in name:
            yield param

def untransform(transform_img, mean_vector):
    """Undo input normalization on a CHW image array.

    Transposes to HWC, adds *mean_vector* back per channel, casts to uint8
    and reverses the channel order (presumably BGR<->RGB — confirm against
    the transform that produced the input).

    Args:
        transform_img: numpy array of shape (C, H, W).
        mean_vector: per-channel mean that was subtracted during preprocessing.

    Returns:
        uint8 numpy array of shape (H, W, C) with channels reversed.
    """
    # BUG FIX: the original did `+=` on the transposed VIEW, silently
    # mutating the caller's array; copy first so the input is left intact.
    img = transform_img.transpose(1, 2, 0).copy()
    img += mean_vector
    img = img.astype(np.uint8)
    return img[:, :, ::-1]

def various_distance(out_vec_t0, out_vec_t1, dist_flag):
    """Compute a per-row distance between two batches of feature vectors.

    Args:
        out_vec_t0: tensor of shape (N, C), features at time t0.
        out_vec_t1: tensor of shape (N, C), features at time t1.
        dist_flag: 'l2' (Euclidean), 'l1' (Manhattan) or 'cos'
            (1 - cosine similarity).

    Returns:
        Tensor of shape (N,) with one distance per row.

    Raises:
        ValueError: if *dist_flag* is not one of the supported flags
            (previously this fell through to an UnboundLocalError).
    """
    if dist_flag == 'l2':
        return F.pairwise_distance(out_vec_t0, out_vec_t1, p=2)
    if dist_flag == 'l1':
        return F.pairwise_distance(out_vec_t0, out_vec_t1, p=1)
    if dist_flag == 'cos':
        return 1 - F.cosine_similarity(out_vec_t0, out_vec_t1)
    raise ValueError('unknown dist_flag: %r' % (dist_flag,))

def single_layer_similar_heatmap_visual(output_t0,output_t1,save_change_map_dir,epoch,filename,layer_flag,dist_flag):
    """Compute a pixel-wise feature-distance map between two feature maps
    and save it as colorized and grayscale images.

    Args:
        output_t0, output_t1: feature tensors of shape (1, C, H, W).
        save_change_map_dir: root directory; outputs go under
            <root>/epoch_<epoch>/<layer_flag>/.
        epoch: epoch index used to name the output subdirectory.
        filename: relative image filename, expected to end in '.jpg'.
        layer_flag: name of the feature layer (output subdirectory name).
        dist_flag: distance flag forwarded to various_distance ('l1'/'l2'/'cos').

    Returns:
        The upsampled distance map as a numpy array of shape (1, 1, H', W').
    """
    # Upsample the H x W distance map to the configured output resolution.
    interp = nn.Upsample(size=[cfg.TRANSFROM_SCALES[1],cfg.TRANSFROM_SCALES[0]], mode='bilinear')
    n, c, h, w = output_t0.data.shape

    out_t0_, out_t1_ = output_t0.squeeze(0), output_t1.squeeze(0)

    # Flatten spatial dims: (c, h*w) transposed to (h*w, c) so each row is
    # one pixel's feature vector.
    out_t0_rz = torch.transpose(out_t0_.view(c, h * w), 1, 0)
    out_t1_rz = torch.transpose(out_t1_.view(c, h * w), 1, 0)

    distance = various_distance(out_t0_rz,out_t1_rz,dist_flag=dist_flag)
    similar_distance_map = distance.view(h,w).data.cpu().numpy()
    similar_distance_map_rz = interp(Variable(torch.from_numpy(similar_distance_map[np.newaxis, np.newaxis, :])))
    # NOTE(review): the distance map is not normalized to [0, 1] before the
    # 255 scaling; for 'l1'/'l2' flags values can exceed 1 and wrap in the
    # uint8 cast — confirm intended (validate() normalizes via normFUSE).
    similar_dis_map_colorize = cv2.applyColorMap(np.uint8(255 * similar_distance_map_rz.data.cpu().numpy()[0][0]), cv2.COLORMAP_JET)
    save_change_map_dir_ = os.path.join(save_change_map_dir, 'epoch_' + str(epoch))
    check_dir(save_change_map_dir_)
    save_change_map_dir_layer = os.path.join(save_change_map_dir_,layer_flag)
    save_weight_fig_dir = os.path.join(save_change_map_dir_layer, filename)

    save_weight_fig_dir_1 = os.path.dirname(save_weight_fig_dir)

    check_dir(save_weight_fig_dir_1)

    # BUG FIX: previously this was replace('_color.jpg', '_gray.jpg'), which
    # is a no-op for a plain '.jpg' filename and otherwise collides with the
    # grayscale output name. Name the colorized map '_color.jpg', matching
    # the convention used in validate().
    save_weight_fig_dir_color = save_weight_fig_dir.replace('.jpg', '_color.jpg')
    cv2.imwrite(save_weight_fig_dir_color, similar_dis_map_colorize)

    save_weight_fig_dir = save_weight_fig_dir.replace('.jpg', '_gray.jpg')
    print(save_weight_fig_dir)
    cv2.imwrite(save_weight_fig_dir, np.uint8(255 * similar_distance_map_rz.data.cpu().numpy()[0][0]))

    return similar_distance_map_rz.data.cpu().numpy()



def normFUSE(d):
    """Min-max normalize tensor *d* into the [0, 1] range."""
    lo = torch.min(d)
    hi = torch.max(d)
    return (d - lo) / (hi - lo)

def validate(net, val_dataloader,epoch,save_change_map_dir,save_roc_dir):
    """Run the fusion network over the validation set and save change maps.

    For each sample, the fused change map is min-max normalized, masked by
    the ROI, and written out both as a JET-colorized image and a grayscale
    image under <save_change_map_dir>/epoch_<epoch>/fuse/.

    Args:
        net: fusenet model, already on GPU; called as net(inputs1, inputs2).
        val_dataloader: yields (img_t0, img_t1, target, roi, filename, h, w)
            batches (batch size 1, given the [0][0] indexing below).
        epoch: epoch index, used only to name the output subdirectory.
        save_change_map_dir: root directory for the saved maps.
        save_roc_dir: unused here — presumably kept for interface symmetry
            with other validate() variants; verify before removing.
    """
    net.eval()
    for batch_idx, batch in enumerate(val_dataloader):
        print('Processing pic %s' % batch_idx)
        inputs1, input2, targets, roi, filename, height, width = batch
        # Unwrap batch-of-1 tensors/lists into scalars / a plain string.
        height, width, filename = height.numpy()[0], width.numpy()[0], filename[0]
        inputs1, input2, targets, roi = inputs1.cuda(), input2.cuda(), targets.cuda(), roi.cuda()
        # NOTE(review): Variable(..., volatile=True) is the legacy pre-0.4
        # inference mode; on modern PyTorch this should be torch.no_grad().
        inputs1, inputs2, targets, roi = Variable(inputs1, volatile=True), Variable(input2, volatile=True), Variable(targets), Variable(roi)

        fuse, out_middle, out_bottom, out_final = net(inputs1, inputs2)


        # Normalize the fused map to [0, 1] and zero it outside the ROI.
        fuse = normFUSE(fuse) * roi.float()
        # fuse = fuse * roi.float()
        fuse_image = np.uint8(255 * fuse.data.cpu().numpy()[0][0])
        similar_fuse_map_colorize = cv2.applyColorMap(fuse_image,cv2.COLORMAP_JET)

        save_change_map_dir_ = os.path.join(save_change_map_dir, 'epoch_' + str(epoch))
        check_dir(save_change_map_dir_)
        save_change_map_dir_layer = os.path.join(save_change_map_dir_, 'fuse')
        # NOTE(review): this repeats check_dir on save_change_map_dir_ —
        # probably meant save_change_map_dir_layer; harmless since the
        # check_dir(dirname) below creates the layer directory anyway.
        check_dir(save_change_map_dir_)
        save_weight_fig_dir = os.path.join(save_change_map_dir_layer, filename)
        save_weight_fig_dir_1 = os.path.dirname(save_weight_fig_dir)

        check_dir(save_weight_fig_dir_1)

        # Colorized map: '<name>_color.jpg'.
        save_weight_fig_dir_color = save_weight_fig_dir.replace('.jpg' , '_color.jpg')
        cv2.imwrite(save_weight_fig_dir_color , similar_fuse_map_colorize)

        # Grayscale map: '<name>_gray.jpg'.
        save_weight_fig_dir = save_weight_fig_dir.replace('.jpg', '_gray.jpg')
        print("save_weight_fig_dir:", save_weight_fig_dir )
        cv2.imwrite(save_weight_fig_dir, fuse_image)


def main():
  """Load the trained fusenet checkpoint, build the CD2014 validation
  loader, and run validate() once (epoch 0) to dump change maps."""
  torch.backends.cudnn.benchmark = True

  torch.cuda.empty_cache()
  #########  configs ###########
  # NOTE(review): best_metric is never used in this script — leftover from
  # the training loop this was derived from.
  best_metric = 0
  ######  load datasets ########
  # NOTE(review): train_transform_det is unused here (validation only).
  train_transform_det = trans.Compose([
      trans.Scale(cfg.TRANSFROM_SCALES),
  ])
  val_transform_det = trans.Compose([
      trans.Scale(cfg.TRANSFROM_SCALES),
  ])
  val_data = dates.Dataset(cfg.VAL_DATA_PATH,cfg.VAL_LABEL_PATH,
                            cfg.VAL_TXT_PATH,'val',transform=True,
                            transform_med = val_transform_det)
  val_loader = Data.DataLoader(val_data, batch_size= 1,
                                shuffle= False, num_workers= 0, pin_memory= True, drop_last=True)
  ######  build  models ########
  pretrained_net = FeatureResNet()
  Siamese = SiameseNet(pretrained_net)
  model = fusenet(Siamese)
  checkpoint_path = cfg.TRAINED_BEST_PERFORMANCE_CKPT

  # Strip the 'module.' prefix left by nn.DataParallel so the weights load
  # into the bare (non-parallel) model.
  model.load_state_dict({k.replace('module.', ''): v for k, v in torch.load(checkpoint_path)['state_dict'].items()})
  print('validate begin.')

  model = model.cuda()
  ab_test_dir = os.path.join(cfg.SAVE_PRED_PATH,'contrastive_loss')
  check_dir(ab_test_dir)
  save_change_map_dir = os.path.join(ab_test_dir, 'changemaps/')
  check_dir(save_change_map_dir)
  save_valid_dir = os.path.join(ab_test_dir,'valid_imgs')
  check_dir(save_valid_dir)
  save_roc_dir = os.path.join(ab_test_dir,'roc')
  check_dir(save_change_map_dir),check_dir(save_valid_dir),check_dir(save_roc_dir)

  # NOTE(review): validate() is called with cfg.SAVE_PATH for both output
  # dirs, not the save_change_map_dir/save_roc_dir built above — confirm
  # which location is intended; the directories created above go unused.
  validate(model, val_loader, 0, cfg.SAVE_PATH, cfg.SAVE_PATH)
  print('Processing completed!')


# Script entry point: run validation once when executed directly.
if __name__ == '__main__':
   main()
