import sys

sys.path.append("..")
sys.path.append(".")
sys.path.append('..')

import numpy as np
import os
import torch
import torch.nn as nn
import torchvision
import torch.utils.data as Data
# import torchvision.datasets as dates
from torch.autograd import Variable
from torch.nn import functional as F
import utils.transforms as trans
import utils.utils as util
import layer.loss as ls
import utils.metric as mc
import shutil
import cv2

########################################################
import torch
import layer.loss as ls
import layer.function as fun
from torch.autograd import Variable
import numpy as np
import utils.utils as util
# from net.deeplabv3plus import SiameseNet
from net.segnet import SiameseNet, FeatureResNet
from net.fusenet import fusenet
from PIL import Image
##############################

datasets = 'CD2014'
import cfgs.CD2014config as cfg
import dataset.CD2014 as dates



def check_dir(dir):
    """Create directory *dir* (including parents) if it does not exist.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the original
    ``os.path.exists`` pre-check, which was racy: another process could
    create the directory between the check and the ``makedirs`` call and
    trigger ``FileExistsError``.

    Args:
        dir: Directory path to ensure exists. (Name kept for backward
             compatibility although it shadows the ``dir`` builtin.)
    """
    os.makedirs(dir, exist_ok=True)

def set_base_learning_rate_for_multi_layer(model):
    """Yield the weight parameters of every layer except the embedding layer.

    These are the parameters intended to train at the base learning rate.
    """
    for name, param in model.named_parameters():
        if 'embedding_layer' not in name and 'weight' in name:
            yield param

def set_2x_learning_rate_for_multi_layer(model):
    """Yield the bias parameters of every layer except the embedding layer.

    These are the parameters intended to train at 2x the base learning rate.
    """
    for name, param in model.named_parameters():
        if 'embedding_layer' in name:
            continue
        if 'bias' in name:
            yield param

def set_10x_learning_rate_for_multi_layer(model):
    """Yield the weight parameters belonging to the embedding layer.

    These are the parameters intended to train at 10x the base learning rate.
    """
    for name, param in model.named_parameters():
        if 'embedding_layer' in name and 'weight' in name:
            yield param

def set_20x_learning_rate_for_multi_layer(model):
    """Yield the bias parameters belonging to the embedding layer.

    These are the parameters intended to train at 20x the base learning rate.
    """
    for name, param in model.named_parameters():
        if 'embedding_layer' in name:
            if 'bias' in name:
                yield param

def untransform(transform_img, mean_vector):
    """Invert the preprocessing applied to an image tensor.

    Moves the channel axis last (CHW -> HWC), re-adds the per-channel mean,
    casts to uint8 and reverses the channel order (BGR <-> RGB).

    Note: the mean addition is in-place on a transposed view, so the
    caller's array buffer is modified as well (same as the original code).
    """
    img = np.transpose(transform_img, (1, 2, 0))
    img += mean_vector  # in-place: mutates the underlying buffer
    img = img.astype(np.uint8)
    return img[:, :, ::-1]

def various_distance(out_vec_t0, out_vec_t1, dist_flag):
    """Compute a per-row distance between two (N, C) feature tensors.

    Args:
        out_vec_t0: First feature tensor, one feature vector per row.
        out_vec_t1: Second feature tensor, same shape as ``out_vec_t0``.
        dist_flag: One of ``'l2'`` (Euclidean), ``'l1'`` (Manhattan) or
            ``'cos'`` (1 - cosine similarity).

    Returns:
        1-D tensor of distances, one per row.

    Raises:
        ValueError: if ``dist_flag`` is not recognised. (The original code
            fell through to an ``UnboundLocalError`` on the return line.)
    """
    if dist_flag == 'l2':
        return F.pairwise_distance(out_vec_t0, out_vec_t1, p=2)
    if dist_flag == 'l1':
        return F.pairwise_distance(out_vec_t0, out_vec_t1, p=1)
    if dist_flag == 'cos':
        return 1 - F.cosine_similarity(out_vec_t0, out_vec_t1)
    raise ValueError(
        "unknown dist_flag: %r (expected 'l2', 'l1' or 'cos')" % (dist_flag,))

def single_layer_similar_heatmap_visual(output_t0,output_t1,save_change_map_dir,epoch,filename,layer_flag,dist_flag):
    """Render a per-pixel feature-distance heat-map for one layer and save it.

    Computes the distance (per ``dist_flag``) between corresponding feature
    vectors of two (1, c, h, w) feature maps, upsamples the resulting (h, w)
    map to the configured frame size, colorizes it with a JET colormap and
    writes it under ``save_change_map_dir/epoch_<epoch>/<layer_flag>/<filename>``.

    Returns the upsampled (unnormalized) distance map as a numpy array.
    """
    # Upsample the raw (h, w) distance map back to the configured output size.
    # NOTE(review): nn.Upsample with mode='bilinear' defaults align_corners to
    # False on modern torch and warns on older versions — confirm intended.
    interp = nn.Upsample(size=[cfg.TRANSFROM_SCALES[1],cfg.TRANSFROM_SCALES[0]], mode='bilinear')
    n, c, h, w = output_t0.data.shape

    # Flatten each feature map to (h*w, c): one c-dim feature vector per pixel.
    out_t0_, out_t1_ = output_t0.squeeze(0), output_t1.squeeze(0)
    out_t0_rz = torch.transpose(out_t0_.view(c, h * w), 1, 0)
    out_t1_rz = torch.transpose(out_t1_.view(c, h * w), 1, 0)

    distance = various_distance(out_t0_rz,out_t1_rz,dist_flag=dist_flag)
    similar_distance_map = distance.view(h,w).data.cpu().numpy()
    # Add batch/channel axes so the 2-D map can go through nn.Upsample.
    similar_distance_map_rz = interp(Variable(torch.from_numpy(similar_distance_map[np.newaxis, np.newaxis, :])))
    # NOTE(review): the map is scaled by 255 without normalizing to [0, 1]
    # first — distances > 1 will wrap/saturate in np.uint8. Verify the
    # distance range (e.g. 'cos' is bounded in [0, 2], 'l2' is unbounded).
    similar_dis_map_colorize = cv2.applyColorMap(np.uint8(255 * similar_distance_map_rz.data.cpu().numpy()[0][0]), cv2.COLORMAP_JET)
    save_change_map_dir_ = os.path.join(save_change_map_dir, 'epoch_' + str(epoch))
    check_dir(save_change_map_dir_)

    save_change_map_dir_layer = os.path.join(save_change_map_dir_,layer_flag)
    check_dir(save_change_map_dir_layer)
    save_weight_fig_dir = os.path.join(save_change_map_dir_layer, filename)

    # ``filename`` may contain sub-directories; ensure its parent dir exists.
    save_weight_fig_dir_1 = os.path.dirname(save_weight_fig_dir)
    check_dir(save_weight_fig_dir_1)

    cv2.imwrite(save_weight_fig_dir, similar_dis_map_colorize)
    return similar_distance_map_rz.data.cpu().numpy()


def train():
  """Fine-tune the fusion head of a Siamese change-detection network.

  Loads a pretrained SiameseNet-based fusenet from the best-performance
  checkpoint, freezes the Siamese backbone parameters, and trains the
  remaining parameters with SGD and a contrastive loss on the CD2014
  dataset. Saves checkpoints to ``cfg.SAVE_PRED_PATH/contrastive_loss`` and
  copies the best-loss model over ``cfg.TRAINED_BEST_PERFORMANCE_CKPT``.
  """
  # Make the random numbers drawn during training reproducible.
  # init_seed = 1
  # torch.manual_seed(init_seed)
  # torch.cuda.manual_seed(init_seed)
  # np.random.seed(init_seed)  # seed for numpy's RNG

  torch.backends.cudnn.benchmark = False

  torch.cuda.empty_cache()
  train_transform_det = trans.Compose([
      trans.Scale(cfg.TRANSFROM_SCALES),
  ])
  val_transform_det = trans.Compose([
      trans.Scale(cfg.TRANSFROM_SCALES),
  ])
  train_data = dates.Dataset(cfg.TRAIN_DATA_PATH,cfg.TRAIN_LABEL_PATH,
                                cfg.TRAIN_TXT_PATH,'train',transform=True,
                                transform_med = train_transform_det)
  train_loader = Data.DataLoader(train_data,batch_size=cfg.BATCH_SIZE,
                                 shuffle= True, num_workers= 0, pin_memory= True, drop_last=True)

  # Build the model: ResNet feature extractor -> Siamese wrapper -> fusenet.
  pretrained_net = FeatureResNet()
  Siamese = SiameseNet(pretrained_net)
  model = fusenet(Siamese)
  checkpoint_path = cfg.TRAINED_BEST_PERFORMANCE_CKPT
  # Strip the DataParallel 'module.' prefix so the weights load into a
  # non-wrapped model.
  model.load_state_dict({k.replace('module.' , '') : v for k , v in torch.load(checkpoint_path)['state_dict'].items()})


  resume = 1
  print('TRAIN_TXT_PATH:', cfg.TRAIN_TXT_PATH)
  if resume:
      # NOTE(review): this re-loads the exact same checkpoint as above
      # (both paths are cfg.TRAINED_BEST_PERFORMANCE_CKPT) — redundant.
      path = cfg.TRAINED_BEST_PERFORMANCE_CKPT
      model.load_state_dict({k.replace('module.', ''): v for k, v in torch.load(path)['state_dict'].items()})
      print('resume success')



  if torch.cuda.device_count() > 1:
      print("Let's use", torch.cuda.device_count(), "GPUs!")
      model = nn.DataParallel(model)

  model = model.cuda()
  FuseLoss = ls.MarginConstractiveLoss()

  ab_test_dir = os.path.join(cfg.SAVE_PRED_PATH,'contrastive_loss')
  check_dir(ab_test_dir)
  # save_change_map_dir = os.path.join(ab_test_dir, 'changemaps/')
  # save_valid_dir = os.path.join(ab_test_dir,'valid_imgs')
  # save_roc_dir = os.path.join(ab_test_dir,'roc')
  # check_dir(save_change_map_dir),check_dir(save_valid_dir),check_dir(save_roc_dir)

  # Freeze the Siamese backbone; only the fusion layers keep gradients.
  # NOTE(review): the match is on the *parameter name* containing the string
  # 'SiameseNet' — parameter names come from attribute names, not class
  # names, so verify these parameters are actually being frozen.
  params_dict = dict(model.named_parameters())
  for layer_name, layer_param in params_dict.items():
      if 'SiameseNet' in layer_name:
          layer_param.requires_grad = False
  params = filter(lambda p: p.requires_grad, model.parameters())
  optimizer = torch.optim.SGD(params, lr=cfg.INIT_LEARNING_RATE, momentum=cfg.MOMENTUM, weight_decay=cfg.DECAY)


  loss_total = 0
  best_loss = 0
  # NOTE(review): epoch count is hard-coded to 2, so the "every 10 epochs"
  # checkpoint below never fires; only the best-loss branch saves models.
  for epoch in range(2):
      loss_total = 0
      for batch_idx, batch in enumerate(train_loader):
             step = epoch * len(train_loader) + batch_idx
             util.adjust_learning_rate(cfg.INIT_LEARNING_RATE, optimizer, step)
             model.train()
             img1_idx, img2_idx, label_idx, roi_idx, filename, height, width = batch

             img1, img2, label, roi = Variable(img1_idx.cuda()), Variable(img2_idx.cuda()), Variable(
                 label_idx.cuda()), Variable(roi_idx.cuda())

             fuse, out_middle, out_bottom, out_final = model(img1, img2)

             print(fuse.data.cpu().numpy().shape)
             # Resize label/ROI to the spatial size of the fused prediction.
             label_fuse, roi_fuse = util.resize_label_and_roi(label.data.cpu().numpy()[:,:,:,:,0], roi.data.cpu().numpy(), size=fuse.data.cpu().numpy().shape[2:])
             label_fuse, roi_fuse = Variable(label_fuse.cuda()), Variable(roi_fuse.cuda())

             BCEloss_fuse = FuseLoss(fuse, label_fuse, roi_fuse)
             loss = BCEloss_fuse

             loss_total += loss.data.cpu()
             optimizer.zero_grad()
             loss.backward()
             optimizer.step()

             # NOTE(review): format says "Epoch [%d/%d]" but the second value
             # is the batch index, not the epoch count — misleading label.
             print("Epoch [%d/%d] "
                   "fuse: %.4f" % (epoch, batch_idx,
                                   BCEloss_fuse))

      print('loss_total:  ', loss_total)
      if (epoch+1) % 10 == 0:
          torch.save({'state_dict': model.state_dict()},
                       os.path.join(ab_test_dir, 'model' + str(epoch) + '.pth'))

      # Track the best (lowest) epoch loss; epoch 0 initializes the baseline.
      if epoch == 0:
          best_loss = loss_total
          torch.save({'state_dict': model.state_dict()},
                     os.path.join(ab_test_dir, 'model' + str(epoch) + '.pth'))
          shutil.copy(os.path.join(ab_test_dir, 'model' + str(epoch) + '.pth'),
                      cfg.TRAINED_BEST_PERFORMANCE_CKPT)
      elif loss_total < best_loss:
          torch.save({'state_dict': model.state_dict()},
                      os.path.join(ab_test_dir, 'model' + str(epoch) + '.pth'))
          shutil.copy(os.path.join(ab_test_dir, 'model' + str(epoch) + '.pth'),
                      cfg.TRAINED_BEST_PERFORMANCE_CKPT)
          best_loss = loss_total

# Script entry point: run fine-tuning when executed directly.
if __name__ == '__main__':
   train()
