import argparse
import os.path
import shutil
import logging
import cv2

import matplotlib.pyplot as plt
import torch
import numpy as np

from networks.net_factory import net_factory
from dataloaders.dataset import BaseDataset, RandomCrop
from torch.utils.data import DataLoader
from torchvision import transforms
from utils.utils import accuracy, SCDD_eval_all, AverageMeter, create_visual_output, Evaluator
import torch.nn.functional as F

parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/Simple', help='root directory of the dataset')
parser.add_argument('--exp', type=str,
                    default='Simple/Distillation', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='fcnet', help='model_name')
parser.add_argument('--num_classes', type=int,
                    default=7, help='output channel of network')
parser.add_argument('--test_pth', type=int,
                    default=500, help='model pth for test')
parser.add_argument('--step', type=int,
                    default=2, help='total numbers of the incremental steps')
# fix: type=list would split a CLI value into single characters
# (e.g. "512" -> ['5', '1', '2']); accept two integers instead.
parser.add_argument('--patch_size', type=int, nargs=2,
                    default=[512, 512], help='patch size (H W) of network input')
parser.add_argument('--iter_visual', type=int,
                    default=2, help='save a prediction image every iter_visual batches')

def test(testloader, net, FLAGS, test_save_path, step_b):
    """Evaluate a change-detection + segmentation network on one test set.

    Args:
        testloader: DataLoader yielding dicts with keys 'image_A', 'image_B',
            'label_A', 'label_B'. Visualization assumes batch size 1.
        net: network returning ``(output_cd, output_A, output_B)``.
        FLAGS: parsed CLI arguments (uses num_classes, iter_visual, test_pth).
        test_save_path: directory where visualization PNGs are written.
        step_b: zero-based incremental step index (used in output file names).

    Returns:
        ``(mean_metrics_seg, mean_metrics_cd)`` — dicts of segmentation and
        change-detection metrics.
    """
    net.eval()
    torch.cuda.empty_cache()
    acc_meter = AverageMeter()

    # Binary change-detection evaluator (class 1: change, class 2: not change).
    evaluator_cd = Evaluator(num_class=2)
    evaluator_cd.reset()

    preds_all = []
    labels_all = []
    for i_batch, sampled_batch in enumerate(testloader):
        print(i_batch)
        image_A = sampled_batch['image_A'].cuda()
        image_B = sampled_batch['image_B'].cuda()
        label_A = sampled_batch['label_A'].cuda()
        label_B = sampled_batch['label_B'].cuda()

        with torch.no_grad():
            output_cd, output_A, output_B = net(image_A, image_B)

            # Segmentation predictions: argmax over the class dimension.
            labels_A = label_A.cpu().detach().numpy()
            labels_B = label_B.cpu().detach().numpy()
            preds_A = torch.argmax(output_A.cpu().detach(), dim=1).numpy()
            preds_B = torch.argmax(output_B.cpu().detach(), dim=1).numpy()

            # Change-mask metric.
            # NOTE(review): (label_A == label_B) marks UNCHANGED pixels with
            # 255 and changed ones with 0 — confirm this polarity matches
            # what Evaluator.add_batch expects.
            label_cd = torch.where(label_A == label_B, 255, 0)
            # shapes: label_cd [1, 512, 512], output_cd [1, 1, 512, 512]
            evaluator_cd.add_batch(label_cd, output_cd[0])
            mIoU_cd = evaluator_cd.Mean_Intersection_over_Union()
            Precision_cd = evaluator_cd.Precision()
            Recall_cd = evaluator_cd.Recall()
            F1_cd = evaluator_cd.F1()

            # Change-mask visualization (reuse label_cd — the original
            # recomputed the identical tensor a second time).
            label_cd = label_cd.cpu().detach().numpy()
            output_cd = output_cd[0].cpu().detach()
            output_cd = torch.sigmoid(output_cd) * 255  # F.sigmoid is deprecated

            output_cd = np.transpose(output_cd, (1, 2, 0))
            label_cd = np.transpose(label_cd, (1, 2, 0))
            output_cd = cv2.cvtColor(output_cd.numpy().astype(np.uint8), cv2.COLOR_GRAY2RGB)
            # Invert so the mask renders dark on a white background.
            output_cd, label_cd = 255 - output_cd, 255 - label_cd
            label_cd = np.concatenate((label_cd, label_cd, label_cd), axis=-1)

        # Per-sample pixel accuracy; accumulate predictions for the global
        # segmentation metrics computed once after the loop.
        for p_A, p_B, l_A, l_B in zip(preds_A, preds_B, labels_A, labels_B):
            acc_A, _valid_A = accuracy(p_A, l_A)
            acc_B, _valid_B = accuracy(p_B, l_B)
            preds_all.append(p_A)
            preds_all.append(p_B)
            labels_all.append(l_A)
            labels_all.append(l_B)
            acc_meter.update((acc_A + acc_B) * 0.5)

        if i_batch % FLAGS.iter_visual == 0:
            # 3x2 grid: (GT A, pred A) / (GT B, pred B) / (GT mask, pred mask).
            panels = [
                (create_visual_output(labels_A[0]), 'The Ground Truth A'),
                (create_visual_output(preds_A[0]), 'The Prediction A'),
                (create_visual_output(labels_B[0]), 'The Ground Truth B'),
                (create_visual_output(preds_B[0]), 'The Prediction B'),
                (label_cd, 'The Ground Truth Mask'),
                (output_cd, 'The Prediction Mask'),
            ]
            fig = plt.figure()
            for pos, (img, title) in enumerate(panels, start=1):
                ax = fig.add_subplot(3, 2, pos)
                ax.imshow(img)
                plt.title(title, fontdict={'weight': 'normal', 'size': 8})
                plt.xticks([])
                plt.yticks([])
            plt.tight_layout()
            plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0, hspace=0.3)
            save_path = os.path.join(
                test_save_path,
                'step' + str(step_b + 1) + '_iter' + str(FLAGS.test_pth) + '_' + str(i_batch) + '.png')
            plt.savefig(save_path, dpi=300, bbox_inches='tight')
            plt.close(fig)  # fix: figures were never closed -> unbounded memory growth
            print('Prediction saved!')

    # Segmentation metrics over ALL accumulated samples — hoisted out of the
    # batch loop; the original recomputed them on the growing lists every
    # batch (quadratic cost) and only used the final value.
    score, mIoU_seg, Sek = SCDD_eval_all(preds_all, labels_all, FLAGS.num_classes)

    # NOTE(review): SCDD_eval_all already aggregates over all samples;
    # dividing by len(testloader) again looks suspect — confirm the intended
    # normalization before trusting absolute values.
    mean_metrics_seg = {
        'seg_score': (score / len(testloader)) * 100,
        'mIoU_seg': (mIoU_seg / len(testloader)) * 100,
        'Sek_seg': (Sek / len(testloader)) * 100,
        'acc_seg': acc_meter.avg * 100,
    }

    mean_metrics_cd = {
        'cd_mIoU': mIoU_cd.data,
        'cd_precisions': Precision_cd.data.cpu(),
        'cd_recalls': Recall_cd.data.cpu(),
        'cd_f1scores': F1_cd.data.cpu(),
    }

    logging.info("CD METRICS" + str(mean_metrics_cd))
    logging.info("SEG METRICS" + str(mean_metrics_seg))

    return mean_metrics_seg, mean_metrics_cd



def Inference(FLAGS):
    """Run evaluation for every incremental step of an experiment.

    Restores the checkpoint for each step, builds the matching test split
    and logs segmentation / change-detection metrics.
    """
    # Where checkpoints live and where prediction images will be written.
    snapshot_path = f"../model/{FLAGS.exp}/{FLAGS.model}"
    test_save_path = f"../model/{FLAGS.exp}/{FLAGS.model}_predictions/"
    if os.path.exists(test_save_path):
        shutil.rmtree(test_save_path)
    os.makedirs(test_save_path)

    logging.basicConfig(filename=test_save_path + "/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.info(str(FLAGS))

    # One evaluation pass per incremental step.
    mode = ['set0', 'sub_set1', 'sub_set2', 'sub_set3', 'val']  # FIXME: change the train set to validation set
    for step_b in range(FLAGS.step):
        # Build the network and restore its weights.
        net = net_factory(net_type=FLAGS.model, in_chns=3, class_num=FLAGS.num_classes)
        # NOTE: the step index in the checkpoint name is hard-coded to 2.
        # TODO: switch to the best_model checkpoint.
        save_model_path = os.path.join(snapshot_path, f'step2_iter_{FLAGS.test_pth}.pth')
        print('save_model_path: ', save_model_path)
        if os.path.exists(save_model_path):
            net.load_state_dict(torch.load(save_model_path))
            net.eval()
            print(f"init weigh from {save_model_path}")
        else:
            print('no loading')

        # Evaluation data for this step (random crops to the network input size).
        crop = transforms.Compose([RandomCrop(FLAGS.patch_size)])
        test_set = BaseDataset(base_dir=FLAGS.root_path, split="train", num=20,
                               sub_set=mode[step_b + 1], transform=crop)
        testloader = DataLoader(test_set, batch_size=1, shuffle=False, num_workers=0)
        print('len(testloader)', len(testloader))

        mean_metrics_seg, mean_metrics_cd = test(testloader, net, FLAGS, test_save_path, step_b)
        print('test data set: ', mode[step_b + 1], 'mean_metrics_seg:', mean_metrics_seg, 'mean_metrics_cd:', mean_metrics_cd)




if __name__ == '__main__':
    # Parse CLI flags and launch the evaluation loop.
    Inference(parser.parse_args())

