import argparse
import os.path
import shutil
import logging
import cv2

import matplotlib.pyplot as plt
import torch
import numpy as np

from networks.net_factory import net_factory
from dataloaders.dataset import StickerDataset, StickerRandomCrop
from torch.utils.data import DataLoader
from torchvision import transforms
from utils.utils import accuracy, SCDD_eval_all, AverageMeter, create_visual_output, Evaluator
import torch.nn.functional as F
from sklearn.metrics import confusion_matrix


# Qt must be pointed at the cv2-bundled platform plugins inside this conda env,
# otherwise cv2/matplotlib windows fail to open on this machine.
envpath = '/home/a409_home/anaconda3/envs/wenglean_pytorch/lib/python3.8/site-packages/cv2/qt/plugins/platforms'
os.environ['QT_QPA_PLATFORM_PLUGIN_PATH'] = envpath

parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/SYSU-4Scene', help='Name of Experiment')
parser.add_argument('--exp', type=str,
                    default='SYSU-4Scene/Distillation', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='cscdnet', help='model_name')
parser.add_argument('--num_classes', type=int,
                    default=7, help='output channel of network')
parser.add_argument('--test_pth', type=int,
                    default=20, help='model pth for test')
parser.add_argument('--step', type=int,
                    default=2, help='total numbers of the incremental steps')
parser.add_argument('--test_list', type=str, default="01",
                    help='sub_set3 to sub_set4')
# BUG FIX: the original ``type=list`` would split a command-line value into
# single characters ("256" -> ['2', '5', '6']); accept two ints instead.
# The default is unchanged, so existing behavior without the flag is identical.
parser.add_argument('--patch_size', type=int, nargs=2,
                    default=[256, 256], help='patch size of network input')
parser.add_argument('--iter_visual', type=int,
                    default=20, help='save the prediction image every iter_visual batches')

def _save_prediction_figure(image_A, image_B, label, output_cd, save_path):
    """Write a 2x2 panel (image A, image B, GT mask, predicted mask) to disk."""
    fig = plt.figure()
    panels = [(image_A / 255., 'The A'),
              (image_B / 255., 'The B'),
              (label, 'The Ground Truth Mask'),
              (output_cd, 'The Prediction Mask')]
    for idx, (img, title) in enumerate(panels, start=1):
        ax = fig.add_subplot(2, 2, idx)
        ax.imshow(img)
        plt.title(title, fontdict={'weight': 'normal', 'size': 8})
        plt.xticks([])
        plt.yticks([])
    plt.tight_layout()
    plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0, hspace=0.3)
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    # Close explicitly: the original leaked one open figure per saved image.
    plt.close(fig)
    print('Prediction saved!')


def test(testloader, net, FLAGS, test_save_path, step_b):
    """Run change-detection inference over ``testloader`` and report metrics.

    Args:
        testloader: DataLoader yielding dicts with 'image' (a pair [A, B] of
            tensors) and 'label' (the change mask).
        net: trained change-detection network, called as ``net(image_A, image_B)``
            and returning ``(outputs, feature_map)``.
        FLAGS: parsed command-line arguments; uses ``iter_visual`` (visualization
            stride) and ``test_pth`` (checkpoint id, used in figure file names).
        test_save_path: directory where visualization figures are written.
        step_b: zero-based incremental-step index (only used in figure names).

    Returns:
        dict with binary change-detection metrics accumulated over the whole
        test set: 'cd_mIoU', 'cd_precisions', 'cd_recalls', 'cd_f1scores'.
    """
    net.eval()
    torch.cuda.empty_cache()

    # Binary change evaluator: one class for "change", one for "no change".
    evaluator_cd = Evaluator(num_class=2)
    evaluator_cd.reset()

    for i_batch, sampled_batch in enumerate(testloader):
        print(i_batch)
        image_A, image_B, label = sampled_batch['image'][0], sampled_batch['image'][1], sampled_batch['label']
        image_A, image_B, label = image_A.cuda(), image_B.cuda(), label.long().cuda()

        with torch.no_grad():
            output_cd, feature_map = net(image_A, image_B)

            # First sample of the batch as HWC numpy images for matplotlib.
            image_A, image_B = image_A[0].cpu().detach().numpy(), image_B[0].cpu().detach().numpy()
            image_A, image_B = np.transpose(image_A, (1, 2, 0)), np.transpose(image_B, (1, 2, 0))

            # Hard prediction: argmax over the last output head.
            _, output_cd = torch.max(output_cd[-1], 1)

            # Accumulate confusion statistics; summary metrics are computed
            # once after the loop (the evaluator keeps running totals), which
            # also fixes the NameError the original raised on an empty loader.
            evaluator_cd.add_batch(label, output_cd)

            # --- prepare masks for visualization ---
            label = label.cpu().detach().numpy()
            output_cd = output_cd.cpu().detach()
            output_cd = output_cd * 255  # {0, 1} -> {0, 255}

            output_cd, label = np.transpose(output_cd, (1, 2, 0)), np.transpose(label, (1, 2, 0))
            output_cd = cv2.cvtColor(output_cd.numpy().astype(np.uint8), cv2.COLOR_GRAY2RGB)

            label[label > 0] = 255
            # Invert so that "change" renders dark on a white background.
            output_cd, label = 255 - output_cd, 255 - label
            label = np.concatenate((label, label, label), axis=-1)

        # Save one visualization every FLAGS.iter_visual batches.
        if i_batch % FLAGS.iter_visual == 0:
            save_path = os.path.join(
                test_save_path,
                'step' + str(step_b + 1) + '_iter' + str(FLAGS.test_pth) + '_' + str(i_batch) + '.png')
            _save_prediction_figure(image_A, image_B, label, output_cd, save_path)

    # Metrics from the accumulated confusion matrix.
    # NOTE(review): with an empty testloader the evaluator has seen no pixels;
    # confirm Evaluator handles that (the original code crashed with NameError).
    mean_metrics_cd = {}
    mean_metrics_cd['cd_mIoU'] = evaluator_cd.Mean_Intersection_over_Union()
    mean_metrics_cd['cd_precisions'] = evaluator_cd.Precision().data.cpu()
    mean_metrics_cd['cd_recalls'] = evaluator_cd.Recall().data.cpu()
    mean_metrics_cd['cd_f1scores'] = evaluator_cd.F1().data.cpu()

    logging.info("CD METRICS" + str(mean_metrics_cd))

    return mean_metrics_cd



def Inference(FLAGS):
    """Evaluate the saved checkpoint on every test subset in ``FLAGS.test_list``.

    Builds the network, loads the step-``FLAGS.step`` checkpoint saved at
    iteration ``FLAGS.test_pth``, and runs :func:`test` once per character of
    ``FLAGS.test_list`` (each character indexes a subset in ``mode``).
    Prediction figures and the log file go to the experiment's
    ``*_predictions`` directory, which is recreated from scratch on each run.
    """
    snapshot_path = "../model/{}/{}".format(
        FLAGS.exp, FLAGS.model)
    test_save_path = "../model/{}/{}_predictions/".format(
        FLAGS.exp, FLAGS.model)
    # Start from an empty prediction directory.
    if os.path.exists(test_save_path):
        shutil.rmtree(test_save_path)
    os.makedirs(test_save_path)

    # os.path.join avoids the double slash the original string concat produced.
    logging.basicConfig(filename=os.path.join(test_save_path, "log.txt"), level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.info(str(FLAGS))

    '''Loop test for each step
    '''
    # Candidate evaluation splits; FLAGS.test_list holds indices into this list.
    mode = ['sub_set1', 'sub_set2', 'sub_set3', 'sub_set4', 'val']  # FIXME: change the train set to validation set

    for step_b in range(len(FLAGS.test_list)):

        '''Load the model and parameters
        '''
        net = net_factory(net_type=FLAGS.model, in_chns=3, class_num=FLAGS.num_classes)
        save_model_path = os.path.join(
            snapshot_path,
            'step' + str(FLAGS.step) + '_iter_{}.pth'.format(FLAGS.test_pth))  # TODO: load the best checkpoint instead of a fixed iteration
        print('save_model_path: ', save_model_path)
        if os.path.exists(save_model_path):
            net.load_state_dict(torch.load(save_model_path))
            net.eval()
            print("init weigh from {}".format(save_model_path))
        else:
            # Best-effort: fall through and evaluate randomly-initialized weights.
            print('no loading')

        sub_set = mode[int(FLAGS.test_list[step_b])]
        test_set = StickerDataset(base_dir=FLAGS.root_path, split="test", num=20, sub_set=sub_set)
        testloader = DataLoader(test_set, batch_size=1, shuffle=False, num_workers=0)
        print('len(testloader)', len(testloader))
        # Keep the returned metrics so each subset's scores reach the log
        # (the original discarded them; see the commented-out print it left).
        mean_metrics_cd = test(testloader, net, FLAGS, test_save_path, step_b)
        logging.info('test data set: {} mean_metrics_cd: {}'.format(sub_set, mean_metrics_cd))




if __name__ == '__main__':
    # Parse the command-line flags and run the full inference loop.
    args = parser.parse_args()
    Inference(args)

