import os
import sys  
sys.path.append("..")
import platform
import random
import numpy as np
import pandas as pd
import argparse
import time
import torch
import torch.nn as nn
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import precision_recall_curve
from scipy.ndimage import gaussian_filter
from skimage import morphology
from skimage.segmentation import mark_boundaries
from tqdm import tqdm
from READ_pytorch.datasets import MVTecDataset, CLASS_NAMES, OBJECT, TEXTURE
from READ_pytorch.datasets import centercrop_tranform, resize_transform_basic
from READ_pytorch.utils import time_file_str
from READ_pytorch.utils import GPUManager
from READ_pytorch.utils import visualize_loc_result
from READ_pytorch.ad_algorithm import PatchCore
import json
import gc
import matplotlib.pyplot as plt
import matplotlib
# import logging
# logger = logging.getLogger('ESAD.Train')


def parse_args():
    """Parse command-line arguments for PatchCore training/evaluation.

    Returns:
        argparse.Namespace: parsed arguments. ``--mean`` and ``--std`` are
        parsed directly as floats, so downstream code may use them as-is.
    """
    parser = argparse.ArgumentParser(description='PatchCore anomaly detection training.')
    parser.add_argument('--class_name', type=str, default='bottle')
    parser.add_argument('--backbone', type=str, choices=['resnet18', 'wide_resnet50_2'], default='wide_resnet50_2', help='Define backbone.')
    parser.add_argument('--data_dir', type=str, default='/data0/adc/Datasets/mvtec_anomaly_detection', help='Define the data dir')
    parser.add_argument("--save_dir", type=str, default='../ckpts', help="Define where to save model checkpoints.")
    parser.add_argument("--result_dir", type=str, default='../results', help="Define where to save test results.")
    parser.add_argument('--batch_size', type=int, default=8)
    parser.add_argument("--color", type=str, default='RGB', choices=['RGB', 'BGR', 'GRAY'], help="Define original color of training images")
    # type=float so CLI-supplied values arrive as floats (previously they came
    # in as strings and relied on a float() cast in main()).
    parser.add_argument("--mean", nargs='+', type=float, default=[0.485, 0.456, 0.406], help="Define the mean for image normalization.")
    parser.add_argument("--std", nargs='+', type=float, default=[0.229, 0.224, 0.225], help="Define the std for image normalization.")
    parser.add_argument("--gpu_num", type=int, default=1, help="Define how many gpus used to train this model")
    parser.add_argument("--train", action='store_true', help="Whether to train or not.")
    parser.add_argument("--img_size", type=int, default=256, help="Define the image size.")
    parser.add_argument("--crop_size", type=int, default=224, help="Define the crop size.")
    parser.add_argument("--pretrain", type=str, default=None, help="Location of the pretrained weights.")
    parser.add_argument('--experiment', default=None,
                        help='Experiment name (default None).')
    parser.add_argument("--n_viz", type=int, default=30, help="num of viz results.")
    parser.add_argument('--corest_ratio', type=float, default=0.01, help='coreset subsampling rate')
    args = parser.parse_args()

    return args

def main():
    """Run PatchCore training and/or evaluation over one or all MVTec classes.

    Per class: build train/test datasets, optionally train the model, score
    every test image, min-max normalize the anomaly maps, report image- and
    pixel-level ROCAUC, then save the ROC curve, localization visualizations,
    and a cumulative CSV of per-class metrics.
    """
    args = parse_args()
    # Pin this process to auto-selected GPUs before any CUDA context is created.
    gpu_list = ",".join([str(x) for x in GPUManager().auto_choice(gpu_num=args.gpu_num)])
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_list)
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    # Defensive cast: harmless no-op if argparse already delivered floats.
    args.mean = [float(x) for x in args.mean]
    args.std = [float(x) for x in args.std]
    args.prefix = time_file_str()
    # 'all' expands to every MVTec class; otherwise validate the single class.
    if args.class_name == 'all':
        DATASET = CLASS_NAMES
    else:
        if args.class_name in CLASS_NAMES:
            DATASET = [args.class_name]
        else:
            raise ValueError('dataset not exists')

    test_results = pd.DataFrame()
    # Timestamped results root so repeated runs never overwrite each other.
    result_out = os.path.join(args.result_dir, time.strftime("%m%d_%H%M", time.localtime(time.time())))

    for class_name in DATASET:
        test_cache = pd.DataFrame()
        # Init experiment (checkpoint directory name).
        if args.experiment is None:
            experi_name = '%s_%s' % (
                        'PatchCore',
                        class_name
                        )
        else:
            experi_name = args.experiment
        out_dir = os.path.abspath(os.path.join(args.save_dir, experi_name))
        if not os.path.exists(out_dir):
            os.makedirs(out_dir, exist_ok=True)

        class_transform = centercrop_tranform(img_size=args.img_size, 
                                                    crop_size=args.crop_size, 
                                                    mean=args.mean, 
                                                    std=args.std)
        train_dataset = MVTecDataset(data_path=args.data_dir, class_name=class_name, 
                                    is_train=True, resize=args.img_size, cropsize=args.crop_size,
                                    transform=class_transform, 
                                    length=None, img_color=args.color)
        test_dataset = MVTecDataset(data_path=args.data_dir, class_name=class_name, 
                                    is_train=False, resize=args.img_size, cropsize=args.crop_size,
                                    transform=class_transform, 
                                    length=None, img_color=args.color)
        kwargs = {'num_workers': 8, 'pin_memory': True} if torch.cuda.is_available() else {}
        # batch_size=1 so each anomaly map aligns one-to-one with a test image.
        test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False, **kwargs)

        model = PatchCore(backbone=args.backbone, corest_ratio=args.corest_ratio)
        if args.pretrain is not None:
            model.load_weights(args.pretrain)
        if args.train:
            print('Start to train!')
            model.train(train_dataset, out_dir, expect_fpr=0.05)

        scores = []
        test_imgs = []
        gt_list = []
        gt_mask_list = []

        for (x, y, mask) in tqdm(test_loader, '| feature extraction | test | %s |' % class_name):
            test_imgs.extend(x.cpu().detach().numpy())
            gt_list.extend(y.cpu().detach().numpy())
            gt_mask_list.extend(mask.cpu().detach().numpy())
            # Only the per-pixel score map is used; the image-level score is
            # recomputed below from the normalized maps.
            _, score = model.predict(x)
            scores.extend(score)

        torch.cuda.empty_cache()

        # Min-max normalize the anomaly maps into [0, 1]. Guard against a
        # zero range (all scores equal), which previously produced NaNs.
        score_map = np.asarray(scores)
        max_anomaly_score = score_map.max()
        min_anomaly_score = score_map.min()
        score_range = max_anomaly_score - min_anomaly_score
        if score_range > 0:
            scores = (score_map - min_anomaly_score) / score_range
        else:
            scores = np.zeros_like(score_map)

        # Image-level ROCAUC: image score = max pixel score of its map.
        img_scores = scores.reshape(scores.shape[0], -1).max(axis=1)
        gt_list = np.asarray(gt_list)
        img_roc_auc = roc_auc_score(gt_list, img_scores)
        print('image ROCAUC: %.3f' % (img_roc_auc))

        # Pixel-level metrics; segmentation threshold = argmax of F1 over the PR curve.
        gt_mask = np.asarray(gt_mask_list)
        precision, recall, thresholds = precision_recall_curve(gt_mask.flatten().astype('uint8'), scores.flatten())
        a = 2 * precision * recall
        b = precision + recall
        f1 = np.divide(a, b, out=np.zeros_like(a), where=b != 0)
        threshold = thresholds[np.argmax(f1)]
        print('Optimal thres', threshold)
        print('Estimate thres', model.seg_thres)
        fpr, tpr, _ = roc_curve(gt_mask.flatten().astype('uint8'), scores.flatten())
        per_pixel_rocauc = roc_auc_score(gt_mask.flatten().astype('uint8'), scores.flatten())
        print('pixel ROCAUC: %.3f' % (per_pixel_rocauc))
        test_cache['class'] = [class_name]
        test_cache['image ROCAUC'] = [img_roc_auc]
        test_cache['pixel ROCAUC'] = [per_pixel_rocauc]
        test_results = pd.concat([test_results, test_cache])
        # Label with the current loop class (was args.class_name, which printed
        # 'all' for every curve in multi-class runs).
        # NOTE(review): curves accumulate on one shared pyplot figure across
        # classes, so each saved PNG also contains prior classes' curves —
        # confirm whether a fresh figure per class is intended.
        plt.plot(fpr, tpr, label='%s ROCAUC: %.3f' % (class_name, per_pixel_rocauc))
        plt.legend(loc="lower right")
        save_dir = result_out + '/' + f'PatchCore_{class_name}' + '/' + 'pictures_{:.4f}'.format(
            threshold)
        os.makedirs(save_dir, exist_ok=True)
        plt.savefig(os.path.join(save_dir, class_name + '_roc_curve.png'), dpi=100)

        visualize_loc_result(args, test_imgs, gt_mask_list, scores, threshold, save_dir, class_name, args.n_viz)

        # Rewrite the cumulative CSV every iteration so partial results survive a crash.
        test_results.to_csv(os.path.join(result_out, 'test_results.csv'), index=False)

        # Collect memory before the next class.
        del model
        gc.collect()
        torch.cuda.empty_cache()
    
# Script entry point: run training/evaluation only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()