import sys  
import os
import gc
import argparse
import time
import pandas as pd
import numpy as np
import torch
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import precision_recall_curve
from tqdm import tqdm
from anomaly.datasets import MVTecDataset_RE ,CLASS_NAMES, OBJECT
from anomaly.datasets import resize_transform_basic, rotationflip_transform
from anomaly.utils import time_file_str
from anomaly.utils import visualize_loc_result
from anomaly.utils import GPUManager
from anomaly.models import FAVAE
import platform
import matplotlib.pyplot as plt
import logging

from anomaly.utils.metrics import estimate_thr_recall, cal_confusion_matrix

logger = logging.getLogger('favae.Train')


def parse_args():
    """Parse command-line arguments for FAVAE anomaly-detection training.

    Returns:
        argparse.Namespace: all training/evaluation options.
    """
    # FIX: description said 'RIAD' but this script trains FAVAE.
    parser = argparse.ArgumentParser(description='FAVAE anomaly detection training.')
    parser.add_argument('--class_name', type=str, default='bottle',
                        help="MVTec class to process, or 'all' for every class.")
    parser.add_argument('--data_dir', type=str, default='../../_DATASET/mvtec', help='Define the data dir')
    parser.add_argument("--weights_dir", type=str, default='../../_Weights/anomaly_lab/favae/weights',
                        help="Define where to save model checkpoints.")
    parser.add_argument('--epochs', type=int, default=300, help='maximum training epochs')
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--validation_ratio', type=float, default=0.2,
                        help='Fraction of training images reserved for validation.')
    parser.add_argument('--z_dim', type=int, default=100, help="Define the hidden state length.")
    parser.add_argument("--color", type=str, default='RGB', choices=['RGB', 'BGR', 'GRAY'],
                        help="Define original color of training images")
    # FIX: parse normalization stats as floats instead of raw strings
    # (main() still re-casts, so this stays backward-compatible).
    parser.add_argument("--mean", nargs='+', type=float, default=[0, 0, 0],
                        help="Define the mean for image normalization.")
    parser.add_argument("--std", nargs='+', type=float, default=[1, 1, 1],
                        help="Define the std for image normalization.")
    parser.add_argument('--lr', type=float, default=1e-3, help='learning rate of Adam')
    parser.add_argument('--kld_weight', type=float, default=1.0)
    parser.add_argument("--optimizer", type=str, default='adam',
                        choices=['adam', 'sgd', 'adabelief', 'radam'], help="Define optimizer.")
    parser.add_argument("--gpu_num", type=int, default=1,
                        help="Define how many gpus used to train this model")
    # NOTE(review): the literal None in choices is unreachable from the CLI
    # (user input arrives as the string 'None'); kept for interface compatibility.
    parser.add_argument("--scheduler", type=str, default='cosine',
                        choices=['step', 'cosine', None], help="Define scheduler.")
    parser.add_argument("--train", action='store_true', help="Whether to train or not.")
    parser.add_argument("--pretrain", type=str, default=None,
                        help="Location of the pretrained weights.")
    parser.add_argument('--experiment', default=None,
                        help='Experiment name (default None).')
    # FIX: help text was a stale copy of --experiment's; this is the output
    # directory for metric plots, recall stats and mis-detection visualizations.
    parser.add_argument('--result_dir', default='/home/ops/anomaly_lab/favae/result',
                        help='Directory where evaluation results are written.')
    parser.add_argument("--n_viz", type=int, default=50,
                        help="Number of localization results to visualize (best/worst cases).")
    return parser.parse_args()


def main():
    """Train and/or evaluate FAVAE on one MVTec class (or all of them).

    For each selected class: builds augmentation/resize transforms and
    datasets, optionally trains the model, then evaluates the test set
    (image- and pixel-level ROCAUC, recall/precision at the best-F1
    threshold), saves ROC-curve plots and localization visualizations, and
    appends per-class metrics to a CSV under a time-stamped result directory.
    """
    args = parse_args()

    # Pin visible GPUs before any CUDA context is created.
    gpu_list = ",".join([str(x) for x in GPUManager().auto_choice(gpu_num=args.gpu_num)])
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_list

    os.makedirs(args.weights_dir, exist_ok=True)

    args.input_channel = 1 if args.color == 'GRAY' else 3
    # CLI values may arrive as strings; normalize to floats once up front.
    args.mean = [float(x) for x in args.mean]
    args.std = [float(x) for x in args.std]
    args.prefix = time_file_str()

    if args.class_name == 'all':
        DATASET = CLASS_NAMES
    elif args.class_name in CLASS_NAMES:
        DATASET = [args.class_name]
    else:
        raise ValueError('dataset not exists')

    test_results = pd.DataFrame()
    # One time-stamped directory per run keeps results of separate runs apart.
    result_out = os.path.join(args.result_dir, time.strftime("%m%d_%H%M", time.localtime(time.time())))

    for class_name in DATASET:
        test_cache = pd.DataFrame()
        if args.experiment is None:
            experi_name = '%s_%s' % ('FAVAE', class_name)
        else:
            experi_name = args.experiment

        # Checkpoint directory for this experiment.
        out_dir = os.path.abspath(os.path.join(args.weights_dir, experi_name))
        os.makedirs(out_dir, exist_ok=True)

        # Objects: low resolution, aggressive rotation augmentation.
        # Textures: high resolution (finer structure needs more pixels),
        # only mild rotation.
        if class_name in OBJECT:
            args.img_size = 128
            rotation = 180
            args.lr = 0.005
        else:
            args.img_size = 512
            rotation = 15
            args.lr = 0.001
        args.crop_size = 128
        border_mode = 0

        rotflip_transform = rotationflip_transform(img_size=(args.img_size, args.img_size),
                                                   mean=args.mean,
                                                   std=args.std,
                                                   rotation=rotation,
                                                   border_mode=border_mode)
        resize_transform = resize_transform_basic(img_size=(args.img_size, args.img_size),
                                                  mean=args.mean,
                                                  std=args.std)

        train_data = MVTecDataset_RE(data_path=args.data_dir, class_name=class_name,
                                     is_train=True, resize=args.img_size, cropsize=args.img_size,
                                     transform=rotflip_transform,
                                     length=None, img_color=args.color)

        # Repeat the (randomly augmented) training set until one epoch covers
        # at least 10000 images.
        train_data_cat = train_data
        while len(train_data_cat) < 10000:
            train_data_cat = torch.utils.data.ConcatDataset([train_data_cat, train_data])

        test_data = MVTecDataset_RE(data_path=args.data_dir, class_name=class_name,
                                    is_train=False, resize=args.img_size, cropsize=args.img_size,
                                    transform=resize_transform,
                                    length=None, img_color=args.color)
        loader_kwargs = ({'num_workers': 0, 'pin_memory': True}
                         if (torch.cuda.is_available() and platform.system() == 'Linux') else {})
        # FIX: the original tested ``args.class_name in OBJECT``, which is
        # always False when --class_name=all; use the per-iteration class name.
        test_dataloader = torch.utils.data.DataLoader(test_data,
                                                      batch_size=24 if class_name in OBJECT else 1,
                                                      shuffle=False, **loader_kwargs)

        model = FAVAE(n_channel=args.input_channel,
                      crop_size=args.crop_size, z_dim=args.z_dim)

        if args.pretrain is not None:
            model.load_weights(args.pretrain)
        if args.train:
            print('Start to train!')
            model.train(train_data_cat, save_path=out_dir,
                        val_data=test_data, lr=args.lr, optimizer=args.optimizer,
                        scheduler=args.scheduler, batch_size=args.batch_size,
                        epochs=args.epochs)

        ####### Start to Test #######
        scores = []
        test_imgs = []
        gt_list = []
        gt_mask_list = []
        gt_list_path = []
        for (data, label, mask, data_p) in tqdm(test_dataloader):
            test_imgs.extend(data.cpu().numpy())
            # NOTE(review): paths are appended per batch (not extended) while
            # the other lists are flattened; downstream consumers appear to
            # expect this shape -- confirm before changing.
            gt_list_path.append(data_p)
            gt_list.extend(label.cpu().numpy())
            gt_mask_list.extend(mask.cpu().numpy())
            img_score, score = model.predict(data)
            scores.extend(score)

        scores = np.asarray(scores)

        # Image-level score: maximum anomaly score over all pixels of an image.
        img_scores = scores.reshape(scores.shape[0], -1).max(axis=1)
        gt_list = np.asarray(gt_list)
        img_roc_auc = roc_auc_score(gt_list, img_scores)
        print('image ROCAUC: %.3f' % (img_roc_auc))

        # FIX: the original passed ``img_score`` -- the stale last-batch value
        # left over from the loop above -- instead of the aggregated
        # per-image ``img_scores``.
        best_f1_v = estimate_thr_recall(gt_list, img_scores, draw=False)
        noraml_recall, abnormal_recall, precision_img, false_p, false_n = cal_confusion_matrix(
            gt_list, img_scores, best_f1_v, gt_list_path)

        # Image-level recall/precision statistics at the best-F1 threshold.
        test_cache["false_p"] = false_p
        test_cache["false_n"] = false_n
        test_cache["noraml_recall"] = [noraml_recall]  # (sic) column name kept for compatibility
        test_cache["abnormal_recall"] = [abnormal_recall]
        test_cache["precision_img"] = [precision_img]

        # Pixel-level evaluation: pick the threshold maximizing F1 on the PR curve.
        gt_mask = np.asarray(gt_mask_list)
        flat_gt = gt_mask.flatten().astype('uint8')
        flat_scores = scores.flatten()
        precision, recall, thresholds = precision_recall_curve(flat_gt, flat_scores)
        a = 2 * precision * recall
        b = precision + recall
        f1 = np.divide(a, b, out=np.zeros_like(a), where=b != 0)  # guard against 0/0
        threshold = thresholds[np.argmax(f1)]
        print('Optimal thres', threshold)
        print('Estimate seg thres', model.seg_thres)

        fpr, tpr, _ = roc_curve(flat_gt, flat_scores)
        per_pixel_rocauc = roc_auc_score(flat_gt, flat_scores)
        print('pixel ROCAUC: %.3f' % (per_pixel_rocauc))

        test_cache['class'] = [class_name]
        test_cache['image ROCAUC'] = [img_roc_auc]
        test_cache['pixel ROCAUC'] = [per_pixel_rocauc]
        test_results = pd.concat([test_results, test_cache])

        # NOTE(review): the matplotlib figure is never cleared, so curves of
        # earlier classes accumulate in later plots -- confirm this is intended.
        # FIX: label the curve with the class being evaluated, not the CLI
        # argument (which may be 'all').
        plt.plot(fpr, tpr, label='%s ROCAUC: %.3f' % (class_name, per_pixel_rocauc))
        plt.legend(loc="lower right")
        save_dir = result_out + '/' + f'FAVAE_{class_name}' + '/' + 'pictures_{:.4f}'.format(
            threshold)
        os.makedirs(save_dir, exist_ok=True)
        plt.savefig(os.path.join(save_dir, class_name + '_roc_curve.png'), dpi=100)

        visualize_loc_result(args, test_imgs, gt_list_path, gt_mask_list, scores, threshold,
                             save_dir, class_name, args.n_viz)

        # Persist after every class so partial results survive a crash.
        test_results.to_csv(os.path.join(result_out, 'test_results.csv'), index=False)

        # Release model and GPU memory before the next class.
        del model
        gc.collect()
        torch.cuda.empty_cache()


# Script entry point: run training/evaluation when executed directly.
if __name__ == "__main__":
    main()
