import torch
import os
import argparse
import pandas as pd
import time, random
from tqdm import tqdm
import numpy as np
from pathlib import Path
from datasets import T7_Box_Test_Dataset, T7_Box_Dataset
from datasets.transformations import normalize_only
import platform
import yaml
from utils.helper import cm_analysis
from utils.metrics import  AverageMeter, accuracy
import models as cls_models

import logging
# Module-level logger for this test script; basicConfig installs a root
# stream handler so DEBUG-and-above messages are printed with timestamps.
logger = logging.getLogger('ClsModel.Test')
logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s',
                    level=logging.DEBUG)


class Cls_Test(object):
    """Runs a classification model over a dataloader and collects test metrics.

    Computes top-1 / top-2 accuracy, per-sample predicted labels and maximum
    softmax probabilities, and writes a confusion-matrix image to ``out_dir``.
    The ``auc_ovr`` / ``auc_ovo`` fields are kept for interface compatibility
    but are never computed here — they stay 0.
    """

    def __init__(self, dataloader, model, model_name, out_dir, ymap, batch_size, save_pred_file, is_cuda):
        # NOTE: `save_pred_file` is accepted for interface compatibility but is
        # unused here — the caller decides whether to persist predictions.
        self.batch_size = batch_size
        self.out_dir = out_dir

        self.net = model
        self.dataloader = dataloader
        self.model_name = model_name
        self.ymap = ymap  # label-index -> code mapping, used to annotate the confusion matrix
        self.is_cuda = is_cuda

        # Filled in by iterate(); AUCs are placeholders (never computed).
        self.acc1s = 0
        self.acc2s = 0
        self.auc_ovr = 0
        self.auc_ovo = 0

    def forward(self, images, targets):
        """Run one inference batch.

        Returns a fixed 4-tuple ``(loss, outputs, aux, classification_vector)``
        where loss/aux/classification_vector are always None — placeholders so
        callers can unpack a stable shape.
        """
        if self.is_cuda:
            images = images.cuda(non_blocking=True)
            # Move targets in place so the caller sees the CUDA tensors too.
            for j in range(len(targets)):
                targets[j] = targets[j].cuda(non_blocking=True)

        with torch.no_grad():
            outputs = self.net(images)

        return None, outputs, None, None

    def iterate(self):
        """Sweep the dataloader once.

        Returns ``(y_pred, y_true, pred_probs, out_classification_vector)``
        where pred_probs is the max softmax probability per sample and the
        classification vector is always None.  Also stores acc1s/acc2s on self.
        """
        top1 = AverageMeter('Acc@1', ":6.2f")
        top2 = AverageMeter('Acc@2', ":6.2f")  # fixed: was mislabeled 'Acc@1'
        y_pred, y_true = [], []
        prob_chunks = []  # per-batch softmax arrays; concatenated once at the end

        for images, targets, _ in tqdm(self.dataloader):
            _, outputs, _, _ = self.forward(images, targets)
            target = targets[0].cpu()
            outputs = outputs.detach().cpu()
            acc1, acc2 = accuracy(outputs, target, topk=(1, 2))
            _, preds = outputs.topk(1, 1, True, True)
            # view(-1) instead of squeeze(t()): squeeze produced a 0-d tensor
            # for a final batch of size 1, which made y_pred.extend() raise.
            y_pred.extend(preds.view(-1).numpy())
            y_true.extend(target.numpy())
            top1.update(acc1[0], images.size(0))
            top2.update(acc2[0], images.size(0))
            prob_chunks.append(torch.softmax(outputs, dim=1).numpy())

        # AverageMeter.avg is presumably a tensor once updated — TODO confirm;
        # fall back to float() for the empty-loader case.
        avg1, avg2 = top1.avg, top2.avg
        self.acc1s = np.squeeze(avg1.cpu().numpy()) if torch.is_tensor(avg1) else float(avg1)
        self.acc2s = np.squeeze(avg2.cpu().numpy()) if torch.is_tensor(avg2) else float(avg2)

        if prob_chunks:
            pred_probs = np.concatenate(prob_chunks, axis=0).max(axis=1)
        else:
            pred_probs = np.empty(0)  # empty dataloader: nothing to report

        if self.is_cuda:
            torch.cuda.empty_cache()
        return y_pred, y_true, pred_probs, None

    def start(self):
        """Run the evaluation, log metrics, and save the confusion matrix."""
        y_pred, y_true, pred_probs, out_classification_vector = self.iterate()
        logger.info('model: %s, Val Acc-1: %.4f, Acc-2: %.4f, Auc-ovr: %.4f, Auc-ovo: %.4f.' % (
                self.model_name,
                self.acc1s,
                self.acc2s,
                self.auc_ovr,
                self.auc_ovo
                ))
        cm_analysis(y_true, y_pred, os.path.join(self.out_dir, '%s_confusion_matrix_lowest.png'%self.model_name),
                            np.arange(len(self.ymap.keys())), ymap=self.ymap)
        print()
        return self.acc1s, self.acc2s, self.auc_ovr, self.auc_ovo, y_pred, y_true, pred_probs, out_classification_vector


def parse_args():
    """Build the argument parser for the classification test script.

    Returns the configured ``argparse.ArgumentParser`` (not parsed args),
    matching the original contract — the caller invokes ``parse_args()``
    on it.
    """
    def _str2bool(v):
        # argparse `type=bool` is broken: bool('False') is True.  Parse the
        # common textual spellings explicitly instead.
        if isinstance(v, bool):
            return v
        if v.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        if v.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('boolean value expected, got %r' % v)

    # Description fixed: this script tests a classification model, not a
    # segmentation trainer.
    parser = argparse.ArgumentParser(description='Test a classification model.')
    parser.add_argument('--name', default=None,
                        help='Name of this experiment: (default: arch+timestamp)')
    parser.add_argument("--batch_size", type=int, default=120, help="batch size of input")
    parser.add_argument("--img_height", type=int, default=299, help="size of image height")
    parser.add_argument("--img_width", type=int, default=299, help="size of image width")
    # type=float so CLI-supplied values arrive numeric (main() re-casts anyway).
    parser.add_argument("--mean", nargs='+', type=float, default=[0.485, 0.456, 0.406], help="Define the mean for image normalization.")
    parser.add_argument("--std", nargs='+', type=float, default=[0.229, 0.224, 0.225], help="Define the std for image normalization.")
    parser.add_argument("--test_csv", type=str, default='/data2/autorepair/ruanzhifeng/autorepair_t7_10/code/adc_classification/datasets/csv/AA_T7/T6_7_charm_merged_val_1104.csv', help="Define the data location.")
    parser.add_argument("--path", type=str, default='/data2/autorepair/ruanzhifeng/autorepair_t7_10/code/adc_classification/ckpts/AA_T7/1104_110553_densenet161', help="The folder of models")
    parser.add_argument("--save_dir", type=str, default='./test_result/AA_T7', help="Define where to save model checkpoints.")
    parser.add_argument("--csv_name", type=str, default='val_classification.csv', help="")
    parser.add_argument("--vector_file_path", type=str, default='xxx.txt', help="")

    parser.add_argument("--save_pred_file", type=_str2bool, default=True, help="Define whether to save pred file.")
    parser.add_argument("--seed", type=int, default=576, help="Define the seed.")

    return parser

def setup_seed(seed):
    """Seed every RNG in play (python, numpy, torch CPU+CUDA) for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    # Force deterministic cuDNN kernels (at the cost of some speed).
    torch.backends.cudnn.deterministic = True

def get_model_file(path, suffix='pth'):
    """Return the first filename in `path` ending with `suffix` whose name
    contains 'latest', or None if there is none.

    Entries are sorted: os.listdir order is filesystem-dependent, so the
    original could pick a different checkpoint on different machines.
    """
    for m in sorted(os.listdir(path)):
        if m.endswith(suffix) and 'latest' in m:
            return m
    return None

def dict_reverse(memo):
    """Invert a mapping: {k: v, ...} -> {v: k, ...} (values must be hashable)."""
    return {value: key for key, value in memo.items()}

def update_map(code2label, label2code, arr, codes):
    """Extend the code<->label maps in place with any new codes seen in `arr`.

    New codes get consecutive label indices starting at len(code2label).
    If `codes` is non-empty it acts as a whitelist: unseen codes not in it
    are skipped.  Returns None (mutates both dicts).

    The candidate codes are sorted: the original iterated `list(set(arr))`,
    whose order is hash-randomized, so label indices differed between runs.
    """
    n = len(code2label)
    for code in sorted(set(arr)):
        if code in code2label:
            continue
        # Non-empty `codes` restricts which new codes may be added.
        if codes and code not in codes:
            continue
        code2label[code] = n
        label2code[n] = code
        n += 1


def main():
    """Entry point: load config + checkpoint, evaluate the test CSV, and
    optionally write a per-sample prediction CSV.

    Raises FileNotFoundError early when config.yaml or a 'latest' checkpoint
    is missing (the original printed a message and then crashed later with an
    unrelated exception).
    """
    parser = parse_args()
    args = parser.parse_args()

    setup_seed(args.seed)

    os.environ["CUDA_VISIBLE_DEVICES"] = '0'

    # CLI values may arrive as strings; normalize to float.
    args.mean = [float(x) for x in args.mean]
    args.std = [float(x) for x in args.std]

    logger.info('Loading the datasets ...')
    df_test = pd.read_csv(args.test_csv)
    # One row per image: keep the first occurrence of each path.
    df_test = df_test.drop_duplicates(subset=['image'], keep='first', inplace=False).reset_index(drop=True)
    logger.info('Datasets loaded done !!!')
    # Worker processes + pinned memory only make sense on a CUDA Linux box.
    kwargs = {'num_workers': 8, 'pin_memory': True} if (torch.cuda.is_available() and platform.system() == 'Linux') else {}

    config_path = os.path.join(args.path, 'config.yaml')
    if not os.path.exists(config_path):
        raise FileNotFoundError('config.yaml not found in %s' % args.path)
    with open(config_path, 'r', encoding="utf-8") as f:
        # Skips the first line of config.yaml — presumably a non-YAML header
        # written by the trainer; TODO confirm against the training script.
        next(f)
        file_data = f.read()
    config = yaml.load(file_data, Loader=yaml.FullLoader)

    model_param_file = get_model_file(args.path)
    if model_param_file is None:
        raise FileNotFoundError('no "latest" .pth checkpoint found in %s' % args.path)
    model_dict = torch.load(os.path.join(args.path, model_param_file))
    label2code = model_dict['ymap']  # label index -> code string
    model_name = config['model']
    print('y_map: ', label2code)

    # Model class is looked up by name on the project `models` package.
    model = getattr(cls_models, model_name)(num_classes=len(label2code), pretrained=None)
    model.load_state_dict(model_dict['state_dict'])
    model = model.cuda()
    model.eval()

    code2label = dict_reverse(label2code)
    # Keep only rows whose code the model was trained on.
    df_test = df_test.loc[df_test["code"].isin(code2label.keys())].copy().reset_index(drop=True)
    logger.info(f'The test set shape is {df_test.shape} .')

    test_dataset = T7_Box_Dataset(dataframe=df_test,
                                    code2label=code2label,
                                    img_size=(args.img_height,args.img_width),
                                    transform=normalize_only(img_size=(args.img_height,args.img_width),
                                                    mean=args.mean,
                                                    std=args.std)
                                )
    # shuffle=False keeps loader order aligned with df_test rows for the CSV below.
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, **kwargs)

    model_folder_name = str(Path(args.path).name)
    os.makedirs(args.save_dir, exist_ok=True)
    model_trainer = Cls_Test(test_loader, model, model_folder_name, args.save_dir, label2code, batch_size=args.batch_size, save_pred_file=args.save_pred_file, is_cuda=True)
    acc1s, acc2s, auc_ovr, auc_ovo, y_pred, y_true, pred_probs, out_classification_vector = model_trainer.start()

    if args.save_pred_file:
        res = pd.DataFrame()
        df_test['image_name'] = df_test['image'].apply(lambda x: x.split('/')[-1])
        res['img_path'] = df_test['image']
        res['img'] = df_test['image_name']
        res['code'] = list(map(lambda x:label2code[x], y_true))
        res['pred_code'] = list(map(lambda x:label2code[x], y_pred))
        res['prob'] = pred_probs
        res.to_csv(os.path.join(args.save_dir, args.csv_name), index=False)

    print('model_name: ', model_folder_name)
    # Fixed: values now match the label order (ovo, ovr) — they were swapped.
    print('acc_1:%.1f, acc_2:%.1f, auc_ovo:%.1f, auc_ovr:%.1f\n'%(acc1s, acc2s, auc_ovo, auc_ovr))
    torch.cuda.empty_cache()


# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()
