import os

import cv2
import numpy as np
import torch
import torch.nn as nn
from sklearn.metrics import precision_recall_curve, roc_auc_score
from torch import optim
from torch.nn import functional as F
from torchvision import transforms

from utils.embedding_utils import *
from utils.metrics import cal_confusion_matrix, estimate_thr_recall
from utils.optims import *
from utils.schedulers import *

class BaseModel(nn.Module):
    """Skeleton base class for anomaly-detection models.

    Subclasses override :meth:`init_model`, :meth:`forward`, :meth:`train`,
    :meth:`test`, etc.  During testing a subclass fills the accumulator lists
    created by :meth:`init_results_list`; :meth:`evaluate` then computes
    pixel-level / image-level AUROC plus confusion-matrix statistics and
    appends them to a results text file.
    """

    def __init__(self, cfg):
        """Store the config and initialize the model and result accumulators.

        Args:
            cfg: configuration mapping; :meth:`evaluate` reads at least
                ``cfg['results_dir']`` and ``cfg['method']`` from it.
        """
        super(BaseModel, self).__init__()

        self.cfg = cfg
        self.init_model()
        self.init_results_list()

    def forward(self, x):
        # To be implemented by subclasses.
        pass

    def init_model(self):
        # To be implemented by subclasses (build network / feature extractor).
        pass

    def init_results_list(self):
        """Reset the per-run result accumulators filled during testing."""
        self.gt_list_px_lvl = []      # per-pixel ground-truth labels
        self.pred_list_px_lvl = []    # per-pixel anomaly scores
        self.gt_list_img_lvl = []     # per-image ground-truth labels
        self.pred_list_img_lvl = []   # per-image anomaly scores
        self.img_path_list = []       # paths of the evaluated images
        self.img_list = []            # raw images (optional, subclass use)

    # NOTE(review): this shadows nn.Module.train(mode=True) with an
    # incompatible signature, so calls made by PyTorch internals (e.g.
    # model.train() with no argument) will break.  Kept unchanged because
    # subclasses and external callers rely on this interface.
    def train(self, dataloader):
        pass

    def train_after(self, c):
        pass

    def test(self, dataloader):
        pass

    def test_after(self, test_dataloader, c):
        pass

    def est_thresh(self, gt_list_px_lvl, pred_list_px_lvl):
        """Return the decision threshold that maximizes F1 on the PR curve.

        Args:
            gt_list_px_lvl: binary ground-truth labels.
            pred_list_px_lvl: predicted anomaly scores.

        Returns:
            The score threshold with the highest F1 = 2*P*R / (P + R).
        """
        precision, recall, thresholds = precision_recall_curve(gt_list_px_lvl, pred_list_px_lvl)
        numerator = 2 * precision * recall
        denominator = precision + recall
        # Guard against 0/0 at degenerate precision/recall points.
        f1 = np.divide(numerator, denominator, out=np.zeros_like(numerator), where=denominator != 0)
        return thresholds[np.argmax(f1)]

    def min_max_norm(self, scores):
        """Min-max normalize ``scores`` to approximately [0, 1].

        Side effect: stores the observed extrema on ``self.score_max`` /
        ``self.score_min`` so callers can de-normalize later.  The small
        epsilon avoids division by zero when all scores are equal.
        """
        self.score_max = scores.max()
        self.score_min = scores.min()
        scores_norm = (scores - self.score_min) / (self.score_max - self.score_min + 1e-5)
        return scores_norm

    def evaluate(self, c):
        """Compute AUROC / recall / precision metrics and log them to a file.

        Args:
            c: run identifier (e.g. category name) written as the line prefix
                in ``results_<method>.txt``.
        """
        print("Total pixel-level auc-roc score :")
        try:
            pixel_auc = roc_auc_score(self.gt_list_px_lvl, self.pred_list_px_lvl)
        except (ValueError, TypeError):
            # roc_auc_score rejects nested / multi-dimensional inputs; retry
            # on flattened arrays.  (Was a bare `except:`, which also
            # swallowed KeyboardInterrupt and genuine bugs.)
            gt_list_px_lvl = np.array(self.gt_list_px_lvl).flatten()
            pred_list_px_lvl = np.array(self.pred_list_px_lvl).flatten()
            pixel_auc = roc_auc_score(gt_list_px_lvl, pred_list_px_lvl)

        print(pixel_auc)
        print("Total image-level auc-roc score :")
        img_auc = roc_auc_score(self.gt_list_img_lvl, self.pred_list_img_lvl)
        print(img_auc)

        values = {'pixel_auc': round(pixel_auc, 4), 'img_auc': round(img_auc, 4)}

        best_thr = self.est_thresh(self.gt_list_img_lvl, self.pred_list_img_lvl)

        print('image-level best_thr is:', best_thr)

        # Threshold the image-level scores and collect confusion-matrix stats.
        noraml_recall, abnormal_recall, precision, false_p, false_n = \
            cal_confusion_matrix(self.gt_list_img_lvl, self.pred_list_img_lvl, img_path_list=self.img_path_list, thresh=best_thr)
        # NOTE(review): 'noraml_recall' is a typo for 'normal_recall'; the key
        # is written to the results file, so it is kept unchanged to avoid
        # breaking downstream consumers of that file.
        values["noraml_recall"] = round(noraml_recall, 4)
        values["abnormal_recall"] = round(abnormal_recall, 4)
        values["precision"] = round(precision, 4)
        # add best thresh
        values["best_thr"] = round(best_thr, 6)
        values["false_p"] = false_p
        values["false_n"] = false_n

        print('test end')

        with open(os.path.join(self.cfg['results_dir'], 'results_{}.txt'.format(self.cfg['method'])), 'a') as f:
            f.write(c + ' : ' + str(values) + '\n')