import torch
from torch.nn import functional as F
import torch.nn as nn
import cv2
import os
from sklearn.metrics import roc_auc_score,precision_recall_curve
from utils.metrics import cal_confusion_matrix, estimate_thr_recall
from torchvision import transforms
from utils.embedding_utils import *
from utils.kdad_utils import *
from .base import BaseModel
from backbones.vgg_kdad import *
from losses.kdad_losses import *
from pathlib import Path
from tqdm import tqdm
from utils.metrics import AverageMeter, estimate_thr_by_list
# Inverse of the standard ImageNet normalization (mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225]): x_orig = (x_norm - (-mean/std)) / (1/std).
# Fixed: third channel previously used 0.255 instead of 0.225 (digit swap),
# which mis-scaled the blue channel when un-normalizing for visualization.
inv_normalize = transforms.Normalize(mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225], std=[1/0.229, 1/0.224, 1/0.225])

class KDAD(BaseModel):
    """Knowledge-Distillation Anomaly Detection (KDAD).

    A "cloner" network is trained to reproduce the intermediate
    activations of a source VGG16 on normal images only. At test time
    the mimicry error between the two networks gives the image-level
    anomaly score, and a gradient-based localization map gives the
    pixel-level score.
    """

    def __init__(self, cfg):
        # NOTE(review): super(BaseModel, self) deliberately skips
        # BaseModel.__init__ and initialises the next class in the MRO
        # instead — confirm this is intended before changing it.
        super(BaseModel, self).__init__()
        self.cfg = cfg

        # Checkpoint / visualization output directories.
        # Fixed: prep_dirs was previously called twice with identical
        # arguments; one call suffices. `weights_dir_path` is kept as an
        # alias for backward compatibility with existing callers.
        self.weights_dir, self.sample_path = prep_dirs(self.cfg['results_dir'])
        self.weights_dir_path = self.weights_dir

        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.normal_class = self.cfg['normal_class']
        self.lamda = self.cfg['lamda']  # weight of the MSE term in the combined loss
        if self.cfg['direction_loss_only']:
            self.criterion = DirectionOnlyLoss()
        else:
            self.criterion = MseDirectionLoss(self.lamda)
        self.init_model()
        self.init_results_list()

    def forward(self, x):
        """Run the cloner network on a batch of images and return its
        (multi-level) activations."""
        return self.model(x)

    def init_model(self):
        """Build the source VGG and the cloner network, optionally
        restoring both from checkpoints under ``self.weights_dir``.

        Config 'A' mirrors the source VGG16 widths (equal network size);
        config 'B' is a slimmer cloner architecture.
        """
        self.equal_network_size = self.cfg['equal_network_size']
        self.pretrain = self.cfg['pretrain']
        self.use_bias = self.cfg['use_bias']
        self.load_checkpoint = self.cfg['load_checkpoint']
        self.normal_class = self.cfg['normal_class']
        network_cfg = {
            'A': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
            'B': [16, 16, 'M', 16, 128, 'M', 16, 16, 256, 'M', 16, 16, 512, 'M', 16, 16, 512, 'M'],
        }
        config_type = 'A' if self.equal_network_size else 'B'

        self.vgg = Vgg16(self.pretrain).to(self.device)
        self.model = make_arch(config_type, network_cfg, self.use_bias, True).to(self.device)

        if self.load_checkpoint:
            last_checkpoint = self.cfg['last_checkpoint']
            checkpoint_path = self.weights_dir
            self.model.load_state_dict(
                torch.load('{}/Cloner_{}_epoch_{}.pth'.format(checkpoint_path, self.normal_class, last_checkpoint)))
            if not self.pretrain:
                # A randomly-initialised source must be restored too, so the
                # cloner is compared against the same target it was trained on.
                self.vgg.load_state_dict(
                    torch.load('{}/Source_{}_random_vgg.pth'.format(checkpoint_path, self.normal_class)))
        elif not self.pretrain:
            # Persist the random source weights so later runs can reload the
            # exact same target network.
            checkpoint_path = self.weights_dir
            Path(checkpoint_path).mkdir(parents=True, exist_ok=True)
            torch.save(self.vgg.state_dict(), '{}/Source_{}_random_vgg.pth'.format(checkpoint_path, self.normal_class))
            print("Source Checkpoint saved!")

    def init_results_list(self):
        """Reset all per-evaluation accumulators."""
        self.gt_list_px_lvl = []      # pixel-level ground-truth masks
        self.pred_list_px_lvl = []    # pixel-level anomaly maps
        self.gt_list_img_lvl = []     # image-level labels
        self.pred_list_img_lvl = []   # image-level anomaly scores
        self.img_path_list = []       # source paths, for tracing mispredictions
        self.img_list = []            # raw input images, for visualization

    def train(self, train_dataloader, test_dataloader):
        """Train the cloner to mimic the source VGG on normal data.

        Evaluates on ``test_dataloader`` every 10 epochs and checkpoints
        the model and optimizer every ``train_interval`` epochs.
        """
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=float(self.cfg['learning_rate']))
        self.num_epochs = self.cfg['num_epochs']
        self.interval = self.cfg['train_interval']
        # NOTE(review): range(num_epochs + 1) runs one extra epoch and also
        # evaluates/checkpoints at epoch 0 — confirm this is intended.
        for epoch in range(self.num_epochs + 1):
            self.model.train()

            for data in train_dataloader:
                img = data[0]  # keep the sample format consistent across datasets
                if img.shape[1] == 1:
                    # Grayscale input: tile to 3 channels for the VGG stem.
                    img = img.repeat(1, 3, 1, 1)
                img = img.to(self.device)

                output_pred = self.forward(img)
                output_real = self.vgg(img)

                total_loss = self.criterion(output_pred, output_real)
                self.optimizer.zero_grad()
                total_loss.backward()
                self.optimizer.step()

            if epoch % 10 == 0:
                self.test_after(test_dataloader, self.normal_class)
                self.evaluate(self.normal_class)
                self.init_results_list()
            if epoch % self.interval == 0:
                torch.save(self.model.state_dict(),
                        '{}/Cloner_{}_epoch_{}.pth'.format(self.weights_dir, self.normal_class, epoch))
                torch.save(self.optimizer.state_dict(),
                        '{}/Opt_{}_epoch_{}.pth'.format(self.weights_dir, self.normal_class, epoch))

    def train_after(self, c):
        """Post-training hook; intentionally a no-op for KDAD."""
        pass

    def test_after(self, test_dataloader, c):
        """Score ``test_dataloader`` and fill the prediction/GT lists.

        Image-level scores come from the distillation test loss; pixel-level
        maps come from ``localization_test``. ``c`` is the class identifier,
        used only when naming the saved visualizations.
        """
        self.direction_only = self.cfg['direction_loss_only']

        similarity_loss = torch.nn.CosineSimilarity()
        label_score = []
        self.model.eval()
        for index, data in enumerate(tqdm(test_dataloader)):
            # `path` is kept so mispredicted samples can be traced back.
            x, y, mask, path = data
            if x.shape[1] == 1:
                x = x.repeat(1, 3, 1, 1)

            x = x.to(self.device)
            output_pred = self.model(x)
            output_real = self.vgg(x)
            # Compare activations at three intermediate depths of the VGG.
            y_pred_1, y_pred_2, y_pred_3 = output_pred[6], output_pred[9], output_pred[12]
            y_1, y_2, y_3 = output_real[6], output_real[9], output_real[12]

            if self.direction_only:
                loss_1 = 1 - similarity_loss(y_pred_1.view(y_pred_1.shape[0], -1), y_1.view(y_1.shape[0], -1))
                loss_2 = 1 - similarity_loss(y_pred_2.view(y_pred_2.shape[0], -1), y_2.view(y_2.shape[0], -1))
                loss_3 = 1 - similarity_loss(y_pred_3.view(y_pred_3.shape[0], -1), y_3.view(y_3.shape[0], -1))
                total_loss = loss_1 + loss_2 + loss_3
            else:
                abs_loss_1 = torch.mean((y_pred_1 - y_1) ** 2, dim=(1, 2, 3))
                loss_1 = 1 - similarity_loss(y_pred_1.view(y_pred_1.shape[0], -1), y_1.view(y_1.shape[0], -1))
                abs_loss_2 = torch.mean((y_pred_2 - y_2) ** 2, dim=(1, 2, 3))
                loss_2 = 1 - similarity_loss(y_pred_2.view(y_pred_2.shape[0], -1), y_2.view(y_2.shape[0], -1))
                abs_loss_3 = torch.mean((y_pred_3 - y_3) ** 2, dim=(1, 2, 3))
                loss_3 = 1 - similarity_loss(y_pred_3.view(y_pred_3.shape[0], -1), y_3.view(y_3.shape[0], -1))
                total_loss = loss_1 + loss_2 + loss_3 + self.lamda * (abs_loss_1 + abs_loss_2 + abs_loss_3)

            # The test loss itself serves as the image-level anomaly score.
            label_score += list(zip(y.cpu().data.numpy().tolist(), total_loss.cpu().data.numpy().tolist()))
            self.gt_list_px_lvl.extend(mask.cpu().numpy())
            self.img_path_list.extend(path)
            self.img_list.extend(x.cpu().numpy())
        labels, scores = zip(*label_score)
        self.gt_list_img_lvl = np.array(labels)
        self.gt_list_px_lvl = np.array(self.gt_list_px_lvl)
        self.pred_list_img_lvl = np.array(scores)

        # Pixel-level anomaly maps from gradient-based localization.
        grad, _ = localization_test(model=self.model, vgg=self.vgg, test_dataloader=test_dataloader,
                                    config=self.cfg)

        # Normalize scores to [0, 1]. (Redundant repeated np.array() wrapping
        # of already-converted arrays was removed here.)
        self.pred_list_px_lvl = self.min_max_norm(np.array(grad))
        self.pred_list_img_lvl = self.min_max_norm(self.pred_list_img_lvl)

        threshold = self.est_thresh(self.gt_list_px_lvl.flatten().astype('uint8'), self.pred_list_px_lvl.flatten())
        print('pixel-level best_thr is', threshold)
        visualize_loc_result(self.img_list, self.img_path_list, self.gt_list_px_lvl, self.pred_list_px_lvl,
                             threshold, self.sample_path, c, self.cfg['n_viz'])


   

