import torch
from torch.nn import functional as F
import torch.nn as nn
import cv2
import os
from scipy.ndimage import gaussian_filter 
from sklearn.metrics import roc_auc_score,precision_recall_curve
from utils.metrics import cal_confusion_matrix, estimate_thr_recall
from torchvision import transforms
from utils.embedding_utils import *
from backbones.vae import *
from torchvision import models
from tqdm import tqdm
from torch import optim
from utils.metrics import AverageMeter
from utils.checkpoints import *
from utils.favae_utils import *
from utils.schedulers import *
from utils.optims import *
from .base import BaseModel
import random
import logging


class FAVAE(BaseModel):
    """Feature-Augmented VAE (FAVAE) for image anomaly detection.

    A VAE reconstructs the input image while adapter layers regress the
    decoder's intermediate activations onto the matching multi-scale
    feature maps of a frozen, ImageNet-pretrained VGG16 "teacher".
    Training loss = reconstruction MSE + feature-matching MSE
    + kld_weight * KL divergence. At test time the per-pixel anomaly
    score is the summed reconstruction + feature-matching error.
    """

    def __init__(self, cfg):
        # NOTE(review): super(BaseModel, self) deliberately(?) skips
        # BaseModel.__init__ and initializes BaseModel's own parent
        # instead — confirm against BaseModel before changing.
        super(BaseModel, self).__init__()
        self.cfg = cfg
        self.n_channels = self.cfg["n_channels"]
        self.crop_size = self.cfg['crop_size']
        self.z_dim = self.cfg['z_dim']
        self.n_viz = self.cfg['n_viz']  # number of samples to visualize
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.lr = float(self.cfg['lr'])
        self.kld_weight = self.cfg['kld_weight']  # weight of the KLD term in the loss
        self.logger = logging.getLogger('favae.Train')
        self.init_model()
        self.init_results_list()
        self.init_optimizer()
        self.init_scheduler()
        self.weights_dir_path, self.sample_path = prep_dirs(self.cfg['results_dir'])

    def init_model(self):
        """Build the trainable VAE and the frozen VGG16 feature teacher."""
        self.model = VAE(input_channel=self.n_channels, output_size=self.crop_size, z_dim=self.z_dim).to(self.device)
        self.teacher = models.vgg16(pretrained=True).to(self.device)
        # The teacher only provides target features; it is never updated.
        for param in self.teacher.parameters():
            param.requires_grad = False

    def init_results_list(self):
        """Reset the accumulators that test() fills per image."""
        self.gt_list_px_lvl = []       # ground-truth pixel masks
        self.pred_list_px_lvl = []     # predicted pixel-level anomaly maps
        self.gt_list_img_lvl = []      # ground-truth image labels
        self.pred_list_img_lvl = []    # predicted image-level scores
        self.img_path_list = []
        self.img_list = []
        self.recon_imgs_list = []      # VAE reconstructions, for visualization

    def init_optimizer(self):
        """Create the optimizer named in cfg['optimizer'] (case-insensitive).

        Raises:
            ValueError: if the name is not one of Adam / SGD / RAdam / Adabelief.
        """
        self.optimizer_name = self.cfg['optimizer']
        self.lr = float(self.cfg['lr'])
        # Only optimize parameters that actually require gradients.
        params = [p for p in self.model.parameters() if p.requires_grad]
        name = self.optimizer_name.lower()
        if name == 'adam':
            self.optimizer = optim.Adam(params, lr=self.lr, weight_decay=1e-6)
        elif name == 'sgd':
            self.optimizer = optim.SGD(params, lr=self.lr, momentum=0.9, nesterov=True)
        elif name == 'radam':
            self.optimizer = RAdam(params, lr=self.lr, weight_decay=1e-6)
        elif name == 'adabelief':
            self.optimizer = AdaBelief(params, lr=self.lr, weight_decay=1e-6, eps=1e-16,
                                       betas=(0.9, 0.999), weight_decouple=True, rectify=True)
        else:
            raise ValueError('Could Only Support optimizer in [Adam, SGD, RAdam, Adabelief].')

    def init_scheduler(self):
        """Create the LR scheduler named in cfg['scheduler'].

        Raises:
            ValueError: if the name is not 'step' or 'cosine'.
        """
        self.scheduler_name = self.cfg['scheduler']
        self.epochs = self.cfg['epochs']
        if self.scheduler_name == 'step':
            # Halve the LR every 10% of the total epochs.
            self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, int(0.1 * self.epochs), 0.5)
        elif self.scheduler_name == 'cosine':
            self.scheduler = CosineAnnealingScheduler(self.optimizer, start_anneal=30, n_epochs=self.epochs)
        else:
            raise ValueError('Could Only Support scheduler in [Step, Cosine].')

    def train(self, epoch, train_loader, c):
        """Run one training epoch and checkpoint the model.

        Args:
            epoch: current epoch index (for logging only).
            train_loader: yields (x, y, mask, path) batches.
            c: category name used for the checkpoint filename.
        """
        self.model.train()
        self.teacher.eval()
        losses = AverageMeter()
        MSE_loss = nn.MSELoss(reduction='sum')

        for x, y, mask, path in tqdm(train_loader):
            # Some datasets yield the image wrapped in a list/tuple; unwrap it.
            if not isinstance(x, torch.Tensor):
                if isinstance(x, (list, tuple)):
                    x = x[0]
                if not isinstance(x, torch.Tensor):
                    raise ValueError('Input should be a torch.Tensor or a list of torch.Tensor.')
            img_size = x.size(-1)
            x = x.to(self.device)
            if img_size > self.crop_size:
                x = self._random_crop(x, self.crop_size)
            # z: (bs, z_dim) latent; output: (bs, C, crop, crop) reconstruction;
            # mu / log_var: (bs, z_dim) posterior parameters.
            z, output, mu, log_var = self.model(x)

            # Intermediate activations of the student decoder and the VGG teacher.
            s_activations, _ = self._feature_extractor(z, self.model.decode, target_layers=['11', '17', '23'])
            t_activations, _ = self._feature_extractor(x, self.teacher.features, target_layers=['7', '14', '21'])
            self.optimizer.zero_grad()

            # Reconstruction loss between the output and the input image.
            mse_loss = MSE_loss(output, x)
            # KL divergence of the posterior N(mu, exp(log_var)) vs. N(0, I).
            kld_loss = 0.5 * torch.sum(-1 - log_var + torch.exp(log_var) + mu**2)

            # Feature-matching loss: decoder features (deepest first, hence the
            # reversed index) are adapted and regressed onto teacher features.
            for i in range(len(s_activations)):
                s_act = self.model.adapter[i](s_activations[-(i + 1)])
                mse_loss += MSE_loss(s_act, t_activations[i])
            # kld_weight balances the KLD term against the MSE terms.
            loss = mse_loss + self.kld_weight * kld_loss
            losses.update(loss.item(), x.size(0))

            loss.backward()
            self.optimizer.step()
        torch.save(self.model.state_dict(), os.path.join(self.weights_dir_path, '{}.pth'.format(c)))
        self.logger.info('Train Epoch: {} Loss: {:.6f}'.format(epoch, losses.avg))
        self.scheduler.step()

    def _random_crop(self, tensor, size):
        """Return a random (size x size) spatial crop of a (..., H, W) tensor."""
        h, w = tensor.shape[-2], tensor.shape[-1]
        x = random.randint(0, w - size)
        y = random.randint(0, h - size)
        return tensor[:, :, y:y + size, x:x + size].clone()

    def load_weights(self, ckpt_path):
        """Load model weights from a checkpoint file.

        Accepts either a raw state dict or a dict wrapping it under
        'state_dict'; strips any DataParallel 'module.' prefixes.
        """
        if torch.cuda.is_available():
            params = torch.load(ckpt_path)
        else:
            params = torch.load(ckpt_path, map_location='cpu')
        # Unwrap only when the checkpoint actually carries a 'state_dict' key;
        # avoids the former bare except that hid real errors.
        if isinstance(params, dict) and "state_dict" in params:
            params = params["state_dict"]

        self.model.load_state_dict(remove_dataparallel(params))
        print('Pretrained weights from %s has been loaded.' % ckpt_path)

    def _feature_extractor(self, x, model, target_layers):
        """Run x through model's submodules, collecting named activations.

        Returns:
            (list of activations at target_layers, final output).
        """
        target_activations = []
        for name, module in model._modules.items():
            x = module(x)
            if name in target_layers:
                target_activations.append(x)
        return target_activations, x

    def test(self, c, test_dataloader, weight_path=''):
        """Score every test image and produce localization visualizations.

        Args:
            c: category name (selects '<c>.pth' under weight_path).
            test_dataloader: yields (x, y, mask, path); assumed batch size 1
                for the image-level label (y[0]) — TODO confirm.
            weight_path: optional directory with pretrained weights.
        """
        if weight_path:
            # map_location keeps CUDA-saved checkpoints loadable on CPU.
            weight = torch.load(os.path.join(weight_path, str(c) + '.pth'), map_location=self.device)
            self.model.load_state_dict(weight)
        self.model.eval()
        self.teacher.eval()
        MSE_loss = nn.MSELoss(reduction='none')
        with torch.no_grad():
            for index, data in enumerate(tqdm(test_dataloader)):
                score = 0
                x, y, mask, path = data
                x = x.to(self.device)
                img_size = x.size(-1)
                assert (img_size >= self.crop_size), 'Input size should not be smaller than the crop size.'
                if img_size > self.crop_size:
                    # Tile the image into crop-sized patches, score each patch,
                    # then stitch the patch maps back into a full-size map.
                    x = get_patch(x, self.crop_size)
                    z, output, _, _ = self.model(x)
                    s_activations, _ = self._feature_extractor(z, self.model.decode, target_layers=['11', '17', '23'])
                    t_activations, _ = self._feature_extractor(x, self.teacher.features, target_layers=['7', '14', '21'])
                    # Per-pixel reconstruction error, summed over channels.
                    score = MSE_loss(output, x).sum(1, keepdim=True)
                    for i in range(len(s_activations)):
                        s_act = self.model.adapter[i](s_activations[-(i + 1)])
                        mse_loss = MSE_loss(s_act, t_activations[i]).sum(1, keepdim=True)
                        # Upsample the feature-level error to image resolution.
                        score += F.interpolate(mse_loss, size=x.size(2), mode='bilinear', align_corners=False)
                    output = patch2img(output.cpu(), img_size, self.crop_size)
                    score = patch2img(score.cpu(), img_size, self.crop_size)
                else:
                    z, output, _, _ = self.model(x)
                    s_activations, _ = self._feature_extractor(z, self.model.decode, target_layers=['11', '17', '23'])
                    t_activations, _ = self._feature_extractor(x, self.teacher.features, target_layers=['7', '14', '21'])

                    score = MSE_loss(output, x).sum(1, keepdim=True)
                    for i in range(len(s_activations)):
                        s_act = self.model.adapter[i](s_activations[-(i + 1)])
                        mse_loss = MSE_loss(s_act, t_activations[i]).sum(1, keepdim=True)
                        score += F.interpolate(mse_loss, size=x.size(2), mode='bilinear', align_corners=False)

                score = score.squeeze().cpu().numpy()
                # Keep a leading batch axis even when squeeze dropped it.
                if score.ndim < 3:
                    score = np.expand_dims(score, axis=0)
                # Smooth each anomaly map to suppress pixel noise.
                for i in range(score.shape[0]):
                    score[i] = gaussian_filter(score[i], sigma=4)

                # Image-level score = maximal pixel score.
                img_score = score.reshape(score.shape[0], -1).max(axis=1)
                self.img_list.extend(x.cpu().numpy())
                self.gt_list_px_lvl.extend(mask.cpu().numpy())
                self.pred_list_px_lvl.extend(score)
                self.gt_list_img_lvl.append(y.cpu().numpy()[0])
                self.pred_list_img_lvl.extend(img_score)
                self.img_path_list.extend(path)
                self.recon_imgs_list.extend(output.cpu().numpy())

            # Normalize predictions to [0, 1] before thresholding.
            self.pred_list_px_lvl = self.min_max_norm(np.array(self.pred_list_px_lvl))
            self.pred_list_img_lvl = self.min_max_norm(np.array(self.pred_list_img_lvl))
            self.gt_list_img_lvl = np.array(self.gt_list_img_lvl)
            self.gt_list_px_lvl = np.array(self.gt_list_px_lvl)

            threshold = self.est_thresh(self.gt_list_px_lvl.flatten().astype('uint8'), self.pred_list_px_lvl.flatten())
            print('pixel-level best_thr is', threshold)
            visualize_loc_result(self.img_list, self.img_path_list, self.gt_list_px_lvl, self.pred_list_px_lvl, threshold, self.sample_path, c, self.n_viz, self.recon_imgs_list)


