import torch
from torch.nn import functional as F
import torch.nn as nn
import cv2
import os
from scipy.ndimage import gaussian_filter 
from sklearn.metrics import roc_auc_score,precision_recall_curve
from utils.metrics import cal_confusion_matrix, estimate_thr_recall
from torchvision import transforms
from utils.embedding_utils import *
from backbones import *
from losses import *
from torchvision import models
from tqdm import tqdm
from torch import optim
from utils.metrics import AverageMeter
from utils.checkpoints import *
from utils.favae_utils import *
from utils.schedulers import *
from utils.optims import *
from .base import BaseModel
import random
import logging

class RIAD(BaseModel):
    """RIAD (Reconstruction by Inpainting for visual Anomaly Detection).

    A UNet is trained to reconstruct images from randomly masked inputs;
    at test time, anomalies are scored with a multi-scale gradient
    magnitude similarity (MSGMS) map between the input and its
    reconstruction, aggregated over several mask patch sizes.
    """

    def __init__(self, cfg):
        # NOTE(review): super(BaseModel, self) deliberately skips
        # BaseModel.__init__ and initializes the next class in the MRO —
        # confirm this is intended before changing it.
        super(BaseModel, self).__init__()

        self.cfg = cfg
        self.n_channels = self.cfg["n_channels"]
        self.crop_size = self.cfg['crop_size']
        self.z_dim = self.cfg['z_dim']
        self.n_viz = self.cfg['n_viz']  # number of samples to visualize
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.lr = float(self.cfg['lr'])

        # Loss weights: total = gamma*MSE + alpha*MSGMS + beta*SSIM.
        self.alpha = self.cfg['alpha']
        self.beta = self.cfg['beta']
        self.gamma = self.cfg['gamma']
        self.k_value = self.cfg['k_value']  # candidate mask patch sizes
        self.logger = logging.getLogger('riad.Train')
        self.init_model()
        self.init_results_list()
        self.init_optimizer()
        self.init_scheduler()
        self.weights_dir_path, self.sample_path = prep_dirs(self.cfg['results_dir'])

    def init_model(self):
        """Build the UNet reconstructor on the selected device.

        Also creates a default Adam optimizer so the method is usable on
        its own; __init__ immediately replaces it via init_optimizer().
        """
        self.model = UNet(n_channels=self.n_channels).to(self.device)
        self.optimizer = optim.Adam(
            filter(lambda p: p.requires_grad, self.model.parameters()),
            lr=self.lr, weight_decay=1e-6)

    def init_results_list(self):
        """Reset all accumulators used by test()."""
        self.gt_list_px_lvl = []      # per-pixel ground-truth masks
        self.pred_list_px_lvl = []    # per-pixel anomaly maps
        self.gt_list_img_lvl = []     # per-image labels
        self.pred_list_img_lvl = []   # per-image anomaly scores
        self.img_path_list = []
        self.img_list = []
        self.recon_imgs_list = []

    def init_optimizer(self):
        """Create the training optimizer named by cfg['optimizer'].

        Raises:
            ValueError: if the name is not one of Adam / SGD / RAdam /
                Adabelief (matched case-insensitively).
        """
        self.optimizer_name = self.cfg['optimizer']
        self.lr = float(self.cfg['lr'])
        trainable = filter(lambda p: p.requires_grad, self.model.parameters())
        name = self.optimizer_name.lower()
        if name == 'adam':
            self.optimizer = optim.Adam(trainable, lr=self.lr, weight_decay=1e-6)
        elif name == 'sgd':
            self.optimizer = optim.SGD(trainable, lr=self.lr, momentum=0.9, nesterov=True)
        elif name == 'radam':
            self.optimizer = RAdam(trainable, lr=self.lr, weight_decay=1e-6)
        elif name == 'adabelief':
            self.optimizer = AdaBelief(trainable, lr=self.lr, weight_decay=1e-6, eps=1e-16,
                                       betas=(0.9, 0.999), weight_decouple=True, rectify=True)
        else:
            raise ValueError('Could Only Support optimizer in [Adam, SGD, RAdam, Adabelief].')

    def init_scheduler(self):
        """Create the LR scheduler named by cfg['scheduler'].

        Raises:
            ValueError: if the name is not 'step' or 'cosine'.
        """
        self.scheduler_name = self.cfg['scheduler']
        self.epochs = self.cfg['epochs']
        if self.scheduler_name == 'step':
            # Halve the LR every 10% of total epochs.
            self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, int(0.1 * self.epochs), 0.5)
        elif self.scheduler_name == 'cosine':
            self.scheduler = CosineAnnealingScheduler(self.optimizer, start_anneal=30, n_epochs=self.epochs)
        else:
            raise ValueError('Could Only Support scheduler in [Step, Cosine].')

    def gen_mask(self, k_list, n, im_size):
        """Generate n complementary patch masks per patch size.

        For each patch size k, the (im_size // k)^2 grid cells are
        randomly partitioned into n groups; each mask zeroes out one
        group of k*k patches, so the n masks of a given k jointly cover
        the whole image.

        Args:
            k_list: iterable of patch sizes.
            n: number of complementary masks per patch size.
            im_size: spatial side length of the (square) input.

        Returns:
            list of np.ndarray of shape (im_size, im_size) with entries
            in {0, 1}; 0 marks the patches to be inpainted.
        """
        Ms = []
        for k in k_list:
            N = im_size // k
            # np.random.permutation shuffles the N*N patch-grid indices.
            rdn = np.random.permutation(N**2)
            additive = N**2 % n
            if additive > 0:
                # Pad with -1 (matches no real index) so the indices
                # split evenly into n groups.
                rdn = np.concatenate((rdn, np.asarray([-1] * (n - additive))))
            n_index = rdn.reshape(n, -1)
            for index in n_index:
                tmp = [0 if i in index else 1 for i in range(N**2)]
                tmp = np.asarray(tmp).reshape(N, N)
                # Upsample the patch-grid mask to full image resolution.
                tmp = tmp.repeat(k, 0).repeat(k, 1)
                Ms.append(tmp)
        return Ms

    def train(self, epoch, train_loader, c):
        """Run one training epoch and checkpoint the model.

        Args:
            epoch: epoch index, used for logging only.
            train_loader: yields (x, y, mask, path) batches.
            c: checkpoint tag; weights are saved as '<c>.pth'.
        """
        self.model.train()

        mse = nn.MSELoss(reduction='sum')
        ssim = SSIM_Loss()
        msgms = MSGMS_Loss()

        mse_losses = AverageMeter()
        gms_losses = AverageMeter()
        ssim_losses = AverageMeter()
        losses = AverageMeter()
        for x, y, mask, path in tqdm(train_loader):
            if not isinstance(x, torch.Tensor):
                # Some datasets wrap the image in a list/tuple; unwrap it.
                if isinstance(x, (list, tuple)):
                    x = x[0]
                    if not isinstance(x, torch.Tensor):
                        raise ValueError('Input should be a torch.Tensor or a list of torch.Tensor.')
                else:
                    raise ValueError('Input should be a torch.Tensor or a list of torch.Tensor.')
            img_size = x.size(-1)
            x = x.to(self.device)
            # Pick one patch size at random for this batch.
            k_value = random.sample(self.k_value, 1)
            Ms = self.gen_mask(k_value, 3, img_size)

            # Reconstruct each masked view, then stitch together only the
            # inpainted (masked-out) regions of each output.
            inputs = [x * (torch.tensor(m, requires_grad=False).to(self.device)) for m in Ms]
            outputs = [self.model(inp) for inp in inputs]
            output = sum(map(lambda o, m: o * (torch.tensor(1 - m, requires_grad=False).to(self.device)), outputs, Ms))

            mse_loss = mse(x, output)
            gms_loss = msgms(x, output)
            ssim_loss = ssim(x, output)

            loss = self.gamma * mse_loss + self.alpha * gms_loss + self.beta * ssim_loss

            mse_losses.update(mse_loss.item(), x.size(0))
            gms_losses.update(gms_loss.item(), x.size(0))
            ssim_losses.update(ssim_loss.item(), x.size(0))
            losses.update(loss.item(), x.size(0))
            # BUGFIX: gradients must be cleared every step; without
            # zero_grad() they accumulate across batches.
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
        torch.save(self.model.state_dict(), os.path.join(self.weights_dir_path, '{}.pth'.format(c)))
        self.logger.info('Train Epoch: {} Loss: {:.6f}'.format(epoch, losses.avg))
        self.scheduler.step()

    def _random_crop(self, tensor, size):
        """Return a random size x size spatial crop of a NCHW tensor."""
        h, w = tensor.shape[-2], tensor.shape[-1]
        x = random.randint(0, w - size)  # random top-left corner
        y = random.randint(0, h - size)
        tensor_crop = tensor[:, :, y:y + size, x:x + size].clone()
        return tensor_crop

    def load_weights(self, ckpt_path):
        """Load model weights from a checkpoint file.

        Accepts either a raw state_dict or a dict wrapping it under a
        'state_dict' key; DataParallel 'module.' prefixes are stripped.
        """
        if torch.cuda.is_available():
            params = torch.load(ckpt_path)
        else:
            params = torch.load(ckpt_path, map_location='cpu')
        # BUGFIX: replaced a bare `except:` (which swallowed every
        # exception, including KeyboardInterrupt) with an explicit check.
        if isinstance(params, dict) and 'state_dict' in params:
            params = params['state_dict']

        self.model.load_state_dict(remove_dataparallel(params))
        print('Pretrained weights from %s has been loaded.' % ckpt_path)

    def _feature_extractor(self, x, model, target_layers):
        """Run x through model's top-level modules sequentially.

        Returns:
            (activations, x): the outputs of the modules whose names are
            in target_layers, and the final output tensor.
        """
        target_activations = list()
        for name, module in model._modules.items():
            x = module(x)
            if name in target_layers:
                target_activations += [x]
        return target_activations, x

    def test(self, c, test_dataloader, weight_path=''):
        """Evaluate on a test set and write localization visualizations.

        Accumulates pixel-level MSGMS anomaly maps (summed over all k in
        self.k_value, each normalized by its grid-cell count), smooths
        them with a Gaussian filter, derives image-level scores as the
        per-image max, estimates a pixel-level threshold, and renders
        visualizations into self.sample_path.
        """
        self.model.eval()
        msgms_score = MSGMS_Score()
        with torch.no_grad():
            for index, data in enumerate(tqdm(test_dataloader)):
                score = 0
                x, y, mask, path = data
                x = x.to(self.device)

                for k in self.k_value:
                    img_size = x.size(-1)
                    N = img_size // k
                    Ms = self.gen_mask([k], 3, img_size)
                    inputs = [x * (torch.tensor(m, requires_grad=False).to(self.device)) for m in Ms]
                    outputs = [self.model(inp) for inp in inputs]
                    output = sum(map(lambda o, m: o * (torch.tensor(1 - m, requires_grad=False).to(self.device)), outputs, Ms))
                    # Normalize by the grid-cell count so scores from
                    # different patch sizes are comparable when summed.
                    score = score + msgms_score(x, output) / (N**2)

                score = score.squeeze().cpu().numpy()
                if score.ndim < 3:
                    # A batch of one collapses to 2-D after squeeze();
                    # restore the leading batch axis.
                    score = np.expand_dims(score, axis=0)
                for i in range(score.shape[0]):
                    # sigma=4 matches the smoothing used elsewhere in the
                    # project (per the original author's note).
                    score[i] = gaussian_filter(score[i], sigma=4)

                # Image-level score = max pixel score of each image.
                img_score = score.reshape(score.shape[0], -1).max(axis=1)

                self.img_list.extend(x.cpu().numpy())
                self.gt_list_px_lvl.extend(mask.cpu().numpy())
                self.pred_list_px_lvl.extend(score)
                # BUGFIX: extend with the whole batch of labels; appending
                # only y[0] misaligned gt vs. pred for batch sizes > 1.
                self.gt_list_img_lvl.extend(y.cpu().numpy())
                self.pred_list_img_lvl.extend(img_score)
                self.img_path_list.extend(path)
                self.recon_imgs_list.extend(output.cpu().numpy())

        # Normalize scores to [0, 1] before threshold estimation.
        self.pred_list_px_lvl = self.min_max_norm(np.array(self.pred_list_px_lvl))
        self.pred_list_img_lvl = self.min_max_norm(np.array(self.pred_list_img_lvl))
        self.gt_list_img_lvl = np.array(self.gt_list_img_lvl)
        self.gt_list_px_lvl = np.array(self.gt_list_px_lvl)

        threshold = self.est_thresh(self.gt_list_px_lvl.flatten().astype('uint8'), self.pred_list_px_lvl.flatten())
        print('pixel-level best_thr is', threshold)

        visualize_loc_result(self.img_list, self.img_path_list, self.gt_list_px_lvl, self.pred_list_px_lvl, threshold, self.sample_path, c, self.n_viz, self.recon_imgs_list)

   


