""" Implementation of the CFlow algorithm for anomaly localization and detection
This method is proposed in the paper:
    'CFLOW-AD: Real-Time Unsupervised Anomaly Detection with Localization via Conditional Normalizing Flows'
The implementation about flow head is adapted from the repository:
    https://github.com/gudovskiy/cflow-ad
"""
import torch
from torch import nn
import torch.optim as optim
import torch.nn.functional as F
import random
import math
import os
import sys
import time
import platform
import logging
import FrEIA.framework as Ff
import numpy as np
import FrEIA.modules as Fm
from adabelief_pytorch import AdaBelief
from tqdm import tqdm
from scipy.ndimage import gaussian_filter
from READ_pytorch.backbones import vit_extractor
import READ_pytorch.backbones as models
from READ_pytorch.utils import set_logger, AverageMeter, EarlyStop
from READ_pytorch.optimizer import RAdam
from READ_pytorch.utils import estimate_thred_with_fpr

backbones = ['vit_base_patch16_224', 'vit_base_patch16_384', 'wide_resnet50', 'resnet50']

def get_logp(C, z, logdet_J):
    """Log-likelihood of latent codes under a standard Gaussian plus the flow's log-det.

    :param C: dimensionality of each latent vector (number of feature channels)
    :param z: latent codes, shape (N, C)
    :param logdet_J: per-sample log|det J| from the normalizing flow, shape (N,)
    :return: per-sample log-likelihood, shape (N,)
    """
    LOG_SQRT_2PI = 0.9189385332046727  # ln(sqrt(2*pi))
    squared_norm = torch.sum(z ** 2, dim=1)
    return -C * LOG_SQRT_2PI - 0.5 * squared_norm + logdet_J

def positionalencoding2d(D, H, W):
    """Build a 2D sinusoidal positional encoding.

    The first D/2 channels encode the width (column) position and the last
    D/2 channels encode the height (row) position, each with interleaved
    sin/cos at geometrically spaced frequencies.

    :param D: dimension of the model (must be divisible by 4)
    :param H: H of the positions
    :param W: W of the positions
    :return: DxHxW position matrix
    """
    if D % 4 != 0:
        raise ValueError("Cannot use sin/cos positional encoding with odd dimension (got dim={:d})".format(D))
    pe = torch.zeros(D, H, W)
    half = D // 2  # half the channels per axis
    freq = torch.exp(torch.arange(0.0, half, 2) * -(math.log(1e4) / half))
    w_pos = torch.arange(0.0, W).unsqueeze(1)
    h_pos = torch.arange(0.0, H).unsqueeze(1)
    # Width terms are constant along H; height terms are constant along W.
    pe[0:half:2] = torch.sin(w_pos * freq).transpose(0, 1).unsqueeze(1).repeat(1, H, 1)
    pe[1:half:2] = torch.cos(w_pos * freq).transpose(0, 1).unsqueeze(1).repeat(1, H, 1)
    pe[half::2] = torch.sin(h_pos * freq).transpose(0, 1).unsqueeze(2).repeat(1, 1, W)
    pe[half + 1::2] = torch.cos(h_pos * freq).transpose(0, 1).unsqueeze(2).repeat(1, 1, W)
    return pe

def subnet_fc(dims_in, dims_out):
    """Two-layer MLP subnet constructor used inside each coupling block."""
    hidden = 2 * dims_in
    return nn.Sequential(
        nn.Linear(dims_in, hidden),
        nn.ReLU(),
        nn.Linear(hidden, dims_out),
    )

# def freia_cflow_head(c, n_feat):
#     n_cond = c.condition_vec
#     coder = Ff.SequenceINN(n_feat)
#     print('CNF coder:', n_feat)
#     for k in range(c.coupling_blocks):
#         coder.append(Fm.AllInOneBlock, cond=0, cond_shape=(n_cond,), subnet_constructor=subnet_fc, affine_clamping=c.clamp_alpha,
#             global_affine_type='SOFTPLUS', permute_soft=True)
#     return coder

def freia_cflow_head(n_feat=768, condition_vec=128, coupling_blocks=8, clamp_alpha=1.9):
    """Build a conditional normalizing-flow head for one feature scale.

    :param n_feat: channel dimension of the input feature vectors
    :param condition_vec: size of the positional-encoding condition vector
    :param coupling_blocks: number of stacked AllInOneBlock coupling layers
    :param clamp_alpha: affine clamping value for numerical stability
    :return: an Ff.SequenceINN flow model
    """
    coder = Ff.SequenceINN(n_feat)
    for _ in range(coupling_blocks):
        coder.append(
            Fm.AllInOneBlock,
            cond=0,
            cond_shape=(condition_vec,),
            subnet_constructor=subnet_fc,
            affine_clamping=clamp_alpha,
            global_affine_type='SOFTPLUS',
            permute_soft=True,
        )
    return coder


class CFlow(object):
    """CFLOW-AD anomaly localization/detection model.

    A frozen, pretrained encoder extracts multi-scale feature maps; one
    conditional normalizing-flow decoder per scale models the likelihood of
    each spatial feature vector ("fiber") conditioned on a 2D positional
    encoding.  Positions whose fibers receive low likelihood are anomalous.
    """

    def __init__(self, backbone='vit_base_patch16_224', **kwargs):
        """Build the frozen encoder and one flow decoder per feature scale.

        :param backbone: one of `backbones` (ViT or (wide-)ResNet extractor).
        """
        super().__init__()
        random.seed(1024)
        torch.manual_seed(1024)
        assert backbone in backbones, f"Only support backbones in {backbones}."
        self.backbone = backbone
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        # Flow-head hyper-parameters are identical for every backbone.
        condition_vec = 128
        coupling_blocks = 8
        clamp_alpha = 1.9
        block_index = [2, 6, 10]
        self.condition_vec = condition_vec

        if 'vit' in self.backbone:
            # Every ViT block emits 768-dim tokens, so all scales share a width.
            pool_dims = [768, 768, 768]
            self.encoder = vit_extractor(backbone=backbone, block_index=block_index)
        else:
            # resnet50 / wide_resnet50 share the same per-stage channel counts.
            pool_dims = [256, 512, 1024]
            self.encoder = getattr(models, self.backbone)(pretrained=True)

        self.encoder = self.encoder.to(self.device).eval()
        self.decoders = [freia_cflow_head(n_feat=pool_dim,
                                          condition_vec=condition_vec,
                                          coupling_blocks=coupling_blocks,
                                          clamp_alpha=clamp_alpha).to(self.device)
                         for pool_dim in pool_dims]

        # Only the flow decoders are trained; the encoder stays frozen.
        self.trainable_parameters = []
        for decoder in self.decoders:
            self.trainable_parameters += list(decoder.parameters())
        self.block_index = block_index
        self.log_theta = torch.nn.LogSigmoid()
        # Normalization statistics and thresholds, filled in by est_thres().
        self.val_max_as = None
        self.val_min_as = None
        self.seg_thres = None
        self.cls_thres = None
        self.ref_map = None

    @staticmethod
    def _unwrap_input(data):
        """Return the image tensor from a raw tensor or a (tensor, ...) batch.

        :raises ValueError: when neither the input nor its first element is a tensor.
        """
        if isinstance(data, torch.Tensor):
            return data
        if isinstance(data, (list, tuple)) and isinstance(data[0], torch.Tensor):
            return data[0]
        raise ValueError('Input should be a torch.Tensor or a list of torch.Tensor.')

    def _to_feature_map(self, feature_cache):
        """Convert one encoder output to a detached BxCxHxW feature map."""
        if 'vit' in self.backbone:
            # Drop the CLS token and fold the token sequence back to a square grid.
            e = feature_cache.transpose(1, 2)[..., 1:]
            hw = int(np.sqrt(e.size(2)))
            return e.reshape(-1, e.size(1), hw, hw)
        return feature_cache.detach()

    def _fiberize(self, e):
        """Flatten a BxCxHxW map into per-position fibers plus positional conditions.

        :return: (c_r, e_r) where c_r is (B*H*W, condition_vec) positional
            encodings and e_r is (B*H*W, C) feature fibers, in raster order.
        """
        B, C, H, W = e.size()
        S = H * W
        E = B * S
        p = positionalencoding2d(self.condition_vec, H, W).to(self.device).unsqueeze(0).repeat(B, 1, 1, 1)
        c_r = p.reshape(B, self.condition_vec, S).transpose(1, 2).reshape(E, self.condition_vec)  # BHWxP
        e_r = e.reshape(B, C, S).transpose(1, 2).reshape(E, C)  # BHWxC
        return c_r, e_r

    def _score_fibers(self, decoder, e, N=256):
        """Per-position log-likelihood (per dimension) for every fiber of e, in order.

        Unlike training, the trailing partial batch of fibers is included so the
        result covers all B*H*W positions.
        """
        B, C, H, W = e.size()
        E = B * H * W
        c_r, e_r = self._fiberize(e)
        log_probs = []
        FIB = E // N + int(E % N > 0)  # number of fiber batches, incl. remainder
        for f in range(FIB):
            idx = torch.arange(f * N, min((f + 1) * N, E))
            z, log_jac_det = decoder(e_r[idx], [c_r[idx], ])
            decoder_log_prob = get_logp(C, z, log_jac_det)
            log_probs += (decoder_log_prob / C).detach().cpu().tolist()
        return log_probs

    def train(self,
                train_data,
                save_path,
                val_data=None,
                expect_fpr=0.01,
                **kwargs):
        """Train the flow decoders, checkpoint each epoch, then estimate thresholds.

        :param train_data: dataset of normal images.
        :param save_path: directory for the log file and checkpoints.
        :param val_data: optional validation dataset; carved from train_data when None.
        :param expect_fpr: target false-positive rate for threshold estimation.
        Keyword args: batch_size (32), lr (2e-4), epochs (200),
        optimizer ('adam'|'sgd'|'radam'|'adabelief'), scheduler ('step'|'cosine'),
        validation_ratio (0.2).
        """
        batch_size = kwargs.get("batch_size", 32)
        lr = kwargs.get("lr", 2e-4)
        epochs = kwargs.get("epochs", 200)
        optimizer_name = kwargs.get("optimizer", 'adam')
        scheduler_name = kwargs.get("scheduler", 'step')
        validation_ratio = kwargs.get("validation_ratio", 0.2)

        self.logger = logging.getLogger('READ.Train')
        set_logger(os.path.join(save_path, 'train.log'), 'READ')
        loader_kwargs = {'num_workers': 8, 'pin_memory': True} if (torch.cuda.is_available() and platform.system() == 'Linux') else {}
        if val_data is None:
            # Carve a validation split out of the training set.
            img_nums = len(train_data)
            valid_num = int(img_nums * validation_ratio)
            train_num = img_nums - valid_num
            train_data, val_data = torch.utils.data.random_split(train_data, [train_num, valid_num])
        train_dataloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, **loader_kwargs)
        val_dataloader = torch.utils.data.DataLoader(val_data, batch_size=int(batch_size), shuffle=True, **loader_kwargs)

        params = filter(lambda p: p.requires_grad, self.trainable_parameters)
        opt_name = optimizer_name.lower()
        if opt_name == 'adam':
            optimizer = optim.Adam(params, lr=lr, weight_decay=0.00001, amsgrad=True)
        elif opt_name == 'sgd':
            optimizer = optim.SGD(params, lr=lr, momentum=0.9, nesterov=True)
        elif opt_name == 'radam':
            optimizer = RAdam(params, lr=lr, weight_decay=0.00001)
        elif opt_name == 'adabelief':
            optimizer = AdaBelief(params, lr=lr, weight_decay=0.00001, eps=1e-16, betas=(0.9, 0.999), weight_decouple=True, rectify=True)
        else:
            raise ValueError('Could Only Support optimizer in [Adam, SGD, RAdam, AdaBelief].')

        if scheduler_name == 'step':
            scheduler = optim.lr_scheduler.StepLR(optimizer, int(0.1 * epochs), 0.5)
        elif scheduler_name == 'cosine':
            scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs, eta_min=1e-8)
        else:
            # An unknown name used to crash later with NameError; fail fast instead.
            raise ValueError('Could Only Support scheduler in [step, cosine].')

        # Sanity-check input geometry on one batch.
        # next(iter(...)) instead of iter(...).next(): .next() was removed in Python 3.
        x_ref, _, _ = next(iter(train_dataloader))
        assert (len(x_ref.shape) == 4), 'input tensor should be 4-dim.'
        assert (x_ref.shape[2] == x_ref.shape[3]), 'Input height should be equal to width.'

        epoch_time = AverageMeter()
        save_lowest = os.path.join(save_path, 'model_lowest_loss.pt')
        # NOTE(review): early stopping is instantiated but not currently applied.
        early_stop = EarlyStop(patience=int(0.1 * epochs) if int(0.1 * epochs) > 20 else 20,
                               save_name=save_lowest)
        save_name = os.path.join(save_path, 'model.pt')
        start_time = time.time()
        for epoch in range(0, epochs):
            self._train(epoch, train_dataloader, optimizer)
            val_loss = self._val(val_dataloader)
            self.save_weights(save_name)
            self.logger.info('Train Epoch: {} Val Loss: {:.6f}'.format(epoch, val_loss))
            epoch_time.update(time.time() - start_time)
            start_time = time.time()
            scheduler.step()

        torch.cuda.empty_cache()
        self.est_thres(val_data, expect_fpr=expect_fpr)

    def _train(self, epoch, train_loader, optimizer):
        """One optimization epoch over random fiber batches; logs the mean loss."""
        self.decoders = [decoder.train() for decoder in self.decoders]
        self.encoder.eval()
        losses = AverageMeter()
        N = 256  # fibers per optimization step
        for data in tqdm(train_loader):
            data = self._unwrap_input(data).to(self.device)
            with torch.no_grad():
                feature_list = self.encoder(data)
            if 'vit' not in self.backbone:
                feature_list = feature_list[:3]

            for l, feature_cache in enumerate(feature_list):
                e = self._to_feature_map(feature_cache)
                B, C, H, W = e.size()
                E = B * H * W
                c_r, e_r = self._fiberize(e)
                # Shuffle fibers so each step sees positions from the whole batch.
                perm = torch.randperm(E).to(self.device)
                decoder = self.decoders[l]
                FIB = E // N  # trailing partial batch is dropped during training
                assert FIB > 0, 'MAKE SURE WE HAVE ENOUGH FIBERS, otherwise decrease N or batch-size!'
                for f in range(FIB):
                    idx = torch.arange(f * N, (f + 1) * N)
                    c_p = c_r[perm[idx]]  # NxP
                    e_p = e_r[perm[idx]]  # NxC
                    z, log_jac_det = decoder(e_p, [c_p, ])
                    decoder_log_prob = get_logp(C, z, log_jac_det)
                    log_prob = decoder_log_prob / C  # likelihood per dim
                    loss = -self.log_theta(log_prob)
                    optimizer.zero_grad()
                    loss.mean().backward()
                    optimizer.step()
                    losses.update(loss.mean().item(), len(loss))

        self.logger.info('Train Epoch: {} Train Loss: {:.6f}'.format(epoch, losses.avg))

    def _val(self, val_loader):
        """Validation epoch: mean negative log-sigmoid likelihood over fiber batches."""
        self.decoders = [decoder.eval() for decoder in self.decoders]
        self.encoder.eval()
        losses = AverageMeter()
        N = 256  # fibers per evaluation step
        for data in tqdm(val_loader):
            data = self._unwrap_input(data).to(self.device)
            # No gradients are needed anywhere during validation.
            with torch.no_grad():
                feature_list = self.encoder(data)
                if 'vit' not in self.backbone:
                    feature_list = feature_list[:3]
                for l, feature_cache in enumerate(feature_list):
                    e = self._to_feature_map(feature_cache)
                    B, C, H, W = e.size()
                    E = B * H * W
                    c_r, e_r = self._fiberize(e)
                    perm = torch.randperm(E).to(self.device)
                    decoder = self.decoders[l]
                    FIB = E // N
                    assert FIB > 0, 'MAKE SURE WE HAVE ENOUGH FIBERS, otherwise decrease N or batch-size!'
                    for f in range(FIB):
                        idx = torch.arange(f * N, (f + 1) * N)
                        z, log_jac_det = decoder(e_r[perm[idx]], [c_r[perm[idx]], ])
                        log_prob = get_logp(C, z, log_jac_det) / C  # likelihood per dim
                        loss = -self.log_theta(log_prob)
                        losses.update(loss.mean().item(), len(loss))

        return losses.avg

    def est_thres(self, val_data, expect_fpr=0.01, **kwargs):
        """Estimate score normalization stats and seg/cls thresholds on validation data.

        :param val_data: validation dataset of normal images.
        :param expect_fpr: target false-positive rate for the thresholds.
        Keyword args: batch_size (32).
        """
        batch_size = kwargs.get("batch_size", 32)

        loader_kwargs = {'num_workers': 8, 'pin_memory': True} if (torch.cuda.is_available() and platform.system() == 'Linux') else {}
        val_dataloader = torch.utils.data.DataLoader(val_data, batch_size=int(batch_size), shuffle=False, **loader_kwargs)
        self.decoders = [decoder.eval() for decoder in self.decoders]
        self.encoder.eval()
        val_scores = []
        for data in tqdm(val_dataloader):
            data = self._unwrap_input(data).to(self.device)
            img_size = data.size(-1)
            with torch.no_grad():
                feature_list = self.encoder(data)
                if 'vit' not in self.backbone:
                    feature_list = feature_list[:3]
                val_mask = 0
                for l, feature_cache in enumerate(feature_list):
                    e = self._to_feature_map(feature_cache)
                    B, C, H, W = e.size()
                    log_probs = self._score_fibers(self.decoders[l], e)
                    val_norm = torch.tensor(log_probs, dtype=torch.double)  # BHWx1
                    val_mask_cache = val_norm.reshape(-1, H, W)
                    # squeeze(1), not squeeze(): a plain squeeze() collapses the
                    # batch dimension when B == 1 and corrupts val_scores below.
                    val_mask_cache = F.interpolate(val_mask_cache.unsqueeze(1),
                        size=img_size, mode='bilinear', align_corners=True).squeeze(1).numpy()
                    val_mask += val_mask_cache
            val_scores.extend(val_mask)

        val_scores = np.asarray(val_scores)
        self.val_max_as = val_scores.max()
        self.val_min_as = val_scores.min()

        # Min-max normalize, then map log-likelihood to an anomaly score:
        # high likelihood -> score near 0, low likelihood -> score near e - 1.
        val_scores = (val_scores - self.val_min_as) / (self.val_max_as - self.val_min_as)
        val_scores = np.clip(val_scores, -50000, 1.0)
        val_scores = np.exp(1.0) - np.exp(val_scores)
        val_img_scores = val_scores.reshape(val_scores.shape[0], -1).max(axis=1)
        self.seg_thres = estimate_thred_with_fpr(val_scores, expect_fpr=expect_fpr)
        self.cls_thres = estimate_thred_with_fpr(val_img_scores, expect_fpr=expect_fpr)

    def predict(self, test_data, **kwargs):
        """Score a batch of images.

        :param test_data: 4-dim image tensor (B, C, H, W) with H == W.
        :return: (img_score, score) — per-image anomaly scores of shape (B,)
            and per-pixel anomaly maps of shape (B, H, W).
        """
        self.decoders = [decoder.eval() for decoder in self.decoders]
        self.encoder.eval()
        score = 0
        with torch.no_grad():
            data = test_data.to(self.device)
            img_size = data.size(-1)
            feature_list = self.encoder(data)
            if 'vit' not in self.backbone:
                feature_list = feature_list[:3]
            for l, feature_cache in enumerate(feature_list):
                e = self._to_feature_map(feature_cache)
                B, C, H, W = e.size()
                log_probs = self._score_fibers(self.decoders[l], e)
                test_norm = torch.tensor(log_probs, dtype=torch.double)  # BHWx1
                test_mask_cache = test_norm.reshape(-1, H, W)
                # squeeze(1) keeps the batch dimension even when B == 1.
                test_mask_cache = F.interpolate(test_mask_cache.unsqueeze(1),
                    size=img_size, mode='bilinear', align_corners=True).squeeze(1).numpy()
                score += test_mask_cache

        if score.ndim < 3:
            score = np.expand_dims(score, axis=0)
        # Smooth each per-image map before normalization.
        for i in range(score.shape[0]):
            score[i] = gaussian_filter(score[i], sigma=7)

        if (self.val_max_as is not None) and (self.val_min_as is not None):
            score = (score - self.val_min_as) / (self.val_max_as - self.val_min_as)
        score = np.clip(score, -50000, 1.0)
        score = np.exp(1.0) - np.exp(score)

        img_score = score.reshape(score.shape[0], -1).max(axis=1)

        return img_score, score

    def save_weights(self, save_name):
        """Serialize encoder/decoder weights plus normalization stats and thresholds."""
        state = {'encoder_state_dict': self.encoder.state_dict(),
                 'decoder_state_dict': [decoder.state_dict() for decoder in self.decoders],
                 'ref_map': self.ref_map,
                 'val_max_as': self.val_max_as,
                 'val_min_as': self.val_min_as,
                 'seg_thres': self.seg_thres,
                 'cls_thres': self.cls_thres}
        torch.save(state, save_name)

    def load_weights(self, ckpt_path):
        """Restore a checkpoint written by save_weights (maps to CPU when no GPU)."""
        if torch.cuda.is_available():
            params = torch.load(ckpt_path)
        else:
            params = torch.load(ckpt_path, map_location='cpu')

        self.ref_map = params['ref_map']
        self.val_max_as = params['val_max_as']
        self.val_min_as = params['val_min_as']
        self.seg_thres = params['seg_thres']
        self.cls_thres = params['cls_thres']

        self.encoder.load_state_dict(params["encoder_state_dict"], strict=False)
        for decoder, state in zip(self.decoders, params["decoder_state_dict"]):
            decoder.load_state_dict(state, strict=False)