""" Implementation of the PatchCore algorithm for anomaly localization and detection
This method is proposed in the paper:
    'Towards Total Recall in Industrial Anomaly Detection'
"""

import os
import platform
import torch
import torch.nn as nn
import torch.nn.functional as F
# from torch.utils.data import DataLoader
from torchvision.models import wide_resnet50_2, resnet18
# from torchvision import models
from collections import OrderedDict
import numpy as np
import cv2
from tqdm import tqdm
from scipy.ndimage import gaussian_filter
from sklearn.random_projection import SparseRandomProjection
from READ_pytorch.sampling_methods import kCenterGreedy
import gc
import time
from random import sample
import pickle
from READ_pytorch.utils import estimate_thred_with_fpr
import sys
# from scipy.spatial.distance import mahalanobis
# from sklearn.covariance import LedoitWolf

##############################################################
####################### PatchCore Model ###########################
##############################################################

class PatchCore(object):
    """PatchCore anomaly detector ('Towards Total Recall in Industrial
    Anomaly Detection').

    Mid-level features (layer2 concatenated with layer3) are extracted from a
    pretrained ResNet backbone for every training patch, subsampled into a
    coreset "memory bank" via greedy k-center selection, and test patches are
    scored by their Euclidean distance to the nearest memory-bank entries.
    """

    # Coreset memory bank: numpy array of shape (n_coreset, feat_dim).
    # Populated by train() or load_weights(); None until then.
    train_outputs = None

    def __init__(self, backbone='wide_resnet50_2', corest_ratio=0.01, topk=5):
        """
        Args:
            backbone (str): 'wide_resnet50_2' or 'resnet18' (ImageNet pretrained).
            corest_ratio (float): fraction of extracted patch features kept in
                the coreset memory bank.
            topk (int): number of nearest neighbours used for the re-weighted
                image-level score.

        Raises:
            ValueError: if `backbone` is not one of the supported names.
        """
        self.backbone = backbone
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        if self.backbone == 'resnet18':
            self.model = resnet18(pretrained=True, progress=True)
            t_d = 448   # total channels across layer1+2+3 feature maps
            d = 100     # size of random channel subset (kept for compatibility)
        elif self.backbone == 'wide_resnet50_2':
            self.model = wide_resnet50_2(pretrained=True, progress=True)
            t_d = 1792
            d = 550
        else:
            raise ValueError('This backbone has not been supported yet.')
        self.model.to(self.device)
        self.model.eval()
        # Random channel selection indices; not used by the current pipeline
        # but kept so external code relying on `self.idx` still works.
        self.idx = torch.tensor(sample(range(0, t_d), d))
        self.randomprojector = SparseRandomProjection(n_components='auto', eps=0.9)
        self.coreset_sampling_ratio = corest_ratio
        self.train_size = None  # (H, W) of the training images
        self.topk = topk
        # Attributes filled in later by train()/est_thres(); initialized here
        # so that calling save_weights()/predict() early fails cleanly instead
        # of raising AttributeError.
        self.train_outputs_cache = None  # full (uncompressed) train embeddings
        self.val_max_as = None           # normalization stats for score maps
        self.val_min_as = None
        self.seg_thres = None
        self.cls_thres = None

    @staticmethod
    def _unwrap_batch(x):
        """Return the image tensor from a dataloader batch.

        Accepts either a bare tensor or a (tensor, ...) list/tuple (e.g. an
        (image, label, mask) dataset).

        Raises:
            ValueError: if no tensor can be extracted.
        """
        if isinstance(x, (list, tuple)):
            x = x[0]
        if not isinstance(x, torch.Tensor):
            raise ValueError('Input should be a torch.Tensor or a list of torch.Tensor.')
        return x

    def _forward_features(self, x):
        """Run the backbone on `x` and return [layer1, layer2, layer3] outputs.

        Hooks are registered per call and removed afterwards. The original
        implementation registered fresh hooks on every train()/predict() call
        and never removed them, leaking hooks (and the stale closure lists
        they appended to) across calls.
        """
        feats = []

        def hook(module, input, output):
            feats.append(output.cpu().detach())

        handles = [
            self.model.layer1[-1].register_forward_hook(hook),
            self.model.layer2[-1].register_forward_hook(hook),
            self.model.layer3[-1].register_forward_hook(hook),
        ]
        try:
            with torch.no_grad():
                _ = self.model(x.to(self.device))
        finally:
            for h in handles:
                h.remove()
        return feats

    def train(self,
              train_data,
              save_path,
              expect_fpr=0.01,
              **kwargs):
        """Build the coreset memory bank from normal (defect-free) images.

        Args:
            train_data: torch Dataset yielding image tensors (or tuples whose
                first element is the image tensor).
            save_path (str): directory where 'model.pkl' is written.
            expect_fpr (float): target false-positive rate for threshold
                estimation.
            **kwargs: `batch_size` (default 32).
        """
        batch_size = kwargs.get("batch_size", 32)
        # Worker processes + pinned memory only help on CUDA/Linux setups.
        loader_kwargs = {'num_workers': 8, 'pin_memory': True} \
            if (torch.cuda.is_available() and platform.system() == 'Linux') else {}
        train_dataloader = torch.utils.data.DataLoader(
            train_data, batch_size=batch_size, shuffle=True, **loader_kwargs)

        train_outputs = OrderedDict([('layer1', []), ('layer2', []), ('layer3', [])])
        for x in tqdm(train_dataloader, '| feature extraction | train |'):
            x = self._unwrap_batch(x)
            self.train_size = (x.size(-2), x.size(-1))
            feats = self._forward_features(x)
            for k, v in zip(train_outputs.keys(), feats):
                train_outputs[k].append(v)
        for k, v in train_outputs.items():
            train_outputs[k] = torch.cat(v, 0)

        # Concatenate layer2 with (spatially aligned) layer3 embeddings.
        embedding_vectors = train_outputs['layer2']
        for layer_name in ['layer3']:
            embedding_vectors = self._embedding_concat(embedding_vectors, train_outputs[layer_name])
        # Keep the full (B, C, H, W) embeddings for threshold estimation.
        self.train_outputs_cache = embedding_vectors

        # Flatten patches to rows: (B*H*W, C).
        embedding_vectors = embedding_vectors.permute(0, 2, 3, 1)
        embedding_vectors = torch.flatten(embedding_vectors, start_dim=0, end_dim=-2).detach().numpy()

        # Coreset subsampling: greedy k-center over a sparse random projection.
        self.randomprojector.fit(embedding_vectors)
        selector = kCenterGreedy(embedding_vectors, 0, 0)
        selected_idx = selector.select_batch(
            model=self.randomprojector,
            already_selected=[],
            N=int(embedding_vectors.shape[0] * self.coreset_sampling_ratio))
        self.train_outputs = embedding_vectors[selected_idx]

        torch.cuda.empty_cache()

        self.save_weights(os.path.join(save_path, 'model.pkl'))
        self.est_thres(val_data=train_data, expect_fpr=expect_fpr)

    def save_weights(self, filepath):
        """Pickle [full embedding cache, coreset bank, train image size]."""
        with open(filepath, 'wb') as f:
            pickle.dump([self.train_outputs_cache, self.train_outputs, self.train_size], f)

    def load_weights(self, filepath):
        """Load the feature bank written by save_weights().

        NOTE(review): pickle.load executes arbitrary code from the file —
        only load weight files from trusted sources.
        """
        print('load train set feature from: %s' % filepath)
        with open(filepath, 'rb') as f:
            loaded_features = pickle.load(f)
        self.train_outputs_cache = loaded_features[0]
        self.train_outputs = loaded_features[1]
        self.train_size = loaded_features[2]

    def est_thres(self, val_data, expect_fpr=0.01):
        """Estimate normalization stats and seg/cls thresholds.

        Thresholds are estimated on the training embeddings cached during
        train(); `val_data` is unused and kept only for interface
        compatibility.

        Args:
            val_data: unused.
            expect_fpr (float): target false-positive rate.
        """
        assert self.train_outputs is not None, 'Should train the model or load weights at first.'
        memory_bank = torch.from_numpy(self.train_outputs).to(self.device)
        val_score_map_list = []
        val_score_list = []
        # One image at a time to bound the distance-matrix memory footprint.
        for i in range(self.train_outputs_cache.shape[0]):
            embedding_val = self.train_outputs_cache[i:i + 1, :]
            fmap_h, fmap_w = embedding_val.size(-2), embedding_val.size(-1)
            embedding_val = embedding_val.permute(0, 2, 3, 1)
            embedding_val = torch.flatten(embedding_val, start_dim=0, end_dim=-2).detach().to(self.device)
            # NOTE(review): _calc_dist_matrix already returns Euclidean
            # distances; the extra **(1/2) is kept (applied consistently in
            # predict() too) so scores stay monotonically equivalent.
            dist = self._calc_dist_matrix(embedding_val, memory_bank, 2) ** (1 / 2)
            # topk+1 because each training patch is (near) its own nearest
            # neighbour; column 1 is the first genuine neighbour.
            topk_values = dist.topk(self.topk + 1, largest=False)[0].detach().cpu().numpy()
            anomaly_map = topk_values[:, 1].reshape((fmap_h, fmap_w))
            # Soft re-weighting from the neighbour distances of the most
            # anomalous patch (Eq. 7 of the PatchCore paper).
            N_b = topk_values[np.argmax(topk_values[:, 1])]
            w = (1 - (np.max(np.exp(N_b)) / np.sum(np.exp(N_b))))
            score = w * max(topk_values[:, 1])  # image-level score
            # cv2.resize takes dsize=(width, height); train_size is (H, W).
            anomaly_map_resized = cv2.resize(anomaly_map, (self.train_size[1], self.train_size[0]))
            anomaly_map_resized_blur = gaussian_filter(anomaly_map_resized, sigma=4)
            val_score_map_list.append(anomaly_map_resized_blur)
            val_score_list.append(score)

        val_score_map = np.asarray(val_score_map_list)
        val_score = np.asarray(val_score_list)

        self.val_max_as = val_score_map.max()
        # BUG FIX: the original used val_score.min() (image-level scores) for
        # the min while the max came from the pixel-level maps; both
        # normalization bounds must come from the same statistic.
        self.val_min_as = val_score_map.min()
        self.seg_thres = estimate_thred_with_fpr(val_score_map, expect_fpr=expect_fpr)
        self.cls_thres = estimate_thred_with_fpr(val_score, expect_fpr=expect_fpr)
        torch.cuda.empty_cache()

    def predict(self, x):
        """Score one image for anomalies.

        Args:
            x: image tensor of shape (1, C, H, W) — like the original code,
               a single image per call is assumed (the flattened patch grid
               is reshaped back into one feature map).

        Returns:
            (img_score, score): image-level score array of shape (1,) and the
            (1, H, W) normalized pixel-level anomaly map.
        """
        assert self.train_outputs is not None, 'Should train the model or load weights at first.'
        feats = self._forward_features(x)
        test_outputs = OrderedDict(zip(['layer1', 'layer2', 'layer3'], feats))

        # Same layer2+layer3 embedding as used at training time.
        embedding_vectors = test_outputs['layer2']
        for layer_name in ['layer3']:
            embedding_vectors = self._embedding_concat(embedding_vectors, test_outputs[layer_name])

        # Feature-map size derived from the tensor instead of the original
        # hard-coded (28, 28), which silently assumed 224x224 inputs.
        fmap_h, fmap_w = embedding_vectors.size(-2), embedding_vectors.size(-1)
        embedding_vectors = embedding_vectors.permute(0, 2, 3, 1)
        embedding_vectors = torch.flatten(embedding_vectors, start_dim=0, end_dim=-2).detach().to(self.device)
        dist = self._calc_dist_matrix(
            embedding_vectors, torch.from_numpy(self.train_outputs).to(self.device), 2) ** (1 / 2)
        topk_values = dist.topk(self.topk, largest=False)[0].detach().cpu().numpy()
        anomaly_map = topk_values[:, 0].reshape((fmap_h, fmap_w))
        N_b = topk_values[np.argmax(topk_values[:, 0])]
        w = (1 - (np.max(np.exp(N_b)) / np.sum(np.exp(N_b))))

        # cv2.resize takes dsize=(width, height); train_size is (H, W).
        anomaly_map_resized = cv2.resize(anomaly_map, (self.train_size[1], self.train_size[0]))
        anomaly_map_resized_blur = gaussian_filter(anomaly_map_resized, sigma=4)

        if (self.val_max_as is not None) and (self.val_min_as is not None):
            # Min-max normalize with the stats estimated in est_thres().
            score = (anomaly_map_resized_blur - self.val_min_as) / (self.val_max_as - self.val_min_as)
        else:
            # BUG FIX: the original left `score` undefined (NameError) when
            # the normalization stats were missing; fall back to the raw map.
            score = anomaly_map_resized_blur

        if len(score.shape) == 2:
            score = np.expand_dims(score, axis=0)
        img_score = w * score.reshape(score.shape[0], -1).max(axis=1)

        return img_score, score

    def _embedding_concat(self, x, y):
        """Concatenate feature maps of different resolutions channel-wise.

        `y` (coarser, H2xW2) is aligned to `x` (finer, H1xW1) by unfolding
        `x` into s*s sub-grids, concatenating `y` onto each, and folding
        back — equivalent to nearest-neighbour upsampling of `y`.
        """
        B, C1, H1, W1 = x.size()
        _, C2, H2, W2 = y.size()
        s = int(H1 / H2)  # spatial downscale factor between the two maps
        x = F.unfold(x, kernel_size=s, dilation=1, stride=s)
        x = x.view(B, C1, -1, H2, W2)
        z = torch.zeros(B, C1 + C2, x.size(2), H2, W2)
        for i in range(x.size(2)):
            z[:, :, i, :, :] = torch.cat((x[:, :, i, :, :], y), 1)
        z = z.view(B, -1, H2 * W2)
        z = F.fold(z, kernel_size=s, output_size=(H1, W1), stride=s)

        return z

    def _reset_train_features(self):
        """Drop all learned state and re-initialize the backbone."""
        self.train_outputs = None
        # BUG FIX: pass the current hyper-parameters through; the original
        # silently reset coreset ratio and topk to their defaults.
        self.__init__(self.backbone, self.coreset_sampling_ratio, self.topk)
        torch.cuda.empty_cache()

    def _calc_dist_matrix(self, x, y, p=2, slc_n=100):
        """Calculate the (n, m) Euclidean distance matrix between row sets.

        Args:
            x: tensor of shape (n, d).
            y: tensor of shape (m, d).
            p: power used inside the sum (2 -> squared differences).
            slc_n: row-chunk size; x is processed in slices to bound the
                memory used by the (slc_n, m, d) broadcast.

        Returns:
            CPU float tensor of shape (n, m).
        """
        n = x.size(0)
        m = y.size(0)
        d = x.size(1)
        dist_matrix = torch.zeros((n, m))
        for i in range(0, n, slc_n):
            if i + slc_n > n:
                slc_n = n - i  # final partial slice
            # half precision halves the memory of the broadcasted difference
            slicex = x[i:i + slc_n, :].unsqueeze(1).expand(slc_n, m, d).half()
            slicey = y.unsqueeze(0).expand(slc_n, m, d).half()
            dist_matrix[i:i + slc_n, :] = torch.sqrt(torch.pow(slicex - slicey, p).sum(2)).detach().float()
        return dist_matrix