# from scipy._lib.six import with_metaclass
from tqdm import tqdm
import torch

import numpy as np
from sklearn.metrics import roc_auc_score
from anomaly.utils.knn_utils import GaussianBlur, get_coreset_idx_randomp

import os
import pickle
from anomaly.backbones import ResNet50, WideResNet50
from anomaly.utils.metrics import cal_confusion_matrix, estimate_thr_recall, write_results


class KNNExtractor(torch.nn.Module):
	"""Base class for kNN-based anomaly detectors.

	Holds a frozen CNN backbone used purely as a feature extractor and
	provides the shared evaluation / visualisation logic.  Subclasses
	implement `fit` (build the memory bank) and `predict` (score a sample).

	Args:
		args: experiment config namespace; `result_dir` is read here, and
			`write_results` reads further fields from it.
		backbone: 'wide_resnet50_2' selects WideResNet50; any other value
			falls back to ResNet50.
		pool: if True, `__call__` additionally returns a globally
			average-pooled embedding of the last feature map.
	"""

	def __init__(
		self,
		args,
		backbone: str = "resnet50",
		pool: bool = False,
	):
		super().__init__()
		self.args = args

		self.feature_extractor = WideResNet50() if backbone == 'wide_resnet50_2' else ResNet50()

		# The backbone is never trained: freeze it and keep it in eval mode.
		for param in self.feature_extractor.parameters():
			param.requires_grad = False
		self.feature_extractor.eval()

		self.pool = torch.nn.AdaptiveAvgPool2d(1) if pool else None
		self.backbone = backbone  # for results metadata

		self.device = "cuda" if torch.cuda.is_available() else "cpu"
		self.feature_extractor.to(self.device)

	def __call__(self, x):
		"""Return the backbone's multi-scale feature maps for batch `x`, on CPU.

		With `pool` enabled, also returns the pooled embedding of the last map.
		"""
		# Inference only — no autograd graph needed even if `x` requires grad.
		with torch.no_grad():
			feature_maps = self.feature_extractor(x.to(self.device))
		feature_maps = [fmap.to("cpu") for fmap in feature_maps]
		# Explicit None check: don't rely on nn.Module truthiness semantics.
		if self.pool is not None:
			z = self.pool(feature_maps[-1])
			return feature_maps, z
		return feature_maps

	def fit(self, train_ds):
		"""Build the model's memory bank from normal samples (subclass hook)."""
		raise NotImplementedError

	def evaluate(self, test_ds):
		"""Call the predict step on each test sample and compute metrics.

		Besides the ROC-AUC scores, this reports normal/abnormal recall and
		writes the falsely-judged sample paths via `write_results`, and dumps
		per-sample heatmaps via `draw_result`.

		Returns:
			Tuple `(image_rocauc, pixel_rocauc)`.
		"""
		image_preds = []
		image_labels = []
		pixel_preds = []
		pixel_labels = []
		pred_maps = []
		ps = []

		for sample, mask, label, p in tqdm(test_ds):
			z_score, fmap = self.predict(sample.unsqueeze(0))
			pred_maps.append(fmap.numpy())
			ps.append(p)

			image_preds.append(z_score.numpy())
			image_labels.append(label)

			pixel_preds.extend(fmap.flatten().numpy())
			pixel_labels.extend(mask.flatten().numpy())

		self.draw_result(pred_maps, ps, self.args.result_dir)  # TODO add region not just heatmap

		image_preds = np.stack(image_preds)
		# Threshold chosen to maximise F1 on the image-level scores.
		best_f1_v = estimate_thr_recall(image_labels, image_preds)

		normal_recall, abnormal_recall, precision, false_p, false_n = cal_confusion_matrix(
			image_labels, image_preds, best_f1_v, ps)

		image_rocauc = roc_auc_score(image_labels, image_preds)
		pixel_rocauc = roc_auc_score(pixel_labels, pixel_preds)

		write_results(self.args, image_rocauc, pixel_rocauc, normal_recall, abnormal_recall,
			precision, best_f1_v, false_p, false_n, 'result_knn_pathcore.txt')

		return image_rocauc, pixel_rocauc

	def get_parameters(self):
		"""Return a dict of metadata describing this model (subclass hook)."""
		raise NotImplementedError

	def draw_result(self, pred_maps, pred_paths, save_p):
		"""Write each anomaly heatmap to `save_p` as an 8-bit grayscale image.

		The output filename is '<parent-dir>_<basename>' of the source image
		path.  TODO: also draw segmented anomaly regions, not just the heatmap.
		"""
		import cv2  # local import: only needed for visualisation
		if not os.path.exists(save_p):
			os.makedirs(save_p)

		for mp, pd in zip(pred_maps, pred_paths):
			img = mp.transpose((1, 2, 0))
			# Min-max normalise to [0, 255]; a constant map would otherwise
			# divide by zero and produce NaNs.
			span = img.max() - img.min()
			img = (img - img.min()) / span if span > 0 else np.zeros_like(img)
			img = (img * 255).astype(np.uint8)
			img_name = os.path.basename(pd)
			img_cls = os.path.basename(os.path.dirname(pd))
			save_name = img_cls + "_" + img_name
			cv2.imwrite(os.path.join(save_p, save_name), img)


class PatchCoreK(KNNExtractor):
	"""PatchCore anomaly detector backed by a coreset-subsampled patch bank.

	`fit` collects locally-aware patch features from normal images,
	optionally reduces them to a coreset, and pickles the bank into
	`args.weights_dir`.  `predict` scores a sample by its worst
	nearest-neighbour patch distance, re-weighted as in eq. 7 of the
	PatchCore paper.
	"""

	def __init__(
		self,
		args,
		f_coreset: float = 0.01,
		backbone : str = "wide_resnet50_2",
	):
		super().__init__(
			args,
			backbone=backbone,
			pool=False,
		)
		self.args = args
		self.f_coreset = f_coreset  # fraction of patches kept after coreset subsampling
		self.image_size = 224
		# 3x3 neighbourhood aggregation -> "locally aware" patch features.
		self.average = torch.nn.AvgPool2d(3, stride=1)
		self.blur = GaussianBlur(4)
		self.n_reweight = 3  # k used in the nearest-neighbour re-weighting

		self.patch_lib = []
		self.resize = None  # lazily built once the feature-map size is known
		# Device placement is already handled by KNNExtractor.__init__.

	def fit(self, train_ds):
		"""Build the patch memory bank from `train_ds` and pickle it to disk.

		Args:
			train_ds: iterable of (sample, label) pairs of normal images;
				must expose `cls` (category name) for the pickle filename.
		"""
		for sample, _ in tqdm(train_ds):
			feature_maps = self(sample.unsqueeze(0))

			if self.resize is None:
				# Pool every scale to the resolution of the largest feature map.
				largest_fmap_size = feature_maps[0].shape[-2:]
				self.resize = torch.nn.AdaptiveAvgPool2d(largest_fmap_size)
			resized_maps = [self.resize(self.average(fmap)) for fmap in feature_maps]
			patch = torch.cat(resized_maps, 1)
			patch = patch.reshape(patch.shape[1], -1).T  # (n_patches, feat_dim)

			self.patch_lib.append(patch)

		self.patch_lib = torch.cat(self.patch_lib, 0)

		if self.f_coreset < 1:
			# Greedy coreset subsampling keeps the bank small without losing coverage.
			self.coreset_idx = get_coreset_idx_randomp(
				self.patch_lib,
				n=int(self.f_coreset * self.patch_lib.shape[0]),
			)
			self.patch_lib = self.patch_lib[self.coreset_idx]

		embedding_dir = self.args.weights_dir
		os.makedirs(embedding_dir, exist_ok=True)

		# BUGFIX: keep `patch_lib` a tensor so `predict` (torch.cdist) works
		# right after `fit`; only the pickled copy is converted to numpy.
		pickle_path = os.path.join(embedding_dir, f'{train_ds.cls}.pickle')
		# BUGFIX: the existence check previously looked in the CWD rather than
		# in `embedding_dir`, so the bank was re-written on every run.
		if not os.path.exists(pickle_path):
			with open(pickle_path, 'wb') as f:
				pickle.dump(self.patch_lib.detach().numpy(), f)

	def predict(self, sample):
		"""Score one sample.

		Args:
			sample: image batch of shape (1, C, H, W) — TODO confirm batch
				size 1 is assumed by callers.

		Returns:
			Tuple `(s, s_map)` — scalar anomaly score and the
			`(1, 1, image_size, image_size)` anomaly heatmap.
		"""
		feature_maps = self(sample)
		resized_maps = [self.resize(self.average(fmap)) for fmap in feature_maps]
		patch = torch.cat(resized_maps, 1)
		patch = patch.reshape(patch.shape[1], -1).T

		# Distance of every test patch to its nearest neighbour in the bank.
		dist = torch.cdist(patch, self.patch_lib)
		min_val, min_idx = torch.min(dist, dim=1)
		s_idx = torch.argmax(min_val)  # most anomalous patch
		s_star = torch.max(min_val)

		# Re-weighting (eq. 7 of the paper): soften s* by how spread the
		# bank is around the matched patch.
		m_test = patch[s_idx].unsqueeze(0)  # anomalous patch
		m_star = self.patch_lib[min_idx[s_idx]].unsqueeze(0)  # closest neighbour
		w_dist = torch.cdist(m_star, self.patch_lib)  # find knn to m_star pt.1
		_, nn_idx = torch.topk(w_dist, k=self.n_reweight, largest=False)  # pt.2
		m_star_knn = torch.linalg.norm(m_test - self.patch_lib[nn_idx[0, 1:]], dim=1)
		w = 1 - (torch.exp(s_star) / torch.sum(torch.exp(m_star_knn)))
		s = w * s_star

		# Upsample per-patch distances into a full-resolution segmentation map.
		s_map = min_val.view(1, 1, *feature_maps[0].shape[-2:])
		s_map = torch.nn.functional.interpolate(
			s_map, size=(self.image_size, self.image_size), mode='bilinear'
		)
		s_map = self.blur(s_map)

		return s, s_map

	def get_parameters(self):
		"""Metadata describing this model's configuration."""
		return {
			"backbone": self.backbone,
			"f_coreset": self.f_coreset,
			"n_reweight": self.n_reweight,
		}
