import argparse
import os

import torch
import torch.nn.functional as F
from torchvision import transforms
from tqdm import tqdm
from sklearn.metrics import roc_auc_score

from anomaly.models import SemiOrthogonal
from anomaly.utils import compute_pro_score
from anomaly.datasets import train_dataloader, test_dataset
from anomaly.utils.metrics import cal_confusion_matrix, estimate_thr_recall


# Command-line interface: evaluate SemiOrthogonal on one MVTec class directory.
parser = argparse.ArgumentParser("SemiOrthogonal test on MVTEC")
add_arg = parser.add_argument
add_arg("--data_root", required=True)    # contain class
add_arg("--backbone", choices=["resnet18", "wide_resnet50"], default="resnet18")
add_arg("--size", default="256x256")     # "WxH" string, parsed below
add_arg("-k", type=int, default=100)
add_arg("--weights_dir", default="/home/ops/anomaly_lab/semi_ort/weights", help="model dir to save")
add_arg("--result_dir", default="/home/ops/anomaly_lab/semi_ort/result", help="results of heatmap region and scores to save ")
args = parser.parse_args()


# Pick the compute device and decode the "WxH" size string into an int tuple.
device = "cuda" if torch.cuda.is_available() else "cpu"
size = tuple(int(dim) for dim in args.size.split("x"))

semi_orthogonal = SemiOrthogonal(
    k=args.k, device=device, backbone=args.backbone, size=size)

# Single pass over the training set: accumulate per-batch statistics,
# then finalize the Gaussian parameter estimates.
print(">> Training")
for batch, _ in tqdm(train_dataloader):
    semi_orthogonal.train_one_batch(batch.to(device))
# TODO: also pickle the fitted model to args.weights_dir (currently unused)
semi_orthogonal.finalize_training()

n_test_images = len(test_dataset)
print(f">> Testing on {n_test_images} images")

# Image-level ground-truth labels and anomaly scores (for ROC AUC / threshold).
y_trues = []
y_scores = []
# Pixel-level anomaly maps and ground-truth masks (for the PRO score).
amaps = []
masks = []
# "<defect_class>_<filename>" identifiers passed to cal_confusion_matrix.
imgname_lists = []

# FIX: the original wrapped test_dataset in `(x for _, x in
# zip(range(n_test_images), test_dataset))` — a no-op limiter since
# n_test_images == len(test_dataset) — and bound an unused enumerate index.
for img, mask, label, imgfile in tqdm(test_dataset, total=n_test_images):
    # e.g. ".../broken_large/000.png" -> "broken_large_000.png"
    img_cla = os.path.basename(os.path.dirname(imgfile))
    img_name = os.path.basename(imgfile)
    imgname_lists.append(img_cla + "_" + img_name)

    img = img.to(device)
    preds = semi_orthogonal.predict(img.unsqueeze(0))
    # Image-level score: the hottest pixel of the anomaly map.
    y_scores.append(preds.max().item())
    y_trues.append(label)

    masks.append(mask.unsqueeze(0))
    amaps.append(preds.unsqueeze(0))

# Smooth the stacked anomaly maps and min-max normalise them into [0, 1].
gaussian_smoothing = transforms.GaussianBlur(9)

amaps = torch.cat(amaps)
# Upsample per-patch scores to the full image resolution.
amaps = F.interpolate(amaps, size, mode="bilinear", align_corners=True)
amaps = gaussian_smoothing(amaps)
amaps -= amaps.min()
# FIX: guard the division — if every map is constant, max() is 0 after the
# min-shift and the original code flooded amaps with NaNs.
amaps /= amaps.max().clamp_min(1e-12)
masks = torch.cat(masks)

# NOTE(review): squeeze() also drops the batch dim when there is exactly one
# test image — compute_pro_score presumably tolerates this; verify.
amaps = amaps.squeeze().cpu().numpy()
masks = masks.squeeze().cpu().numpy()

# Image-level ROC AUC over the per-image max scores.
roc_score = roc_auc_score(y_trues, y_scores)
print(f">> ROC AUC Score = {roc_score}")

# Operating threshold chosen by the recall-based estimator.
best_thr = estimate_thr_recall(y_trues, y_scores)
print('best_thr is:', best_thr)

results = dict()

# Confusion-matrix metrics at the chosen threshold.
# FIX: "noraml_recall" -> "normal_recall" (typo in both the local variable
# and the key written to the results file).
normal_recall, abnormal_recall, precision = \
    cal_confusion_matrix(y_trues, y_scores, img_path_list=imgname_lists, thresh=best_thr)
results["normal_recall"] = round(normal_recall, 4)
results["abnormal_recall"] = round(abnormal_recall, 4)
results["precision"] = round(precision, 4)
results["best_thr"] = round(best_thr, 6)

# Pixel-level PRO (per-region overlap) score on the normalised maps.
pro_score = compute_pro_score(amaps, masks)
print(f">> PRO Score     = {pro_score}")

results["roc_score"] = round(roc_score, 4)
results["pro_score"] = round(pro_score, 4)

# Append this class's metric dict to a shared, cumulative results file.
save_dir = args.result_dir
# FIX: exist_ok avoids the check-then-create race of `if not exists: makedirs`.
os.makedirs(save_dir, exist_ok=True)

with open(os.path.join(save_dir, 'semi_ort.txt'), 'a') as f:
    f.write(os.path.basename(args.data_root) + ' : ' + str(results) + '\n')
