
import os
import glob
import shutil
import cv2
import numpy as np
from PIL import Image
import torch
from torch.nn import functional as F
from torch import nn
from torchvision import transforms
from sklearn.metrics import confusion_matrix
from torch.utils.data import Dataset, DataLoader
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_recall_curve
from common.utils import *

def reshape_embedding(embedding):
    """Flatten a (B, C, H, W) feature map into a list of C-dim vectors.

    Returns one vector per spatial location of every batch item, ordered
    batch-major then row-major: B*H*W entries, each of shape (C,).

    Args:
        embedding: 4-D tensor/array indexed as [batch, channel, row, col].
    Returns:
        list of 1-D slices `embedding[k, :, i, j]`.
    """
    return [
        embedding[k, :, i, j]
        for k in range(embedding.shape[0])
        for i in range(embedding.shape[2])
        for j in range(embedding.shape[3])
    ]

def cvt2heatmap(gray):
    """Convert a grayscale map (values 0-255) to a BGR JET-colormap heatmap."""
    gray_u8 = np.uint8(gray)
    return cv2.applyColorMap(gray_u8, cv2.COLORMAP_JET)

def heatmap_on_image(heatmap, image):
    """Blend a heatmap onto an image and return a uint8 image.

    Both inputs are treated as HxWx3 uint8 arrays; the heatmap is resized
    to the image when their shapes differ, then the two are summed in
    [0, 1] space and rescaled so the maximum maps to 255.
    """
    if heatmap.shape != image.shape:
        # BUGFIX: cv2.resize takes dsize as (width, height); the original
        # passed (shape[0], shape[1]) = (height, width), distorting
        # non-square images.
        heatmap = cv2.resize(heatmap, (image.shape[1], image.shape[0]))
    out = np.float32(heatmap) / 255 + np.float32(image) / 255
    out = out / np.max(out)
    return np.uint8(255 * out)

def min_max_norm(image):
    """Linearly rescale `image` so its values span [0, 1].

    Works on any array-like with `.min()`/`.max()` (numpy array or torch
    tensor). A constant image is mapped to all zeros instead of producing
    a divide-by-zero NaN/inf result as the original did.
    """
    a_min, a_max = image.min(), image.max()
    span = a_max - a_min
    if span == 0:
        # constant input: define the normalized result as all zeros
        # (multiply by 1.0 to promote integer inputs to float)
        return (image - a_min) * 1.0
    return (image - a_min) / span


def copy_files(src, dst, ignores=()):
    """Recursively copy the contents of directory `src` into directory `dst`.

    Args:
        src: source directory path.
        dst: destination directory path (top level must already exist).
        ignores: iterable of substrings; any entry whose name contains one
            of them is skipped entirely. Default changed from a mutable
            `[]` to an immutable `()` (same behavior, safer default).
    """
    for file_name in os.listdir(src):
        # skip anything whose name contains one of the ignore tokens
        if any(token in file_name for token in ignores):
            continue
        full_file_name = os.path.join(src, file_name)
        if os.path.isfile(full_file_name):
            shutil.copy(full_file_name, os.path.join(dst, file_name))
        elif os.path.isdir(full_file_name):
            os.makedirs(os.path.join(dst, file_name), exist_ok=True)
            copy_files(full_file_name, os.path.join(dst, file_name), ignores)

def prep_dirs(pa, name):
    """Create the per-run output directory layout and return its paths.

    Layout under `{pa}/{name}`: `models/` (embeddings), `sample/`
    (visualizations), `src/` (a snapshot of the source tree).

    BUGFIX: the original referenced an undefined name `root` (NameError
    at the sample/src lines); it is now defined consistently with the
    embeddings path.

    Returns:
        (embeddings_path, sample_path, source_code_save_path)
    """
    root = f'{pa}/{name}'
    # make embeddings dir
    embeddings_path = f'{root}/models'
    os.makedirs(embeddings_path, exist_ok=True)
    # make sample dir
    sample_path = os.path.join(root, 'sample')
    os.makedirs(sample_path, exist_ok=True)
    # make source code record dir & copy
    source_code_save_path = os.path.join(root, 'src')
    os.makedirs(source_code_save_path, exist_ok=True)
    copy_files('./', source_code_save_path, ['.git','.vscode','__pycache__','logs','README','samples','LICENSE']) # copy source code
    return embeddings_path, sample_path, source_code_save_path

def find_gt(pas, name, postfixs, exts):
    """Look for a ground-truth file matching image `name`.

    Tries every combination of search directory (`pas`), filename postfix
    (`postfixs`) and extension (`exts`) against the image's stem, skipping
    any candidate that is the image path itself.

    Returns:
        a single-element list with the first existing candidate path,
        or an empty list when nothing is found.
    """
    stem = os.path.splitext(name)[0]
    for folder in pas:
        for suffix in postfixs:
            for ext in exts:
                candidate = f'{folder}/{stem}{suffix}{ext}'
                if candidate.endswith(name):
                    continue  # this is the query image itself, not a mask
                if os.path.exists(candidate):
                    return [candidate]
    return []

def load_MVTecdataset(pa, include=None):
    """Enumerate an MVTec-style image folder as dataset records.

    Images are discovered under `pa` via `listdirsub`; the matching
    ground-truth mask (if any) is searched in `pa` itself and in the
    sibling `ground_truth` directory. The defect type is the first path
    component of each image's relative path; `good` images get label 0,
    everything else label 1.

    Args:
        pa: dataset split directory (e.g. `.../bottle/test`).
        include: optional collection of defect-type names to keep;
            None keeps everything.
    Returns:
        list of [img_path, gt_path_or_0, label, defect_type].
    """
    norm_root = pa.replace('\\', '/').strip('/')
    parent_dir = os.path.split(norm_root)[0]
    mask_root = f'{parent_dir}/ground_truth'
    extensions = ['.jpg', '.png', '.bmp']
    records = []
    for rel in listdirsub(pa, extensions, False):
        rel_clean = rel.replace(pa, '').replace('\\', '/').strip('/')
        defect_type = rel_clean.split('/')[0]
        if include is not None and defect_type not in include:
            continue
        matches = find_gt([pa, mask_root], rel_clean, ['', '_mask'], extensions)
        gt_entry = matches[0] if matches else 0
        label = 0 if defect_type == 'good' else 1
        records.append([f'{pa}/{rel}', gt_entry, label, defect_type])

    return records


# ImageNet channel statistics (RGB order) used by the Normalize transform
# below; these are the standard values for torchvision pretrained backbones.
mean_train = [0.485, 0.456, 0.406]
std_train = [0.229, 0.224, 0.225]

class MVTecDataset(Dataset):
    """Dataset over records produced by `load_MVTecdataset`.

    Each record is [img_path, gt_path_or_0, label, defect_type]; items are
    returned as (image_tensor, gt_mask_tensor, label, file_stem, defect_type).
    """

    def __init__(self, img_paths, transform, gt_transform, phase):
        # transform: applied to the RGB image; gt_transform: applied to the
        # ground-truth mask. `phase` is accepted for interface compatibility
        # but unused here. labels => good: 0, anomaly: 1
        self.transform = transform
        self.gt_transform = gt_transform
        self.img_paths = img_paths

    def __len__(self):
        return len(self.img_paths)

    def __getitem__(self, idx):
        img_path, gt, label, img_type = self.img_paths[idx]
        img = Image.open(img_path).convert('RGB')
        img = self.transform(img)
        if gt == 0:
            # No ground-truth mask: build an all-zero mask matching the
            # image's H x W. BUGFIX: the original used size()[-2] twice,
            # which produced a wrong mask shape for non-square crops.
            gt = torch.zeros([1, img.size()[-2], img.size()[-1]])
        else:
            gt = Image.open(gt).convert('L')
            gt = self.gt_transform(gt)

        assert img.size()[1:] == gt.size()[1:], "image.size != gt.size !!!"
        # splitext handles any extension length (the original's [:-4]
        # assumed a 3-character extension)
        file_stem = os.path.basename(os.path.splitext(img_path)[0])
        return img, gt, label, file_stem, img_type


def cal_confusion_matrix(y_true, y_pred_no_thresh, thresh, img_path_list):
    """Binarize scores at `thresh`, then print the confusion matrix along
    with the image paths of every false positive and false negative.

    Args:
        y_true: ground-truth labels (0 = good, 1 = anomaly).
        y_pred_no_thresh: continuous anomaly scores, one per sample.
        thresh: decision threshold; a score strictly above it predicts 1.
        img_path_list: image path per sample, used to report mistakes.
    """
    pred_thresh = []
    false_n = []
    false_p = []
    for idx, score in enumerate(y_pred_no_thresh):
        is_anomaly = score > thresh
        pred_thresh.append(1 if is_anomaly else 0)
        if is_anomaly and y_true[idx] == 0:
            false_p.append(img_path_list[idx])
        elif not is_anomaly and y_true[idx] == 1:
            false_n.append(img_path_list[idx])

    cm = confusion_matrix(y_true, pred_thresh)
    print(cm)
    print('false positive')
    print(false_p)
    print('false negative')
    print(false_n)
    
def create_transforms(args):
    """Build the image transform, the GT-mask transform and the inverse
    normalization.

    Args:
        args: must provide `load_size` (resize target) and `input_size`
            (center-crop size).
    Returns:
        (data_transforms, gt_transforms, inv_normalize)
    """
    data_transforms = transforms.Compose([
                    # Image.LANCZOS is the retained alias for the
                    # deprecated/removed Image.ANTIALIAS (same filter)
                    transforms.Resize((args.load_size, args.load_size), Image.LANCZOS),
                    transforms.ToTensor(),
                    transforms.CenterCrop(args.input_size),
                    transforms.Normalize(mean=mean_train,
                                        std=std_train)])
    gt_transforms = transforms.Compose([
                    transforms.Resize((args.load_size, args.load_size)),
                    transforms.ToTensor(),
                    transforms.CenterCrop(args.input_size)])

    # Exact inverse of Normalize(mean_train, std_train).
    # BUGFIX: the original used 0.255 instead of 0.225 for the third
    # channel in both mean and std, corrupting the un-normalized blue channel.
    inv_normalize = transforms.Normalize(mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225],
                                         std=[1/0.229, 1/0.224, 1/0.225])
    return data_transforms, gt_transforms, inv_normalize

def save_anomaly_map(sample_path, anomaly_map, input_img, gt_img, file_name, x_type, thd=None, isng=None):
    """Render and save anomaly-map visualizations for one sample.

    Writes to `sample_path` (created if missing): the raw input image, the
    normalized heatmap, the heatmap overlaid on the input, the ground-truth
    image, and — when `thd` is given — a binary anomaly mask.

    Args:
        sample_path: output directory.
        anomaly_map: 2-D anomaly score map; resized to the input if needed.
        input_img: input image as a numpy array (H x W x 3 assumed).
        gt_img: ground-truth image to save alongside.
        file_name: base name used in output file names.
        x_type: defect-type prefix used in output file names.
        thd: optional score threshold for the binary mask output.
        isng: optional flag; when truthy, a red frame marks the sample as NG.
    Returns:
        0 on completion.
    """
    if anomaly_map.shape != input_img.shape:
        # BUGFIX: cv2.resize takes dsize as (width, height); the original
        # passed (height, width), distorting non-square images.
        anomaly_map = cv2.resize(anomaly_map, (input_img.shape[1], input_img.shape[0]))
    anomaly_map_norm = min_max_norm(anomaly_map)
    # Single colormap pass; the original computed the identical heatmap twice.
    anomaly_map_norm_hm = cvt2heatmap(anomaly_map_norm*255)

    # anomaly map on image
    hm_on_img = heatmap_on_image(anomaly_map_norm_hm, input_img)
    mkdir(sample_path)
    if isng:
        # draw a red frame slightly inside the border to flag NG samples
        h, w = input_img.shape[:2]
        r = extend_rect1(input_img, [0, 0, h, w], -0.05)
        drawrect(input_img, r, (0, 0, 255))
        drawrect(hm_on_img, r, (0, 0, 255))

    if thd is not None:
        anomaly_mask = ((anomaly_map>thd)*255).astype(np.uint8)
        cv_imwrite(f'{sample_path}/{x_type}_{file_name}_amap_mask.jpg', anomaly_mask)
    # save images
    cv_imwrite(f'{sample_path}/{x_type}_{file_name}.jpg', input_img)
    cv_imwrite(f'{sample_path}/{x_type}_{file_name}_amap.jpg', anomaly_map_norm_hm)
    cv_imwrite(f'{sample_path}/{x_type}_{file_name}_amap_on_img.jpg', hm_on_img)
    cv_imwrite(f'{sample_path}/{x_type}_{file_name}_gt.jpg', gt_img)
    return 0
