# Public API of this module. The original list omitted several functions that
# are clearly public (FAISS-based search and the metric/similarity choosers);
# adding names here is backward compatible for star-imports.
__all__ = [
    "load_image_tensor",
    "compute_similar_images",
    "compute_similar_images_based_faiss",
    "compute_similar_orb_features",
    "plot_similar_images_with_pred",
    "compute_similarity",
    "obtain_distance_fun",
    "similarity_fun_choose",
    "dist_sim_fun_choose",
]

import config_classical_features_oil as config
import numpy as np
import torch
import torch_classical_features_model_oil as torch_model
from torch_train_classical_features_model_oil import mean_std_scale_features_with_trainfeature, min_max_scale_features_with_trainfeature
from sklearn.neighbors import NearestNeighbors
import torchvision.transforms as T
import os
import cv2
from PIL import Image
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
# import torchvision
from tqdm import tqdm
import time
from scipy import stats
# from sklearn.metrics import pairwise
from scipy.spatial import distance as scipy_distance
from tools.faissindex import FaissIndex
# import traj_dist.distance as tdist_distance

plt.rcParams['font.sans-serif'] = ['SimHei']  # use the SimHei font so Chinese (CJK) labels render correctly
plt.rcParams['font.size'] = 12  # default font size for all plot text
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly when a CJK font is active


def load_image_tensor(image_path, device):
    """
    Load an image from disk as a single-channel tensor batch on ``device``.

    Args:
        image_path: path to the image to be loaded.
        device: "cuda" or "cpu" (or any ``torch.device``).

    Returns:
        A tensor of shape (1, 1, H, W) on ``device`` (uint8, since
        ``PILToTensor`` converts without rescaling).
    """
    image_resize_size = config.IMG_HEIGHT
    # Use a deterministic resize for retrieval queries. The original used
    # T.RandomResizedCrop, which takes a *random* crop on every call and made
    # the query embedding (and therefore the retrieved neighbours)
    # non-deterministic between runs.
    image = T.Resize((image_resize_size, image_resize_size))(Image.open(image_path))
    image = T.Grayscale(num_output_channels=1)(image)
    image_tensor = T.PILToTensor()(image)
    # Add the batch dimension: (C, H, W) -> (1, C, H, W).
    image_tensor = image_tensor.unsqueeze(0)
    input_images = image_tensor.to(device)
    return input_images


def compute_similar_images(image_path, num_images, embedding, device):
    """
    KNN search: given a query image, return its ``num_images`` nearest
    neighbours in the pre-computed embedding database.

    NOTE(review): relies on a module-level ``encoder`` (created in
    ``__main__``) to embed the query image.

    Args:
        image_path: path to the query image.
        num_images: number of similar images to retrieve.
        embedding: (num_database_images, embedding_dim) database embeddings.
        device: "cuda" or "cpu".

    Returns:
        (enc_output, indices_list, nn_similarity_results) where enc_output is
        the raw encoder output for the query, indices_list is the nested KNN
        index result (e.g. [[3, 7, ...]]) and nn_similarity_results holds one
        similarity score (rounded to 4 decimals) per returned index.
    """
    image_tensor = load_image_tensor(image_path, device)

    with torch.no_grad():
        enc_output = encoder(image_tensor)

    # Flatten to (batch, features) so it can be fed to the KNN query.
    flattened_embedding = enc_output.reshape((enc_output.shape[0], -1))

    knn = NearestNeighbors(n_neighbors=num_images, metric="correlation")
    knn.fit(embedding)
    _, indices = knn.kneighbors(flattened_embedding)
    indices_list = indices.tolist()

    # Re-score each candidate with an explicit similarity measure; the KNN
    # distances themselves are discarded.
    nn_similarity_results = []
    if indices_list:
        metric_method = "pearson"  # alternatives: jaccard, cosine, euclidean, corr
        query_vector = flattened_embedding[0]
        for candidate_index in tqdm(indices_list[0]):
            similarity_result = compute_similarity(query_vector, embedding[candidate_index], metric_method)
            nn_similarity_results.append(np.around(similarity_result, 4))
    return enc_output, indices_list, nn_similarity_results


def compute_similar_images_based_faiss(image_path, num_images, faiss_dir, embedding, device):
    """
    FAISS search: given a query image, return its ``num_images`` nearest
    neighbours from a FAISS index stored on disk.

    NOTE(review): relies on a module-level ``encoder`` (created in
    ``__main__``) to embed the query image.

    Args:
        image_path: path to the query image.
        num_images: number of similar images to retrieve.
        faiss_dir: directory the FAISS index is loaded from.
        embedding: (num_database_images, embedding_dim) database embeddings,
            used only for the explicit similarity re-scoring.
        device: "cuda" or "cpu".

    Returns:
        (enc_output, query_results_idxs, images_list, dist_list,
        nn_similarity_results). The tuple is returned even when the index
        yields no hits — the original fell off the end of the function and
        returned None in that case, which crashed callers unpacking 5 values.
    """
    image_tensor = load_image_tensor(image_path, device)

    with torch.no_grad():
        enc_output = encoder(image_tensor)

    # Flatten to (batch, features) for per-candidate similarity scoring.
    flattened_embedding = enc_output.reshape((enc_output.shape[0], -1))

    faiss_index_use = FaissIndex(enc_output.shape[1])
    faiss_index_use.load(faiss_dir)
    dist_list, query_results_idxs = faiss_index_use.search(query=enc_output, k=num_images)

    # Map FAISS ids back to image paths.
    images_list = faiss_index_use.decode_ids(query_results_idxs[0])

    # Re-score each candidate with an explicit similarity measure.
    nn_similarity_results = []
    indices = query_results_idxs[0]
    if len(indices) > 0:
        metric_method = "cosine"  # alternatives: jaccard, pearson, euclidean, corr
        query_vector = flattened_embedding[0]
        for candidate_index in tqdm(indices):
            similarity_result = compute_similarity(query_vector, embedding[candidate_index], metric_method)
            nn_similarity_results.append(np.around(similarity_result, 4))
    return enc_output, query_results_idxs, images_list, dist_list, nn_similarity_results
def obtain_distance_fun(metric_method):
    """
    Map a metric name to its SciPy distance (or correlation) function.

    Args:
        metric_method: one of "pearson", "cosine", "euclidean", "jaccard",
            "corr".

    Returns:
        A callable accepting two 1-D arrays. An unknown name yields a callable
        that returns None — the original fallback was a zero-argument lambda
        that raised TypeError as soon as it was invoked with two arrays.
    """
    metric_method_list = {
        "pearson": stats.pearsonr,
        "cosine": scipy_distance.cosine,
        "euclidean": scipy_distance.euclidean,
        "jaccard": scipy_distance.jaccard,
        "corr": scipy_distance.correlation,
    }
    return metric_method_list.get(metric_method, lambda *_args: None)


def origin_similarity_fun(features_distance):
    """Identity mapping: use the distance/correlation value itself as the similarity."""
    return features_distance


def exponential_similarity_fun(features_distance):
    """Convert a distance d into a similarity in (0, 1] via exp(-d)."""
    return np.exp(-features_distance)


def fraction_similarity_fun(features_distance):
    """Convert a non-negative distance d into a similarity in (0, 1] via 1 / (1 + d)."""
    return 1 / (1 + features_distance)


def dist_sim_fun_choose(metric_method):
    """
    Map a distance-to-similarity conversion name to its function.

    Args:
        metric_method: "fraction", "exponential" or "origin".

    Returns:
        A one-argument callable converting a distance into a similarity. An
        unknown name yields a callable that returns None — the original
        fallback was a zero-argument lambda that raised TypeError as soon as
        it was called with a distance value.
    """
    conversion_table = {
        "fraction": fraction_similarity_fun,
        "exponential": exponential_similarity_fun,
        "origin": origin_similarity_fun,
    }
    return conversion_table.get(metric_method, lambda _distance: None)


def similarity_fun_choose(metric_method):
    """
    Choose the distance-to-similarity conversion matching a distance metric.

    Bounded metrics ("pearson", "cosine", "corr") pass the value through
    unchanged; unbounded distances ("euclidean", "jaccard") are mapped through
    1 / (1 + d).

    Args:
        metric_method: a metric name as accepted by ``obtain_distance_fun``.

    Returns:
        A one-argument callable. An unknown name yields a callable that
        returns None — the original fallback was a zero-argument lambda that
        raised TypeError as soon as it was called with a distance value.
    """
    conversion_table = {
        "pearson": origin_similarity_fun,
        "cosine": origin_similarity_fun,
        "euclidean": fraction_similarity_fun,
        "jaccard": fraction_similarity_fun,
        "corr": origin_similarity_fun,
    }
    return conversion_table.get(metric_method, lambda _distance: None)


def compute_similarity(array_X, array_Y, metric_method):
    """
    Compute a similarity score between two 1-D arrays.

    "cosine" returns 1 - cosine distance; "euclidean", "jaccard" and "corr"
    map their distance d to 1 / (1 + d); "pearson" — and any unrecognized
    metric name — returns the Pearson correlation coefficient.
    """
    if metric_method == "cosine":
        return 1 - scipy_distance.cosine(array_X, array_Y)
    if metric_method == "euclidean":
        return 1 / (1 + scipy_distance.euclidean(array_X, array_Y))
    if metric_method == "jaccard":
        return 1 / (1 + scipy_distance.jaccard(array_X, array_Y))
    if metric_method == "corr":
        return 1 / (1 + scipy_distance.correlation(array_X, array_Y))
    # "pearson" and any unknown metric fall back to Pearson correlation.
    correlation_coefficient, _p_value = stats.pearsonr(array_X, array_Y)
    return correlation_coefficient


def obtain_image_database(img_dir_path):
    """
    Scan an image directory laid out as ``img_dir_path/<class_name>/<image>``.

    Args:
        img_dir_path: root directory, e.g. 'images/'. A trailing separator is
            no longer required now that paths are built with ``os.path.join``
            (the original concatenated strings and silently produced broken
            paths without it).

    Returns:
        (image_paths, image_labels, categories): two parallel numpy arrays of
        per-image file paths and class names, plus the list of class
        directory names.
    """
    dataX_path_list = []
    dataY_path_list = []

    categories = os.listdir(img_dir_path)
    for category_class in categories:
        category_dir = os.path.join(img_dir_path, category_class)
        for img_file_name in os.listdir(category_dir):
            dataX_path_list.append(os.path.join(category_dir, img_file_name))
            dataY_path_list.append(category_class)
    return np.asarray(dataX_path_list), np.asarray(dataY_path_list), categories


def obtain_images_from_list(imgs_list):
    """Open every path in ``imgs_list`` as an RGB PIL image and return them as a list."""
    return [Image.open(img_path).convert("RGB") for img_path in imgs_list]


def plot_similar_images_with_pred(query_image_path, pred_img_class, indices_list, network_similarity_results, image_format):
    """
    Plot the query image together with the retrieved similar database images.

    Args:
        query_image_path: path of the query image (shown first in the figure).
        pred_img_class: predicted class of the query; forwarded unchanged to
            the plotting helper.
        indices_list: list of lists of indices, e.g. [[1, 2, 3]]; only the
            first inner list is used. NOTE(review): index 0 is skipped as a
            "dummy embedding" and every other index is shifted by -1 before
            lookup — this must match how the embedding database was built;
            confirm against the embedding-creation script.
        network_similarity_results: one similarity score per returned index;
            forwarded to the plotting helper.
        image_format: currently unused.
    """

    indices = indices_list[0]
    result_images = []
    result_images_name = []
    result_images_labels = []
    count_max = config.NUM_IMAGES
    count = 0
    image_list, label_list, classes_list = obtain_image_database(config.IMG_DATA_PATH)
    for index in indices:
        # confirm indices with image

        if index == 0:
            # index 0 is a dummy embedding.
            pass
        else:
            # Stop once the requested number of results has been collected.
            if count >= count_max:
                break
            else:
                # real_index = str(index - 1)
                real_index = index - 1
                # img_name = image_list[real_index]
                # img_path = os.path.join(config.DATA_PATH + img_name)
                img_path = image_list[real_index]
                img_name = os.path.basename(img_path)
                # print(img_path)
                img = Image.open(img_path).convert("RGB")
                result_images.append(img)

                result_images_name.append(img_name)
                result_images_labels.append(label_list[real_index])
                count = count + 1

    plotImages_with_pred_class(query_image_path, pred_img_class, result_images, result_images_name, result_images_labels, network_similarity_results,
                               config.NUM_IMAGES)
    # plt.imshow(img)
    # plt.show()
    # img.save(f"../outputs/query_image_3/recommended_{index - 1}.jpg")


def plotImages_with_pred_class(query_image_path, pred_img_class, X_batch, X_batch_name, y_batch, network_similarity_results, n_img):
    """
    Render the query image plus the ranked result images in one figure and
    save it under ``../outputs/query_image_<DATASET_NAME>/``.

    Args:
        query_image_path: path of the query image; its parent directory name
            is used as the ground-truth class label.
        pred_img_class: predicted class (accepted for interface compatibility;
            not rendered by the active code path).
        X_batch: list of result images (PIL images / arrays imshow accepts).
        X_batch_name: file name of each result image.
        y_batch: class label of each result image.
        network_similarity_results: similarity score per result image.
        n_img: requested number of result images (drives the grid layout).
    """
    max_c = 5  # max images per row

    # Row 1 holds the query image; result images start on row 2.
    if n_img <= max_c:
        r = 2
        c = n_img
    else:
        r = int(np.ceil(n_img / max_c)) + 1
        c = max_c

    fig, axes = plt.subplots(r, c, figsize=(15, 15))
    plt.subplots_adjust(left=None, bottom=None, right=None, top=None,
                        wspace=0.4, hspace=0.3)
    axes = axes.flatten()

    query_image_data = Image.open(query_image_path).convert("RGB")
    # Ground-truth class = name of the directory containing the query image.
    true_label = os.path.normpath(query_image_path).split(os.sep)[-2]
    query_image_name = os.path.basename(query_image_path)
    axes[0].imshow(query_image_data, cmap='gray')
    axes[0].set_title('query image: {}'.format(query_image_name) +
                      '\n class:{}'.format(true_label))
    # Hide the axes of the entire first row (only cell 0 shows the query).
    for i in range(max_c):
        axes[i].set_axis_off()

    img_save_dir = f"../outputs/query_image_{config.DATASET_NAME}"
    if not os.path.exists(img_save_dir):
        os.makedirs(img_save_dir)

    # The original reused the loop variable after the loop for the save name,
    # which raised NameError when y_batch was empty; fall back to the query
    # image's name in that case.
    img_name = query_image_name
    for i in range(len(y_batch)):
        img_batch = X_batch[i]
        label_batch = y_batch[i]
        img_name = X_batch_name[i]
        cur_similarity_result = network_similarity_results[i]
        # Green title = result class matches the query's class, red = mismatch.
        msg_color = "green" if true_label == label_batch else "red"
        axes[i + max_c].imshow(img_batch, cmap='gray')
        axes[i + max_c].set_title(
            'Rank {}:{}, \n class:{} \n similarity:{}'.format(str(i + 1), img_name, label_batch, cur_similarity_result),
            color=msg_color)
        axes[i + max_c].set_axis_off()
    plt.savefig(os.path.join(img_save_dir, f"recommended_{img_name}"), dpi=300)

    plt.show()


def compute_similar_orb_features(image_path, num_images, database_embedding, nfeatures=30):
    """
    Compute ORB features for an image and find similar images by matching the
    flattened descriptors against a PCA-reduced version of the database
    embedding.

    Args:
        image_path: path to the query image.
        num_images: number of similar images required.
        database_embedding: 2-D embedding matrix of the database images.
        nfeatures: (optional) number of features ORB should compute.

    Returns:
        (indices_list, nn_similarity_results): nested neighbour indices and
        the per-candidate similarity scores rounded to 4 decimals. Both are
        empty when ORB finds no descriptors — the original crashed on
        ``des / 255.0`` with des == None in that case.
    """
    image = cv2.imread(image_path)
    orb = cv2.ORB_create(nfeatures=nfeatures)

    # Detect keypoints, then compute their binary descriptors.
    keypoint_features = orb.detect(image)
    keypoint_features, des = orb.compute(image, keypoint_features)

    if des is None:
        # No keypoints detected: nothing to match against.
        return [], []

    # Normalize byte descriptors to [0, 1] and flatten into one query vector.
    des = des / 255.0
    des = np.expand_dims(des, axis=0)
    des = np.reshape(des, (des.shape[0], -1))

    # Project the database embedding down to the descriptor dimensionality so
    # the query vector and database vectors are comparable.
    pca = PCA(n_components=des.shape[-1])
    reduced_embedding = pca.fit_transform(database_embedding)

    knn = NearestNeighbors(n_neighbors=num_images, metric="cosine")
    knn.fit(reduced_embedding)
    _, indices = knn.kneighbors(des)
    indices_list = indices.tolist()

    # Re-score each candidate with an explicit similarity measure.
    nn_similarity_results = []
    if indices_list:
        metric_method = "pearson"  # alternatives: jaccard, cosine, euclidean, corr
        for candidate_index in tqdm(indices_list[0]):
            similarity_result = compute_similarity(des[0], reduced_embedding[candidate_index], metric_method)
            nn_similarity_results.append(np.around(similarity_result, 4))
    return indices_list, nn_similarity_results


if __name__ == "__main__":
    # Script entry point: load the encoder/decoder, embed the query image,
    # retrieve similar images (via sklearn KNN or a FAISS index), classify the
    # query with the decoder, then plot the query plus results with per-result
    # similarity scores.
    # Loads the model
    start_time = time.time()

    # device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    device = torch.device("cpu")
    feature_list = config.FEATURE_LIST
    encoder = torch_model.ClassicalFeaturesEncoder(feature_list)
    # RBF | Linear
    decoder = torch_model.ClassifierDecoder(config.DECODE_LOGIT_TYPE, device=device)

    # Load the state dict of encoder
    # encoder.load_state_dict(torch.load(config.ENCODER_MODEL_PATH, map_location=device))
    encoder.eval()
    encoder.to(device)
    decoder.load_state_dict(torch.load(config.DECODER_MODEL_PATH, map_location=device))
    decoder.eval()
    decoder.to(device)

    # Loads the embedding, define the database use
    label_embedding = np.load(config.EMBEDDING_LABEL_PATH)
    embedding = np.load(config.EMBEDDING_PATH)
    search_method = "KNN"  # KNN | FAISS | CALC_SORT
    #-----------------------------------Based on KNN------------------------------------------------
    if search_method == "KNN":
        enc_output, indices_list, nn_similarity_results = compute_similar_images(
            config.TEST_IMAGE_PATH, config.NUM_IMAGES, embedding, device
        )
        database_image_format = ".png"
        # Re-scale the query embedding with the statistics saved at training time.
        # NOTE(review): torch.from_numpy expects a numpy array — this only works
        # if the encoder returns numpy output; confirm ClassicalFeaturesEncoder's
        # return type, otherwise these calls raise TypeError on a torch.Tensor.
        if config.SCALE_POLICY == "mean_std":
            mean_features = np.load(config.EMB_SCALE_PARA1_PATH)
            std_features = np.load(config.EMB_SCALE_PARA2_PATH)
            query_feature_embedding = mean_std_scale_features_with_trainfeature(torch.from_numpy(enc_output),mean_features,std_features)
        else:
            min_features = np.load(config.EMB_SCALE_PARA1_PATH)
            max_features = np.load(config.EMB_SCALE_PARA2_PATH)
            query_feature_embedding = min_max_scale_features_with_trainfeature(torch.from_numpy(enc_output),
                                                                               min_features, max_features)
        # query_feature_embedding =
        dec_prob_output = decoder(query_feature_embedding)
        pred = dec_prob_output.data.max(1, keepdim=True)[
            1
        ]  # get the index of the max probability
        pred_img_class = pred.detach().cpu().numpy()
        end_time = time.time()
        print("time use: {:.5f} s".format(end_time - start_time))
        plot_similar_images_with_pred(config.TEST_IMAGE_PATH, pred_img_class[0], indices_list, nn_similarity_results, database_image_format)

        # Using orb detection
        # indices_list2, nn_similarity_results2 = compute_similar_orb_features(config.TEST_IMAGE_PATH, config.NUM_IMAGES, embedding, nfeatures=1)
        # plot_similar_images_with_pred(config.TEST_IMAGE_PATH, pred_img_class, indices_list2, nn_similarity_results2, database_image_format)
    elif search_method == "FAISS":
        enc_output, query_result_idxs, images_list, dist_results,nn_similarity_results = compute_similar_images_based_faiss(
            config.TEST_IMAGE_PATH, config.NUM_IMAGES, config.FAISS_INDEX_DIR_PATH, embedding, device
        )
        # Normalize with the scaling parameters saved during training.
        # NOTE(review): same torch.from_numpy concern as the KNN branch above.
        if config.SCALE_POLICY == "mean_std":
            mean_features = np.load(config.EMB_SCALE_PARA1_PATH)
            std_features = np.load(config.EMB_SCALE_PARA2_PATH)
            query_feature_embedding = mean_std_scale_features_with_trainfeature(torch.from_numpy(enc_output),
                                                                                mean_features, std_features)
        else:
            min_features = np.load(config.EMB_SCALE_PARA1_PATH)
            max_features = np.load(config.EMB_SCALE_PARA2_PATH)
            query_feature_embedding = min_max_scale_features_with_trainfeature(torch.from_numpy(enc_output),
                                                        min_features=min_features, max_features=max_features)

        dec_prob_output = decoder(query_feature_embedding)
        pred = dec_prob_output.data.max(1, keepdim=True)[
            1
        ]  # get the index of the max probability
        pred_img_class = pred.detach().cpu().numpy()

        result_images_name = [os.path.basename(image_path) for image_path in images_list]

        result_images_labels = [label_embedding[item] for item in query_result_idxs[0]]
        result_images_list = obtain_images_from_list(images_list)

        # "fraction": fraction_similarity_fun,
        # "exponential": exponential_similarity_fun,
        # "origin": origin_similarity_fun
        metric_method = "fraction"
        similarity_fun = dist_sim_fun_choose(metric_method)
        nums_image = len(result_images_name)
        similarity_results_list = []
        for i in range(nums_image):
            features_distance = dist_results[0][i]
            similarity_result = similarity_fun(features_distance)
            similarity_results_list.append(np.around(similarity_result, 4))

        end_time = time.time()
        print("time use: {:.5f} s".format(end_time - start_time))
        plotImages_with_pred_class(config.TEST_IMAGE_PATH,
                                   pred_img_class[0],
                                   result_images_list,
                                   result_images_name,
                                   result_images_labels,
                                   nn_similarity_results, # similarity_results_list | nn_similarity_results
                                   config.NUM_IMAGES)