__all__ = [
    "load_image_tensor",
    "compute_similar_images",
    "compute_similar_orb_features",
    "plot_similar_images",
    "compute_similarity",
]

import config_conv_encoder_decoder_oil as config
import torch
import numpy as np
import torch_conv_encoder_decoder_model as torch_model
from sklearn.neighbors import NearestNeighbors
import torchvision.transforms as T
import os
import cv2
from PIL import Image
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
# import torchvision
from tqdm import tqdm
import albumentations as A
from albumentations.pytorch import ToTensorV2
import pyiqa
from scipy import stats
# from sklearn.metrics import pairwise
from scipy.spatial import distance as scipy_distance
import utils
# import traj_dist.distance as tdist_distance
np.set_printoptions(precision=4)
plt.rcParams['font.sans-serif'] = ['SimHei']  # font that can render CJK labels correctly
plt.rcParams['font.size'] = 10.5  # default figure font size
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with the CJK font

def resizeConvertImgTensor(img_data):
    """
    Preprocess a PIL image (or array-like) for the IQA metrics.

    Applies ToGray, resizes the shortest side to 256, center-crops to
    250x250, and returns a batched (1, C, 250, 250) tensor moved to the
    module-level `device` (set in __main__) — NOTE(review): this function
    relies on that global being defined; confirm when importing as a module.
    """
    pipeline = A.Compose([
        A.ToGray(),
        A.SmallestMaxSize(max_size=256),
        A.CenterCrop(height=250, width=250),
        # A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensorV2(),
    ])
    transformed = pipeline(image=np.array(img_data))['image']
    batched = transformed.unsqueeze(0)
    return batched.to(device)

def load_image_tensor(image_path, device):
    """
    Load an image from disk, preprocess it, and move it to *device*.

    The image is deterministically resized and center-cropped to
    config.IMG_HEIGHT, converted to single-channel grayscale, and returned
    as a (1, 1, H, W) float tensor.

    Args:
    image_path: path to image to be loaded.
    device: "cuda" or "cpu"

    Returns:
        A 4-D torch.Tensor on *device*.
    """
    image_resize_size = config.IMG_HEIGHT
    # FIX: the original applied T.RandomResizedCrop at inference time, which
    # makes the query embedding (and therefore the retrieval results) change
    # between runs.  Resize + CenterCrop yields the same output size
    # deterministically.
    image = T.Resize(image_resize_size)(Image.open(image_path))
    image = T.CenterCrop(image_resize_size)(image)
    image = T.Grayscale(num_output_channels=1)(image)
    image_tensor = T.ToTensor()(image)
    image_tensor = image_tensor.unsqueeze(0)
    input_images = image_tensor.to(device)
    return input_images


def compute_similar_images(image_path, num_images, embedding, device):
    """
    Given a query image, find similar images in the embedding database.

    Two candidate sets are combined: the ``num_images`` nearest neighbours
    found by cosine KNN over the full embedding matrix, plus the
    ``num_images`` database rows with the *lowest* pairwise similarity
    (shown downstream as the least-similar samples).

    Args:
    image_path: Path to image whose similar images are to be found.
    num_images: Number of similar images to find.
    embedding : A (num_rows, embedding_dim) embedding learnt from the
                auto-encoder; row 0 is a dummy entry (see plot_similar_images).
    device : "cuda" or "cpu" device.

    Returns:
        (enc_output, reconstruct_out, indices_list, nn_similarity_results)
    """
    image_tensor = load_image_tensor(image_path, device)

    with torch.no_grad():
        # `encoder` is the module-level model loaded in __main__.
        enc_output, reconstruct_out = encoder(image_tensor)
        image_embedding = enc_output.cpu().detach().numpy()

    flattened_embedding = image_embedding.reshape((image_embedding.shape[0], -1))

    metric_method = "corr"
    # Similarity of the query against every real database row (row 0 is a
    # dummy embedding and is skipped).
    candidate_sims = [
        np.around(compute_similarity(flattened_embedding[0], item_emb, metric_method), 4)
        for item_emb in embedding[1:]
    ]
    # FIX: a single sort produces both the ordered indices and the ordered
    # similarity values (ascending), instead of sorting the list twice.
    sorted_pairs = sorted(enumerate(candidate_sims), key=lambda pair: pair[1])
    # FIX: +1 realigns indices computed over embedding[1:] with the index
    # space of the KNN results (which are indices into the full embedding,
    # where row 0 is the dummy); the plotting code subtracts 1 again.
    sorted_indices = [idx + 1 for idx, _ in sorted_pairs]
    sorted_nn_similarity_results = [value for _, value in sorted_pairs]

    knn = NearestNeighbors(n_neighbors=num_images, metric="cosine")
    knn.fit(embedding)
    distances, indices = knn.kneighbors(flattened_embedding)
    indices_list = indices.tolist()

    nn_similarity_results = []
    # FIX: the tail-candidate merge now happens inside this guard, so an
    # empty KNN result no longer raises IndexError on indices_list[0].
    if len(indices_list) > 0:
        neighbour_indices = indices_list[0]
        for candidate_indice in tqdm(neighbour_indices):
            similarity_result = compute_similarity(
                flattened_embedding[0], embedding[candidate_indice], metric_method)
            nn_similarity_results.append(np.around(similarity_result, 4))
        # Append the least-similar candidates after the KNN neighbours.
        indices_list = [neighbour_indices + sorted_indices[:num_images]]
        nn_similarity_results = nn_similarity_results + sorted_nn_similarity_results[:num_images]
    return enc_output, reconstruct_out, indices_list, nn_similarity_results


def obtain_distance_fun(metric_method):
    """
    Map a metric name to a two-argument distance function over 1-D arrays.

    Args:
        metric_method: one of "pearson", "cosine", "euclidean", "jaccard", "corr".

    Returns:
        A callable ``f(u, v)``.  Note: "pearson" returns the
        ``(statistic, p-value)`` pair from scipy.stats.pearsonr, while the
        other metrics return a plain float distance.  Unknown names yield a
        callable that returns None.
    """
    metric_method_list = {
        "pearson": stats.pearsonr,
        "cosine": scipy_distance.cosine,
        "euclidean": scipy_distance.euclidean,
        "jaccard": scipy_distance.jaccard,
        "corr": scipy_distance.correlation,
    }
    # FIX: the fallback must accept the same (u, v) arguments as the real
    # distance functions; the previous `lambda: None` raised TypeError as
    # soon as it was used as a distance function.
    features_distance_fun = metric_method_list.get(metric_method, lambda *args: None)
    return features_distance_fun


def origin_similarity_fun(features_distance):
    """Identity conversion: the raw distance value is used as the similarity."""
    return features_distance


def exponential_similarity_fun(features_distance):
    """Convert a distance d to exp(-d): similarity decays towards 0 as d grows."""
    return np.exp(-features_distance)


def fraction_similarity_fun(features_distance):
    """Convert a non-negative distance d to 1 / (1 + d), which lies in (0, 1]."""
    return 1 / (1 + features_distance)


def similarity_fun_choose(metric_method):
    '''
    Choose the distance-to-similarity conversion for a metric name.

    Args:
        metric_method: metric name as used by obtain_distance_fun
                       ("pearson", "cosine", "euclidean", "jaccard", "corr").

    Returns:
        A one-argument callable mapping a distance to a similarity score.
        Unknown names fall back to the identity conversion.
    '''
    metric_method_list = {
        "pearson": origin_similarity_fun,
        "cosine": origin_similarity_fun,
        "euclidean": fraction_similarity_fun,
        "jaccard": fraction_similarity_fun,
        "corr": origin_similarity_fun,
    }
    # FIX: fall back to a callable with the same one-argument signature as
    # the real conversion functions; the previous `lambda: None` raised
    # TypeError as soon as it was applied to a distance.
    return metric_method_list.get(metric_method, origin_similarity_fun)


def compute_similarity(array_X, array_Y, metric_method):
    """
    Compute a similarity score between two 1-D arrays.

    "pearson" and "cosine" report the raw statistic/distance unchanged,
    while "euclidean", "jaccard" and "corr" convert the distance d into the
    bounded score 1 / (1 + d).  Unknown metric names fall back to the
    Pearson correlation coefficient.
    """
    if metric_method == "cosine":
        return scipy_distance.cosine(array_X, array_Y)
    if metric_method == "euclidean":
        return 1 / (1 + scipy_distance.euclidean(array_X, array_Y))
    if metric_method == "jaccard":
        return 1 / (1 + scipy_distance.jaccard(array_X, array_Y))
    if metric_method == "corr":
        return 1 / (1 + scipy_distance.correlation(array_X, array_Y))
    # "pearson" and any unrecognized metric both report Pearson's r.
    r_value, _p_value = stats.pearsonr(array_X, array_Y)
    return r_value


def obtain_image_database(img_dir_path):
    '''
    Scan a class-per-subdirectory image folder and list every image path.

    Expected layout (e.g. img_dir_path = 'images/'):
        img_dir_path/<class_name>/<image_file>

    Args:
        img_dir_path: root directory containing one subdirectory per class.

    Returns:
        (image_paths, image_labels, categories): two parallel numpy string
        arrays (full path and class label per image) plus the raw
        os.listdir() result for the root directory.
    '''
    dataX_path_list = []
    dataY_path_list = []

    categories = os.listdir(img_dir_path)
    for category_class in categories:
        # FIX: os.path.join works whether or not img_dir_path carries a
        # trailing separator; the old `img_dir_path + category` silently
        # produced broken paths without one.  Also removed the unused
        # `ii` counter and `img_file_path_list`.
        category_dir = os.path.join(img_dir_path, category_class)
        for img_file_name in os.listdir(category_dir):
            dataX_path_list.append(os.path.join(category_dir, img_file_name))
            dataY_path_list.append(category_class)
    return np.asarray(dataX_path_list), np.asarray(dataY_path_list), categories


def plot_similar_images(query_image_path, reconstruction_output, indices_list, network_similarity_results, image_format, visible):
    """
    Plots images that are similar to indices obtained from computing simliar images.

    Resolves the embedding indices to image files in config.IMG_DATA_PATH and
    delegates the actual rendering/saving to plotImages.

    Args:
    query_image_path: path to the query image.
    reconstruction_output: decoder reconstruction of the query (passed through; display is disabled in plotImages).
    indices_list : List of List of indexes. E.g. [[1, 2, 3]]
    network_similarity_results: similarity scores parallel to indices_list[0].
    image_format: database image file extension (currently unused here).
    visible: whether plotImages should also show the figure interactively.
    """

    indices = indices_list[0]
    result_images = []
    result_images_name = []
    result_images_labels = []
    # Twice NUM_IMAGES panels: the most-similar half plus the least-similar half.
    count_max = config.NUM_IMAGES * 2
    count = 0
    image_list, label_list, classes_list = obtain_image_database(config.IMG_DATA_PATH)
    plt.rc('font',family="Times New Roman")
    # Head of the list = KNN neighbours, tail = least-similar candidates.
    # NOTE(review): when `indices` is shorter than 2*count_max the two slices
    # overlap and duplicate entries — confirm this is intended.
    indices_use = indices[:count_max] + indices[-count_max:]
    # for index in indices:
    for index in indices_use:
        # confirm indices with image

        if index == 0:
            # index 0 is a dummy embedding.
            pass
        else:
            # Enough results collected for the display grid; stop the loop.
            if count >= count_max:
                break
            else:
                # real_index = str(index - 1)
                # Embedding row i corresponds to image i-1 (row 0 is the dummy).
                real_index = index - 1
                # img_name = image_list[real_index]
                # img_path = os.path.join(config.DATA_PATH + img_name)
                img_path = image_list[real_index]
                img_name = os.path.basename(img_path)
                # print(img_path)
                img = Image.open(img_path).convert("RGB")
                result_images.append(img)

                result_images_name.append(img_name)
                result_images_labels.append(label_list[real_index])
                count = count + 1


    plotImages(query_image_path, reconstruction_output, result_images, result_images_name, result_images_labels, network_similarity_results,
               count_max, visible)
    # plt.imshow(img)
    # plt.show()
    # img.save(f"../outputs/query_image_3/recommended_{index - 1}.jpg")


def plotImages(query_image_path, reconstruction_output, X_batch, X_batch_name, y_batch, network_similarity_results, n_img,visible):
    """
    Draw the query image plus the retrieved images in a grid, annotate each
    retrieved image with its embedding similarity and full-reference IQA
    scores (SAME, SSIM), then save the figure and optionally show it.

    Args:
    query_image_path: path of the query image; its parent directory name is taken as the true class.
    reconstruction_output: decoder reconstruction of the query (display currently disabled — see axes[1] comments).
    X_batch: list of retrieved PIL images.
    X_batch_name: file names parallel to X_batch.
    y_batch: class labels parallel to X_batch.
    network_similarity_results: per-image similarity scores parallel to X_batch.
    n_img: number of result panels the grid is laid out for.
    visible: if True, also plt.show() the figure after saving.
    """
    max_c = 10  # max images per row

    # Grid layout: the first row is reserved for the query image, the
    # remaining rows hold the retrieved results.
    if n_img <= max_c:
        r = 2
        c = n_img
    else:
        r = int(np.ceil(n_img / max_c)) + 1
        c = max_c

    fig, axes = plt.subplots(r, c, figsize=(20, 10))
    plt.subplots_adjust(left=None, bottom=None, right=None, top=None,
                        wspace=0.4, hspace=0.2)
    # plt.xticks([])  # hide the x axis
    # plt.yticks([])  # hide the y axis
    # plt.axis('off')  # hide both axes
    axes = axes.flatten()
    i = 0
    # for img_batch, label_batch, ax in zip(X_batch, y_batch, axes):
    #     ax.imshow(img_batch, cmap='gray')
    #     # ax.grid()
    #     i = i + 1
    #     ax.set_title('Rank {} : {}'.format(i, label_batch))
    query_image_data = Image.open(query_image_path).convert("RGB")
    # The true class is encoded as the query's parent directory name.
    true_label = os.path.normpath(query_image_path).split(os.sep)[-2]
    query_image_name = os.path.basename(query_image_path)
    axes[0].imshow(query_image_data, cmap='gray')
    axes[0].set_title('query image: {}'.format(query_image_name) +
                      '\n class:{}'.format(true_label))
    axes[0].set_axis_off()
    # axes[1].imshow(reconstruction_output, cmap='gray')
    # axes[1].set_title('reconstruction image: {}'.format(query_image_name))
    # axes[1].set_axis_off()
    # Blank out the remaining axes of the first (query) row.
    for i in range(max_c):
        axes[i].set_axis_off()
    img_save_dir = f"../outputs/query_conv_encoder—0322_image_{config.DATASET_NAME}/"
    if not os.path.exists(img_save_dir):
        os.makedirs(img_save_dir)
    # Full-reference IQA metrics; `device` is the module-level device set in
    # __main__.  NOTE(review): psnr / hist_intersect are created but their
    # uses below are commented out.
    iqa_metric_ssim = pyiqa.create_metric('ssim', device=device)
    iqa_metric = pyiqa.create_metric('same', as_loss=False, k1=50, k2=50, test_y_channel=True,
                                         color_space='ycbcr').to(device)
    iqa_metric_psnr = pyiqa.create_metric('psnr', device=device)
    iqa_metric_hist_intersect = pyiqa.create_metric('hist_intersect', bins=1000, range=(0, 1), test_y_channel=True,
                                                    color_space='ycbcr').to(device)

    for i in range(len(y_batch)):
        img_batch = X_batch[i]
        label_batch = y_batch[i]
        img_name = X_batch_name[i]
        cur_similarity_result = network_similarity_results[i]
        # NOTE(review): the query tensor is recomputed on every iteration;
        # it could be hoisted out of the loop.
        query_image_tensor = resizeConvertImgTensor(query_image_data)
        img_batch_tensor = resizeConvertImgTensor(img_batch)
        same_score_t = iqa_metric(query_image_tensor,img_batch_tensor)
        same_score = same_score_t.detach().cpu().numpy()[0]
        same_score_show = str(np.around(same_score,4))
        # calc ssim
        ssim_score_t = iqa_metric_ssim(query_image_tensor,img_batch_tensor)
        ssim_score = ssim_score_t.detach().cpu().numpy()[0]
        ssim_core_show = str(np.around(ssim_score,4))
        # calc psnr
        # psnr_score_t = iqa_metric_psnr(query_image_tensor, img_batch_tensor)
        # psnr_score = psnr_score_t.detach().cpu().numpy()[0]
        # psnr_score_show = str(np.around(psnr_score, 4))
        # hist_intersect_score_t = iqa_metric_hist_intersect(query_image_tensor, img_batch_tensor)
        # hist_intersect_score = hist_intersect_score_t.detach().cpu().numpy()
        # hist_intersect_score_show = str(np.around(hist_intersect_score, 4))
        # Green title when the retrieved class matches the query's class.
        msg_color = "green" if true_label == label_batch else "red"
        # Positive ranks for the first (most-similar) row, negative ranks
        # for the following (least-similar) panels.
        img_rank_id = i + 1 if i<max_c else -1 * (i-max_c + 1)
        axes[i + max_c].imshow(img_batch, cmap='gray')
        axes[i + max_c].set_title(
            'No {}:{}, \n class:{} \n logsim:{} \n same:{} \n ssim:{} '.format(str(img_rank_id),
                img_name, label_batch, cur_similarity_result, same_score_show, ssim_core_show), color=msg_color)
        axes[i + max_c].set_axis_off()
    # plt.tight_layout()
    plt.savefig(os.path.join(img_save_dir, f"recommended_{query_image_name}"),
                bbox_inches='tight', pad_inches=0.0, dpi=300)
    if visible:
       plt.show()

def compute_similar_orb_features(image_path, num_images, embedding, nfeatures=30):
    """
    Find images similar to the given one using ORB keypoint descriptors.

    The ORB descriptors of the query image are flattened to a single row
    vector; the embedding matrix is PCA-reduced to the same dimensionality
    and searched with a cosine KNN.

    Args:
    image_path: Path to image whose features and simlar images are required.
    num_images: Number of similar images required.
    embedding: 2 Dimensional Embedding vector.
    nfeatures: (optional) Number of features ORB needs to compute

    Returns:
        (indices_list, nn_similarity_results); both are empty when ORB
        detects no usable keypoints in the image.
    """
    image = cv2.imread(image_path)
    orb = cv2.ORB_create(nfeatures=nfeatures)

    # Detect keypoints, then compute their binary descriptors.
    keypoint_features = orb.detect(image)
    keypoint_features, des = orb.compute(image, keypoint_features)

    # FIX: orb.compute returns des=None when no keypoints are found
    # (blank / low-texture images); bail out instead of crashing on
    # `des / 255.0`.
    if des is None:
        return [], []

    # Scale the 0-255 descriptor bytes into [0, 1] and flatten to one row.
    des = des / 255.0
    des = np.expand_dims(des, axis=0)
    des = np.reshape(des, (des.shape[0], -1))

    # Project the database embeddings to the descriptor dimensionality so
    # that query and database vectors live in the same space.
    pca = PCA(n_components=des.shape[-1])
    reduced_embedding = pca.fit_transform(
        embedding,
    )

    knn = NearestNeighbors(n_neighbors=num_images, metric="cosine")
    knn.fit(reduced_embedding)
    _, indices = knn.kneighbors(des)

    indices_list = indices.tolist()
    # Score each returned neighbour against the query descriptor vector.
    # (Removed the unused obtain_distance_fun / similarity_fun_choose locals.)
    nn_similarity_results = []
    if len(indices_list) > 0:
        indices = indices_list[0]
        metric_method = "pearson"  # jaccard ,cosine,euclidean, dtw, pearson
        for i in tqdm(range(len(indices))):
            candidate_indice = indices[i]
            similarity_result = compute_similarity(des[0], reduced_embedding[candidate_indice], metric_method)
            cur_sim_result = np.around(similarity_result, 4)
            nn_similarity_results.append(cur_sim_result)
    return indices_list, nn_similarity_results


def inference_single(embedding, device, decoder, database_image_format, visible=True):
    """
    Run similarity retrieval for the single test image configured in
    config.TEST_IMAGE_PATH and plot/save the result.

    Args:
    embedding: database embedding matrix (row 0 is a dummy entry).
    device: "cuda" or "cpu".
    decoder: classification head applied to the encoder output.
    database_image_format: database image extension (passed through to plotting).
    visible: whether the result figure should be shown interactively.
    """
    enc_output, reconstruction_img_out, indices_list, nn_similarity_results = compute_similar_images(
        config.TEST_IMAGE_PATH, config.NUM_IMAGES, embedding, device
    )

    # Classification output is computed but currently unused downstream.
    class_output = decoder(enc_output).detach().cpu().numpy()[0]
    reconstruction_img = reconstruction_img_out  # dec_output.transpose((1, 2, 0))
    # FIX: honour the `visible` argument instead of hard-coding visible=True,
    # which made the parameter silently ineffective.
    plot_similar_images(config.TEST_IMAGE_PATH, reconstruction_img, indices_list, nn_similarity_results,
                        database_image_format, visible=visible)

    # Using orb detection
    # indices_list2, nn_similarity_results2 = compute_similar_orb_features(config.TEST_IMAGE_PATH, config.NUM_IMAGES, embedding, nfeatures=2)
    # plot_similar_images(config.TEST_IMAGE_PATH, reconstruction_img, indices_list2,nn_similarity_results2, database_image_format)

def inference_dir_imgs(dir_path, embedding, device, decoder, database_image_format,visible=False):
    """
    Run similarity retrieval for every image under dir_path (one
    subdirectory per class) and plot/save the result for each query.
    """
    images_filepaths = []
    images_labels = []
    for sub_dir in os.listdir(dir_path):
        current_dir = os.path.join(dir_path, sub_dir)
        file_paths = sorted(os.path.join(current_dir, f) for f in os.listdir(current_dir))
        # The class label of each file is its parent directory name.
        images_labels = images_labels + [os.path.normpath(f).split(os.sep)[-2] for f in file_paths]
        images_filepaths = images_filepaths + file_paths

    test_nums = len(images_filepaths)
    for i in tqdm(range(test_nums), desc="processing recommend..", total=test_nums):
        enc_output, dec_img_output, indices_list, nn_similarity_results = compute_similar_images(
            images_filepaths[i], config.NUM_IMAGES, embedding, device
        )
        # Classification output is computed but not used further.
        dec_output = decoder(enc_output).detach().cpu().numpy()[0]
        plot_similar_images(images_filepaths[i], dec_img_output, indices_list, nn_similarity_results,
                            database_image_format, visible=visible)


if __name__ == "__main__":
    # Loads the model
    print("Setting Seed for the run, seed = {}".format(config.SEED))

    utils.seed_everything(config.SEED)
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    # device = torch.device("cpu")
    # "encoder" is the full autoencoder (conv encoder + reconstruction head);
    # "decoder" here is the classification head on top of the 256-dim embedding.
    encoder = torch_model.ConvWithReconstructionNet(torch_model.ConvEncoder(), torch_model.ConvDecoder())
    decoder = torch_model.ConvClassifyNet(256, config.NUM_CLASSES)

    # Load the state dict of encoder
    # NOTE(review): torch.load without weights_only=True unpickles arbitrary
    # objects — only load checkpoints from trusted sources.
    encoder.load_state_dict(torch.load(config.ENCODER_MODEL_PATH, map_location=device))
    encoder.eval()
    encoder.to(device)
    decoder.load_state_dict(torch.load(config.DECODER_MODEL_PATH, map_location=device))
    decoder.eval()
    decoder.to(device)

    # Loads the precomputed database embedding (row 0 is a dummy entry).
    embedding = np.load(config.EMBEDDING_PATH)
    database_image_format = ".png"

    # Retrieval for the single image configured in config.TEST_IMAGE_PATH.
    inference_single(embedding, device, decoder, database_image_format, visible=True)

    # Retrieval for every image in the test directory (figures saved, not shown).
    test_dir = f"../data/data_oil_for_classification/test/"
    inference_dir_imgs(test_dir, embedding, device, decoder, database_image_format, visible=False)