__all__ = ["ClassicalFeaturesEncoder", "ClassifierDecoder"]

import numpy
import numpy as np
import torch
import torch.nn as nn
from typing import Tuple

import torch.nn.functional as F
from torchmetrics.metric import Metric

import config_classical_features_oil as config
from skimage.feature import hog
from skimage.feature import local_binary_pattern
from skimage.feature import graycomatrix, graycoprops
from skimage.feature import ORB
from skimage.util import img_as_ubyte
from skimage.transform import resize
from skimage.measure import moments_central, moments_normalized, moments_hu
import cv2
import time
import multiprocessing
import threading
from threading import *
from joblib import Parallel, delayed

embedding_shape = config.EMBEDDING_SHAPE
class RBFLogits(nn.Module):
    """RBF (radial basis function) classification head.

    Keeps one learnable prototype per class and scores an input feature
    vector by a Gaussian kernel on its squared Euclidean distance to each
    prototype:

        logits = scale * exp(-||feat - w_c||^2 / gamma)

    Args:
        feature_dim: dimensionality of the input feature vectors.
        class_num: number of classes (one prototype row per class).
        scale: multiplicative factor applied to the kernel response.
        gamma: bandwidth of the RBF kernel.
        device: device the input features are moved to in ``forward``.
    """

    def __init__(self, feature_dim, class_num, scale, gamma, device):
        super(RBFLogits, self).__init__()
        self.feature_dim = feature_dim
        self.class_num = class_num
        # Register the prototypes as nn.Parameter unconditionally.  The
        # previous "cuda" branch created plain tensors, which were silently
        # excluded from the optimizer and from state_dict; device placement
        # is handled by nn.Module.to(device) as usual.
        self.weight = nn.Parameter(torch.FloatTensor(class_num, feature_dim))
        # NOTE(review): bias is registered but currently unused in forward.
        self.bias = nn.Parameter(torch.FloatTensor(class_num))
        self.scale = scale
        self.gamma = gamma
        self.device = device
        nn.init.xavier_uniform_(self.weight)
        # Deterministic start instead of uninitialized memory.
        nn.init.zeros_(self.bias)

    def forward(self, feat):
        """Return (batch, class_num) RBF logits for a (batch, feature_dim) input."""
        self.feat = feat.to(self.device)
        # (1, C, F) - (B, 1, F) -> (B, C, F); squared distance summed over F.
        diff = torch.unsqueeze(self.weight, dim=0) - torch.unsqueeze(self.feat, dim=1)
        diff = torch.mul(diff, diff)
        metric = torch.sum(diff, dim=-1)
        kernal_metric = torch.exp(-1.0 * metric / self.gamma)
        # Same logits in train and eval mode; kept separate as a hook point
        # for margins / temperature scaling during training.
        if self.training:
            train_logits = self.scale * kernal_metric
            return train_logits
        else:
            test_logits = self.scale * kernal_metric
            return test_logits


class LinearLogits(nn.Module):
    """Plain linear classification head: ``logits = feat @ W.T + b``.

    Args:
        feature_dim: dimensionality of the input feature vectors.
        class_num: number of output classes.
        device: kept for signature compatibility; placement is handled by
            ``nn.Module.to(device)``.
    """

    def __init__(self, feature_dim, class_num, device):
        super(LinearLogits, self).__init__()
        self.feature_dim = feature_dim
        self.class_num = class_num
        # Create the parameters unconditionally.  The previous device-based
        # branching left self.weight/self.bias undefined for any device other
        # than exactly torch.device("cpu")/torch.device("cuda") (e.g.
        # "cuda:0"), and relied on the deprecated torch.cuda.FloatTensor.
        self.weight = nn.Parameter(torch.FloatTensor(class_num, feature_dim))
        self.bias = nn.Parameter(torch.FloatTensor(class_num))
        nn.init.xavier_uniform_(self.weight)
        # Deterministic start instead of uninitialized memory.
        nn.init.zeros_(self.bias)

    def forward(self, feat):
        """Return (batch, class_num) logits for a (batch, feature_dim) input."""
        logits = nn.functional.linear(feat, self.weight) + self.bias
        # Same logits in train and eval mode; kept separate as a hook point
        # for margins / temperature scaling during training.
        if self.training:
            train_logits = logits
            return train_logits
        else:
            test_logits = logits
            return test_logits


class WeightedKNNClassifier(Metric):
    # torchmetrics-based evaluator: update() accumulates train/test feature
    # banks across batches, compute() scores them with a weighted k-NN.
    # NOTE(review): compute_on_step was removed in torchmetrics >= 0.11 —
    # this constructor pins an older torchmetrics API; confirm the installed
    # version supports it.
    def __init__(
            self,
            k: int = 20,
            T: float = 0.07,
            max_distance_matrix_size: int = int(5e6),
            distance_fx: str = "cosine",
            epsilon: float = 0.00001,
            dist_sync_on_step: bool = False,
    ):
        """Implements the weighted k-NN classifier used for evaluation.
        Args:
            k (int, optional): number of neighbors. Defaults to 20.
            T (float, optional): temperature for the exponential. Only used with cosine
                distance. Defaults to 0.07.
            max_distance_matrix_size (int, optional): maximum number of elements in the
                distance matrix. Defaults to 5e6.
            distance_fx (str, optional): Distance function. Accepted arguments: "cosine" or
                "euclidean". Defaults to "cosine".
            epsilon (float, optional): Small value for numerical stability. Only used with
                euclidean distance. Defaults to 0.00001.
            dist_sync_on_step (bool, optional): whether to sync distributed values at every
                step. Defaults to False.
        """

        super().__init__(dist_sync_on_step=dist_sync_on_step, compute_on_step=False)

        self.k = k
        self.T = T
        self.max_distance_matrix_size = max_distance_matrix_size
        self.distance_fx = distance_fx
        self.epsilon = epsilon

        # Metric states: lists of per-batch tensors, concatenated in compute().
        # persistent=False keeps the (potentially huge) banks out of checkpoints.
        self.add_state("train_features", default=[], persistent=False)
        self.add_state("train_targets", default=[], persistent=False)
        self.add_state("test_features", default=[], persistent=False)
        self.add_state("test_targets", default=[], persistent=False)

    def update(
            self,
            train_features: torch.Tensor = None,
            train_targets: torch.Tensor = None,
            test_features: torch.Tensor = None,
            test_targets: torch.Tensor = None,
    ):
        """Updates the memory banks. If train (test) features are passed as input, the
        corresponding train (test) targets must be passed as well.
        Args:
            train_features (torch.Tensor, optional): a batch of train features. Defaults to None.
            train_targets (torch.Tensor, optional): a batch of train targets. Defaults to None.
            test_features (torch.Tensor, optional): a batch of test features. Defaults to None.
            test_targets (torch.Tensor, optional): a batch of test targets. Defaults to None.
        """
        # Features and targets must be provided (or omitted) in pairs.
        assert (train_features is None) == (train_targets is None)
        assert (test_features is None) == (test_targets is None)

        if train_features is not None:
            assert train_features.size(0) == train_targets.size(0)
            self.train_features.append(train_features.detach())
            self.train_targets.append(train_targets.detach())

        if test_features is not None:
            assert test_features.size(0) == test_targets.size(0)
            self.test_features.append(test_features.detach())
            self.test_targets.append(test_targets.detach())

    def set_tk(self, T: float, k: int):
        """Override the temperature and neighbor count after construction."""
        self.T = T
        self.k = k

    @torch.no_grad()
    def compute(self) -> Tuple[float]:
        """Computes weighted k-NN accuracy @1 and @5. If cosine distance is selected,
        the weight is computed using the exponential of the temperature scaled cosine
        distance of the samples. If euclidean distance is selected, the weight corresponds
        to the inverse of the euclidean distance.
        Returns:
            Tuple[float]: k-NN accuracy @1 and @5.
        """

        # print(self.T, self.k)

        # Concatenate the per-batch banks accumulated via update().
        train_features = torch.cat(self.train_features)
        train_targets = torch.cat(self.train_targets)
        test_features = torch.cat(self.test_features)
        test_targets = torch.cat(self.test_targets)

        if self.distance_fx == "cosine":
            # L2-normalize so the dot product below equals cosine similarity.
            train_features = F.normalize(train_features)
            test_features = F.normalize(test_features)

        # Number of classes is inferred from the test targets actually seen.
        num_classes = torch.unique(test_targets).numel()
        # num_train_images = train_targets.size(0)
        num_test_images = test_targets.size(0)
        num_train_images = train_targets.size(0)
        # Chunk the test set so the (chunk x train) similarity matrix stays
        # under max_distance_matrix_size elements.
        chunk_size = min(
            max(1, self.max_distance_matrix_size // num_train_images),
            num_test_images,
        )
        # Cannot retrieve more neighbors than there are train samples.
        k = min(self.k, num_train_images)

        top1, top5, total = 0.0, 0.0, 0
        # Scratch buffer for one-hot neighbor labels, resized per chunk below.
        retrieval_one_hot = torch.zeros(k, num_classes).to(train_features.device)
        # probs_tensor = torch.zeros(num_test_images, num_classes).to(train_features.device)
        for idx in range(0, num_test_images, chunk_size):
            # get the features for test images
            features = test_features[idx: min((idx + chunk_size), num_test_images), :]
            targets = test_targets[idx: min((idx + chunk_size), num_test_images)]
            batch_size = targets.size(0)

            # calculate the dot product and compute top-k neighbors
            if self.distance_fx == "cosine":
                similarities = torch.mm(features, train_features.t())
            elif self.distance_fx == "euclidean":
                # Inverse distance as similarity; epsilon avoids division by zero.
                similarities = 1 / (torch.cdist(features, train_features) + self.epsilon)
            else:
                raise NotImplementedError

            similarities, indices = similarities.topk(k, largest=True, sorted=True)
            # Look up the train labels of the retrieved neighbors.
            candidates = train_targets.view(1, -1).expand(batch_size, -1)
            retrieved_neighbors = torch.gather(candidates, 1, indices)

            # One-hot encode the neighbor labels in-place (buffer reused per chunk).
            retrieval_one_hot.resize_(batch_size * k, num_classes).zero_()
            retrieval_one_hot.scatter_(1, retrieved_neighbors.view(-1, 1), 1)

            if self.distance_fx == "cosine":
                # Temperature-scaled exponential weighting of the similarities.
                similarities = similarities.clone().div_(self.T).exp_()

            # Per-class score = sum of neighbor weights voting for that class.
            probs = torch.sum(
                torch.mul(
                    retrieval_one_hot.view(batch_size, -1, num_classes),
                    similarities.view(batch_size, -1, 1),
                ),
                1,
            )

            # probs_tensor[idx] = probs
            _, predictions = probs.sort(1, True)

            # find the predictions that match the target
            correct = predictions.eq(targets.data.view(-1, 1))
            top1 = top1 + correct.narrow(1, 0, 1).sum().item()
            top5 = (
                    top5 + correct.narrow(1, 0, min(5, k, correct.size(-1))).sum().item()
            )  # top5 does not make sense if k < 5
            total += targets.size(0)

        top1 = top1 * 100.0 / total
        top5 = top5 * 100.0 / total
        print("KNN (top1/top5):", top1, top5)

        # Clear the feature banks so the metric can be reused for the next epoch.
        self.reset()

        return top1, top5
        # return probs_tensor


def extract_lbp_features(image, return_feature_image=True):
    """Compute a uniform Local Binary Pattern encoding of ``image``.

    Args:
        image: 2-D grayscale image array.
        return_feature_image: when True, also return the 2-D LBP image.

    Returns:
        Flattened LBP response, optionally paired with the LBP image.
    """
    # Uniform LBP on a radius-3 circle sampled at 8 * radius points.
    radius = 3
    sample_points = 8 * radius
    lbp_image = local_binary_pattern(image, sample_points, radius, method='uniform')
    flat_features = np.asarray(lbp_image).flatten()

    if not return_feature_image:
        return flat_features
    return flat_features, lbp_image



def extract_hog_features(image, return_feature_image=True):
    """Compute HOG (Histogram of Oriented Gradients) descriptors.

    Uses 9 orientation bins, a cell size of config.HOG_pixels_per_cell,
    2x2 cells per block, and L2-Hys block normalization.

    Args:
        image: 2-D grayscale image array.
        return_feature_image: when True, also return the HOG visualization.

    Returns:
        1-D descriptor array, optionally paired with the visualization image.
    """
    cell_size = (config.HOG_pixels_per_cell, config.HOG_pixels_per_cell)
    descriptors, visualization = hog(
        image,
        orientations=9,
        pixels_per_cell=cell_size,
        cells_per_block=(2, 2),
        visualize=True,
        block_norm='L2-Hys',
    )
    if return_feature_image:
        return descriptors, visualization
    return descriptors


def extract_orb_features(image_data, nfeatures=2):
    """
    Given a image, it computes features using ORB detector and finds similar images to it
    Args:
    image_data: data.
    nfeatures: (optional) Number of features ORB needs to compute
    """
    orb = ORB(n_keypoints=nfeatures, n_scales=4)

    # Locate keypoints and compute their binary descriptors in one pass.
    orb.detect_and_extract(image_data)
    descriptors = orb.descriptors

    # Scale, then flatten all descriptors into a single row vector.
    # NOTE(review): skimage ORB descriptors are boolean, so /255.0 yields
    # values in {0, ~0.0039} rather than {0, 1} — confirm this is intended.
    descriptors = descriptors / 255.0
    descriptors = np.expand_dims(descriptors, axis=0)
    descriptors = np.reshape(descriptors, (descriptors.shape[0], -1))
    # None keeps the (feature, aux) return convention of the other extractors.
    return descriptors, None


def cut_img_step(img, step):
    """Tile a grayscale image into non-overlapping ``step`` x ``step`` patches.

    The input is first rescaled from an assumed [-1, 1] range to uint8 in
    [0, 255].  NOTE(review): the LBP code path feeds uint8-coded images
    here, for which this rescale wraps around — confirm that is intended.

    Args:
        img: 2-D grayscale image array.
        step: side length of the square patches.

    Returns:
        List of uint8 patches in row-major order; any border remainder that
        does not fill a whole patch is dropped.
    """
    rescaled = ((img + 1) / 2 * 255).astype(np.uint8)
    rows, cols = img.shape[0], img.shape[1]
    patches = []
    for top in range(0, rows + 1, step):
        for left in range(0, cols + 1, step):
            # Keep only fully-contained patches.
            if top + step <= rows and left + step <= cols:
                patches.append(rescaled[top:top + step, left:left + step])
    return patches


def extract_glcm_sixFeature(IMG_gray):
    """Compute six GLCM texture statistics from a grayscale uint8 image.

    The gray-level co-occurrence matrix is built for pixel distances (1, 2)
    and angles (0, 45, 90, 135 degrees), then each property is averaged
    over all distance/angle combinations.

    Args:
        IMG_gray: 2-D uint8 image with values in [0, 255].

    Returns:
        (features, glcm): a length-6 float array in the fixed order listed
        below, and the raw co-occurrence matrix.
    """
    glcm = graycomatrix(IMG_gray,
                        [1, 2],
                        [0, np.pi / 4, np.pi / 2, np.pi * 3 / 4],
                        levels=256, symmetric=True,
                        normed=True)

    # Property reference:
    # http://tonysyu.github.io/scikit-image/api/skimage.feature.html#skimage.feature.greycoprops
    # BUG FIX: use a tuple (previously a SET literal) so the feature order
    # is stable.  Set iteration order depends on string hash randomization
    # and silently shuffled the feature vector between interpreter runs.
    property_names = ('contrast', 'dissimilarity',
                      'homogeneity', 'energy', 'correlation', 'ASM')

    features = [graycoprops(glcm, prop).mean() for prop in property_names]

    return np.array(features), glcm


def calc_feature(cut_images, features_fun, step=50):
    """Apply ``features_fun`` to every patch and concatenate the results.

    Args:
        cut_images: iterable of image patches.
        features_fun: callable returning ``(feature_array, aux)`` per patch;
            the auxiliary value is discarded.
        step: unused; kept for signature compatibility with callers.

    Returns:
        1-D numpy array with all per-patch features flattened together.
    """
    # Keep only the feature part of each (feature, aux) pair.
    per_patch = [features_fun(patch)[0] for patch in cut_images]
    return np.array(per_patch).flatten()


def calc_feature_multiprocess(cut_images, features_fun, step=50):
    """Parallel counterpart of ``calc_feature``.

    Applies ``features_fun`` to every patch in a process pool and
    concatenates the per-patch feature arrays.

    Args:
        cut_images: iterable of image patches.
        features_fun: picklable callable returning ``(feature_array, aux)``
            per patch; the auxiliary value is discarded.
        step: unused; kept for signature compatibility with ``calc_feature``.

    Returns:
        1-D numpy array with all per-patch features flattened together.
    """
    worker_count = config.IMG_WIDTH // config.PATCH_STEP_SIZE
    # Context manager guarantees the pool is released even if map() raises.
    with multiprocessing.Pool(worker_count) as pool:
        results = pool.map(features_fun, cut_images)

    # BUG FIX: features_fun returns (feature, aux) pairs; keep only the
    # feature part so the output matches calc_feature.  Previously the whole
    # tuples were stacked, producing a ragged object array.
    per_patch = [result[0] for result in results]
    return np.array(per_patch).flatten()


def calc_gray_image_mean_varaince(gray_image: numpy.ndarray):
    """Return ``(mean, variance)`` of all pixel intensities in ``gray_image``."""
    return gray_image.mean(), gray_image.var()


def calc_mean_varaince_feature(cut_images):
    """Compute per-patch mean and variance feature vectors.

    Args:
        cut_images: iterable of grayscale patches (numpy arrays).

    Returns:
        ``(means, variances)``: two 1-D numpy arrays, one entry per patch.
    """
    mean_values = []
    var_values = []
    for patch in cut_images:
        # Per-patch intensity statistics (inlined from the helper).
        mean_values.append(patch.mean())
        var_values.append(patch.var())

    return np.array(mean_values).flatten(), np.array(var_values).flatten()


def calc_gray_image_mean(gray_image):
    """Return ``(mean_intensity, None)`` for a 2-D grayscale image.

    The ``None`` placeholder keeps the ``(value, aux)`` return convention
    shared by the feature helpers, so this function can be passed directly
    to ``calc_feature``.
    """
    # Vectorized mean replaces the previous O(H*W) pure-Python double loop
    # that accumulated pixel / size one element at a time.
    return float(np.mean(gray_image)), None





def calc_gray_image_varance(gray_image):
    """Return ``(variance, mean)`` of pixel intensities via a 256-bin histogram.

    Builds an intensity histogram, normalizes it to a probability
    distribution, and computes mean and variance from it.  Expects integer
    pixel values in [0, 255].
    Reference: https://blog.csdn.net/qq_43309286/article/details/101947471

    Returns:
        ``(variance, mean)`` — variance first, so the function fits the
        ``(value, aux)`` convention used with ``calc_feature``.
    """
    height, width = gray_image.shape
    pixel_count = gray_image.size
    histogram = [0] * 256

    for row in range(height):
        for col in range(width):
            histogram[gray_image[row][col]] += 1

    # BUG FIX: normalize by the number of pixels, not by the number of bins.
    # The previous `p[i] /= 256` made the "probabilities" sum to size/256
    # instead of 1, scaling both the mean and the variance incorrectly.
    mean_value = 0.0
    for level in range(256):
        histogram[level] /= pixel_count
        mean_value += level * histogram[level]

    variance = 0.0
    for level in range(256):
        variance += (level - mean_value) * (level - mean_value) * histogram[level]
    return variance, mean_value


def calc_all_feature(images, image_shape, features_list, patch_step_size=50):
    '''
    Extract the configured classical features for a batch of images.

    Args:
        images: iterable of single-channel image tensors; each is squeezed
            on dim 0 and converted to a numpy array before processing.
        image_shape: currently unused; kept for the caller's signature.
        features_list: names of optional features to compute; recognized
            values are "lbp", "Hus", "hog" and "orb".  GLCM plus per-patch
            mean/variance are always computed.
        patch_step_size: side length of the square patches each image is
            tiled into before per-patch feature extraction.

    Returns:
        2-D numpy array with one concatenated feature row per input image.
    '''

    # resized_img = resize(image_data, image_shape, anti_aliasing=True)
    total_features_embedding = []
    for i in range(len(images)):
        # resized_img = image_data
        image_data = images[i].squeeze(0)
        image_input = image_data.detach().cpu().numpy()
        # The image is cut into patches only once; all per-patch features
        # below reuse the same cut.

        if "lbp" in features_list:
            # When LBP is requested, all patch features are computed on the
            # LBP-encoded image instead of the raw grayscale image.
            lbp_features, lbp_image = extract_lbp_features(image_input)
            # print("lbp_features.shape:", lbp_features.shape)
            lbp_encoding_img = np.asarray(lbp_image, dtype=numpy.uint8)
            cut_imgs = cut_img_step(lbp_encoding_img, step=patch_step_size)
            # image_mean_features = calc_feature(cut_imgs, calc_gray_image_mean)
            # image_var_features = calc_feature(cut_imgs, calc_gray_image_varance)
            image_mean_features, image_var_features = calc_mean_varaince_feature(cut_imgs)
            # all_features = calc_feature_multiprocess(cut_imgs, extract_glcm_sixFeature)
            all_features = calc_feature(cut_imgs, extract_glcm_sixFeature)
        else:
            cut_imgs = cut_img_step(image_input, step=patch_step_size)
            # image_mean_features = calc_feature(cut_imgs, calc_gray_image_mean)
            # image_var_features = calc_feature(cut_imgs, calc_gray_image_varance)
            image_mean_features, image_var_features = calc_mean_varaince_feature(cut_imgs)
            # all_features = calc_feature_multiprocess(cut_imgs, extract_glcm_sixFeature)
            all_features = calc_feature(cut_imgs, extract_glcm_sixFeature)
            # print("glcm_features.shape:", all_features.shape)
            # print("mean_features.shape:", image_mean_features.shape)
            # print("var_features.shape:", image_var_features.shape)
        if "Hus" in features_list:
            # Hu moment invariants per patch (shape/structure descriptors).
            # hus_features = calc_feature_multiprocess(cut_imgs, extract_hus_features, step=patch_step_size)
            hus_features = calc_feature(cut_imgs, extract_hus_features, step=patch_step_size)
            all_features = np.concatenate((all_features, hus_features))
            # print("hus_features.shape:", hus_features.shape)
        if "hog" in features_list:
            # hog_features = calc_feature_multiprocess(cut_imgs, extract_hog_features, step=patch_step_size)
            hog_features = calc_feature(cut_imgs, extract_hog_features, step=patch_step_size)
            # print("hog_features.shape:", hog_features.shape)
            all_features = np.concatenate((all_features, hog_features))
        if "orb" in features_list:
            # orb_feature =  calc_feature_multiprocess(cut_imgs, extract_orb_features, step=patch_step_size)
            orb_feature =  calc_feature(cut_imgs, extract_orb_features, step=patch_step_size)
            all_features = np.concatenate((all_features, orb_feature))
        # Final row layout: [per-patch means | per-patch variances | other features].
        all_conbine_features = np.concatenate((image_mean_features, image_var_features, all_features))
        total_features_embedding.append(all_conbine_features)
    # Output: texture + structure feature matrix, one row per image.
    return np.asarray(total_features_embedding)


def extract_hus_features(image_data):
    """Compute Hu moment invariants (structural shape descriptors).

    Args:
        image_data: 2-D grayscale image array.

    Returns:
        ``(hu_moments, normalized_moments)``: the 7 Hu invariants and the
        normalized central moments they were derived from.
    """
    central = moments_central(image_data)
    normalized = moments_normalized(central)
    invariants = moments_hu(normalized)
    return invariants, normalized


class ClassicalFeaturesEncoder(nn.Module):
    """Encoder that maps images to hand-crafted (classical) feature vectors.

    This module has no trainable parameters: it delegates to
    ``calc_all_feature``, which always computes GLCM plus per-patch
    mean/variance statistics and appends any optional features named in
    ``classical_features_list``.
    """

    def __init__(self, classical_features_list):
        super().__init__()
        # Names of the optional features to compute (e.g. "lbp", "hog").
        self.classical_features_list = classical_features_list

    def forward(self, x):
        """Return a float32 numpy array of features, one row per image in ``x``."""
        features = calc_all_feature(
            x,
            config.IMG_WIDTH,
            self.classical_features_list,
            config.PATCH_STEP_SIZE,
        )
        return features.astype(np.float32)


class ClassifierDecoder(nn.Module):
    """Classification head selected by name.

    Supported names (case-insensitive): "rbf", "linear", "knn".  Any other
    name falls back to the RBF head.

    Args:
        classifier_name: which head to build.
        device: device passed through to the underlying head.
    """

    def __init__(self, classifier_name, device="cuda"):
        super().__init__()
        self.name = classifier_name
        self.device = device
        # BUG FIX: compare case-insensitively.  The __main__ demo passes
        # "Linear", which the previous exact string match silently routed
        # to the RBF fallback instead of the linear head.
        name = classifier_name.lower()
        if name == "linear":
            self.classifier_model = LinearLogits(embedding_shape[1], config.NUM_CLASSES, device)
        elif name == "knn":
            self.classifier_model = WeightedKNNClassifier()
        else:
            # "rbf" and any unrecognized name: RBF head (scale=35, gamma=16).
            self.classifier_model = RBFLogits(embedding_shape[1], config.NUM_CLASSES, 35, 16, device)

    def forward(self, x):
        """Delegate to the selected classifier and return its logits."""
        logits = self.classifier_model(x)
        return logits


if __name__ == "__main__":
    # Smoke test: extract classical features from one sample image and push
    # them through a classifier head.
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    if torch.cuda.is_available():
        print("GPU Availaible moving models to GPU")
    else:
        print("Moving models to CPU")

    # Load a sample grayscale image and resize it to the expected input size.
    image = cv2.imread("../data/small_data_oil_for_classification/images/liefeng/0000.png", cv2.IMREAD_GRAYSCALE)
    img_input_size = (250, 250)
    image = cv2.resize(image, img_input_size)
    img_random = np.array(image, dtype=numpy.uint8)
    # Add channel and batch dims: grayscale (H, W) -> (1, 1, H, W).
    img_random = np.expand_dims(img_random, axis=0)
    img_random = np.expand_dims(img_random, axis=0)
    print("img_random.shape:", img_random.shape)
    img_random = torch.from_numpy(img_random)
    img_random2 = img_random
    print("img_random.shape:", img_random.shape)

    # Classical feature extraction runs on CPU.
    device = torch.device("cpu")
    enc = ClassicalFeaturesEncoder(config.feature_list)
    enc.to(device)
    enc.eval()
    start_time = time.time()
    enc_out = enc(img_random)
    end_time = time.time()
    print("time use: {:.5f} s".format(end_time - start_time))
    enc_out2 = enc(img_random2)
    print("enc_out_length:", len(enc_out))
    print("enc_out2_length:", len(enc_out2))
    print("enc_out.shape:", enc_out.shape)
    print("enc_out2.shape:", enc_out2.shape)

    # The encoder returns numpy; convert back to tensors for the decoder.
    enc_out = torch.from_numpy(enc_out)
    enc_out2 = torch.from_numpy(enc_out2)
    emb = torch.cat((enc_out, enc_out2), 0)
    print("emb.shape:", emb.shape)

    embedding = torch.randn(config.EMBEDDING_SHAPE)
    print("embedding.shape:", embedding.shape)

    # Classifier head: RBF | Linear | kNN.
    dec = ClassifierDecoder("Linear", device=device)
    dec.to(device)
    dec.eval()
    # BUG FIX: Tensor.to() is not in-place; keep the returned tensor
    # (previously the result of enc_out.to(device) was discarded).
    enc_out = enc_out.to(device)

    dec_prob_out = dec(enc_out)
    print("dec_prob_out.shape:", dec_prob_out.shape)
    print("dec_prob_out:", dec_prob_out)

    # Predicted class index = argmax over class logits.
    pred = dec_prob_out.data.max(1, keepdim=True)[
        1
    ]  # get the index of the max probability

    embedding_shape = config.EMBEDDING_SHAPE
    embedding_dim = embedding_shape[1]
    embedding = torch.randn(embedding_dim)
    print("embedding.shape:", embedding.shape)
    print("embedding.numpy().shape:", embedding.numpy().shape)
