__all__ = ["ClassicalFeaturesEncoder", "ClassifierDecoder"]

import numpy
import numpy as np
import torch
import torch.nn as nn

import config_classical_features_oil as config
from skimage.feature import hog
from skimage.feature import local_binary_pattern
from skimage.feature import graycomatrix, graycoprops
from skimage.feature import ORB
from skimage.transform import resize
from skimage.measure import moments_central, moments_normalized, moments_hu
import cv2
import time
import multiprocessing
import threading
from threading import *
from joblib import Parallel, delayed

# Embedding shape from config; embedding_shape[1] (the feature dimension)
# sizes the classifier heads in ClassifierDecoder below.
embedding_shape = config.EMBEDDING_SHAPE
class RBFLogits(nn.Module):
    """RBF-kernel classification head.

    Logits are ``scale * exp(-||w_c - x||^2 / gamma)`` for each class
    prototype ``w_c`` (one row of ``weight`` per class).

    Args:
        feature_dim: dimensionality of the input feature vectors.
        class_num: number of classes (= number of prototypes).
        scale: multiplier applied to the kernel similarities.
        gamma: RBF bandwidth; larger values flatten the kernel.
        device: device string the input features are moved to in forward().
    """

    def __init__(self, feature_dim, class_num, scale, gamma, device):
        super(RBFLogits, self).__init__()
        self.feature_dim = feature_dim
        self.class_num = class_num
        # BUG FIX: always register weight/bias as nn.Parameter so they are
        # tracked by optimizers and moved by .to()/.cuda().  The previous
        # "cuda" branch created plain tensors, which were silently excluded
        # from training and from the state dict.  NOTE: ``bias`` is unused in
        # forward() but kept for interface/checkpoint compatibility.
        self.weight = nn.Parameter(torch.FloatTensor(class_num, feature_dim))
        self.bias = nn.Parameter(torch.FloatTensor(class_num))
        self.scale = scale
        self.gamma = gamma
        self.device = device
        nn.init.xavier_uniform_(self.weight)
        # nn.init.kaiming_normal_(self.weight, a=0, mode='fan_in', nonlinearity='leaky_relu')

    def forward(self, feat):
        """Return (batch, class_num) RBF logits for ``feat``."""
        self.feat = feat.to(self.device)
        # Pairwise squared Euclidean distance between samples and prototypes:
        # (1, C, D) - (B, 1, D) -> (B, C, D), summed over D -> (B, C).
        diff = torch.unsqueeze(self.weight, dim=0) - torch.unsqueeze(self.feat, dim=1)
        metric = torch.sum(torch.mul(diff, diff), dim=-1)
        kernel_metric = torch.exp(-1.0 * metric / self.gamma)
        if self.training:
            train_logits = self.scale * kernel_metric
            # Hook point: margin / temperature tweaks for training go here.
            return train_logits
        else:
            test_logits = self.scale * kernel_metric
            return test_logits


class LinearLogits(nn.Module):
    """Plain linear classification head: ``logits = feat @ W.T + b``.

    Args:
        feature_dim: dimensionality of the input feature vectors.
        class_num: number of output classes.
        device: kept for interface compatibility; parameters are moved by
            the usual ``module.to(device)`` call.
    """

    def __init__(self, feature_dim, class_num, device):
        super(LinearLogits, self).__init__()
        self.feature_dim = feature_dim
        self.class_num = class_num
        # BUG FIX: create the parameters unconditionally.  Previously any
        # device string other than "cpu"/"cuda" left weight/bias undefined
        # (forward() then raised AttributeError), and the "cpu" branch never
        # initialized bias (uninitialized memory).
        self.weight = nn.Parameter(torch.FloatTensor(class_num, feature_dim))
        self.bias = nn.Parameter(torch.FloatTensor(class_num))
        nn.init.xavier_uniform_(self.weight)
        nn.init.zeros_(self.bias)  # conventional zero start for a classifier bias

    def forward(self, feat):
        """Return (batch, class_num) linear logits for ``feat``."""
        logits = nn.functional.linear(feat, self.weight) + self.bias
        if self.training:
            train_logits = logits
            # Hook point: margin / temperature tweaks for training go here.
            return train_logits
        else:
            test_logits = logits
            return test_logits


def extract_lbp_features(image, return_feature_image=True):
    """Compute a uniform Local Binary Pattern encoding of ``image``.

    Args:
        image: 2-D grayscale image array.
        return_feature_image: when True, also return the LBP-coded image.

    Returns:
        Flattened LBP feature vector, optionally paired with the LBP image.
    """
    lbp_radius = 3
    lbp_points = 8 * lbp_radius
    coded = local_binary_pattern(image, lbp_points, lbp_radius, method='uniform')
    flat = np.asarray(coded).flatten()
    return (flat, coded) if return_feature_image else flat



def extract_hog_features(image, return_feature_image=True):
    """Extract HOG features from a grayscale image.

    Cell size comes from ``config.HOG_pixels_per_cell``; the visualization
    image is always computed, but only returned when requested.
    """
    cell = (config.HOG_pixels_per_cell, config.HOG_pixels_per_cell)
    features, visualization = hog(
        image,
        orientations=9,
        pixels_per_cell=cell,
        cells_per_block=(2, 2),
        visualize=True,
        block_norm='L2-Hys',
    )
    if not return_feature_image:
        return features
    return features, visualization


def extract_orb_features(image_data, nfeatures=2):
    """Compute ORB descriptors for one image.

    Args:
        image_data: 2-D grayscale image array.
        nfeatures: (optional) number of keypoints ORB should detect.

    Returns:
        (descriptors, None): descriptors scaled to [0, 1] and flattened to a
        single row; None keeps the (feature, aux) pair shape used elsewhere.
    """
    extractor = ORB(n_keypoints=nfeatures, n_scales=4)
    extractor.detect_and_extract(image_data)
    descriptors = extractor.descriptors / 255.0
    # Flatten all descriptors into one row vector.
    descriptors = np.expand_dims(descriptors, axis=0)
    descriptors = np.reshape(descriptors, (descriptors.shape[0], -1))
    return descriptors, None


def cut_img_step(img, step):
    """Split a 2-D grayscale image into non-overlapping step x step patches.

    Patches are collected in row-major order; any remainder narrower than
    ``step`` at the right/bottom edge is discarded.

    Args:
        img: 2-D grayscale image array.
        step: patch side length in pixels.

    Returns:
        List of step x step array views into ``img``.
    """
    patches = []
    rows, cols = img.shape[0], img.shape[1]
    for top in range(0, rows - step + 1, step):
        for left in range(0, cols - step + 1, step):
            patches.append(img[top:top + step, left:left + step])
    return patches


def extract_glcm_sixFeature(IMG_gray):
    """Compute six GLCM (gray-level co-occurrence) statistics for an image.

    The co-occurrence matrix is built for distances (1, 2) and angles
    (0, 45, 90, 135 degrees) over 256 gray levels; each property is averaged
    over all distance/angle combinations.

    Args:
        IMG_gray: 2-D uint8 grayscale image.

    Returns:
        (features, glcm): a (6,) array in the fixed order contrast,
        dissimilarity, homogeneity, energy, correlation, ASM — plus the raw
        co-occurrence matrix.
    """
    glcm = graycomatrix(IMG_gray,
                        [1, 2],
                        [0, np.pi / 4, np.pi / 2, np.pi * 3 / 4],
                        levels=256, symmetric=True,
                        normed=True)

    # BUG FIX: this used to be a set literal, so iteration order changed
    # between interpreter runs (string hash randomization) and saved feature
    # vectors were not comparable across runs.  A tuple fixes the order.
    feature_descrip = ('contrast', 'dissimilarity',
                       'homogeneity', 'energy', 'correlation', 'ASM')

    feature = [graycoprops(glcm, prop).mean() for prop in feature_descrip]
    return np.array(feature), glcm


def calc_feature(cut_images, features_fun, step=50):
    """Apply ``features_fun`` to each patch and concatenate the results.

    Args:
        cut_images: iterable of 2-D image patches.
        features_fun: callable returning ``(feature, aux)`` per patch; the
            ``aux`` part is discarded.
        step: unused; kept for signature compatibility with callers.

    Returns:
        1-D numpy array with all per-patch features flattened end to end.
    """
    per_patch = [features_fun(patch)[0] for patch in cut_images]
    return np.array(per_patch).flatten()


def calc_feature_multiprocess(cut_images, features_fun, step=50):
    """Parallel version of ``calc_feature`` using a process pool.

    Args:
        cut_images: list of 2-D image patches.
        features_fun: picklable callable returning ``(feature, aux)`` per
            patch; the ``aux`` part is discarded.
        step: unused; kept for signature compatibility with calc_feature().

    Returns:
        1-D numpy array with all per-patch features flattened end to end.
    """
    # Context manager guarantees the pool is terminated even if map() raises;
    # the previous close()/join() pair leaked worker processes on error.
    with multiprocessing.Pool(config.IMG_WIDTH // config.PATCH_STEP_SIZE) as pool:
        results = pool.map(features_fun, cut_images)

    # BUG FIX: the feature functions return (feature, aux) tuples.  The
    # sequential calc_feature() unpacks and discards ``aux``, but this
    # version used to build an array from the raw tuples, producing a
    # malformed (object) result.  Keep only the feature part.
    Avg_gray = np.array([feature for feature, _ in results])
    Avg_gray_feature = Avg_gray.flatten()

    return Avg_gray_feature


def calc_gray_image_mean_varaince(gray_image: numpy.ndarray):
    """Return the (mean, variance) of a grayscale image's pixel values."""
    return gray_image.mean(), gray_image.var()


def calc_mean_varaince_feature(cut_images):
    """Per-patch mean and variance feature vectors.

    Args:
        cut_images: iterable of 2-D grayscale patches.

    Returns:
        (means, variances): two 1-D numpy arrays, one entry per patch.
    """
    stats = [calc_gray_image_mean_varaince(patch) for patch in cut_images]
    means = np.array([avg for avg, _ in stats]).flatten()
    variances = np.array([var for _, var in stats]).flatten()
    return means, variances


def calc_gray_image_mean(gray_image):
    """Mean pixel value of a grayscale image.

    Args:
        gray_image: 2-D array of pixel values.

    Returns:
        (mean, None): the None placeholder keeps the (feature, aux) pair
        shape expected by calc_feature().
    """
    # Vectorized replacement of the original per-pixel Python loop, which
    # accumulated ``pixel / size`` element by element — O(H*W) interpreter
    # overhead and needless floating-point rounding per step.
    return gray_image.mean(), None





def calc_gray_image_varance(gray_image):
    """Histogram-based variance and mean of an integer grayscale image.

    Args:
        gray_image: 2-D array of non-negative integer pixel values
            (assumed to be 8-bit, i.e. in [0, 255] — TODO confirm callers).

    Returns:
        (variance, mean) computed from the normalized gray-level histogram.
    """
    size = gray_image.size
    # Normalized gray-level histogram.  BUG FIX: probabilities must be
    # count / number_of_pixels; the original divided counts by 256 (the
    # number of bins), which mis-scales both mean and variance unless the
    # image happens to contain exactly 256 pixels.  np.bincount also
    # replaces the original O(H*W) Python double loop.
    counts = np.bincount(gray_image.ravel().astype(np.int64), minlength=256)
    p = counts / size
    levels = np.arange(p.size)
    m = float(np.sum(levels * p))
    s = float(np.sum((levels - m) * (levels - m) * p))
    return s, m


def calc_all_feature(images, image_shape, features_list, patch_step_size=50):
    '''
    Extract the configured classical features for a batch of images.

    Per-patch mean/variance and GLCM statistics are always computed; the
    "lbp", "Hus", "hog" and "orb" entries of ``features_list`` enable the
    corresponding extra feature families, concatenated in that order.

    Args:
        images: batch of single-channel images; each element is squeeze(0)-ed
            to 2-D and converted via .detach().cpu().numpy(), so torch
            tensors shaped (1, H, W) are assumed — TODO confirm callers.
        image_shape: unused in the body; kept for signature compatibility.
        features_list: iterable of feature-family names to include.
        patch_step_size: side length of the square patches each image is
            cut into.

    Returns:
        2-D numpy array with one concatenated feature vector per image.
    '''

    # resized_img = resize(image_data, image_shape, anti_aliasing=True)
    total_features_embedding = []
    for i in range(len(images)):
        # resized_img = image_data
        image_data = images[i].squeeze(0)
        image_input = image_data.detach().cpu().numpy()
        #  The image is cut into patches only once, then reused below.

        if "lbp" in features_list:
            lbp_features, lbp_image = extract_lbp_features(image_input)
            # print("lbp_features.shape:", lbp_features.shape)
        # lbp branch: patch statistics are computed on the LBP-coded image
            lbp_encoding_img = np.asarray(lbp_image, dtype=numpy.uint8)
            cut_imgs = cut_img_step(lbp_encoding_img, step=patch_step_size)
            # image_mean_features = calc_feature(cut_imgs, calc_gray_image_mean)
            # image_var_features = calc_feature(cut_imgs, calc_gray_image_varance)
            image_mean_features, image_var_features = calc_mean_varaince_feature(cut_imgs)
            # all_features = calc_feature_multiprocess(cut_imgs, extract_glcm_sixFeature)
            all_features = calc_feature(cut_imgs, extract_glcm_sixFeature)
        else:
            # No LBP: patch statistics are computed on the raw grayscale image.
            cut_imgs = cut_img_step(image_input, step=patch_step_size)
            # image_mean_features = calc_feature(cut_imgs, calc_gray_image_mean)
            # image_var_features = calc_feature(cut_imgs, calc_gray_image_varance)
            image_mean_features, image_var_features = calc_mean_varaince_feature(cut_imgs)
            # all_features = calc_feature_multiprocess(cut_imgs, extract_glcm_sixFeature)
            all_features = calc_feature(cut_imgs, extract_glcm_sixFeature)
            # print("glcm_features.shape:", all_features.shape)
            # print("mean_features.shape:", image_mean_features.shape)
            # print("var_features.shape:", image_var_features.shape)
        if "Hus" in features_list:
            # Hu moment invariants per patch, appended to the GLCM block.
            # hus_features = calc_feature_multiprocess(cut_imgs, extract_hus_features, step=patch_step_size)
            hus_features = calc_feature(cut_imgs, extract_hus_features, step=patch_step_size)
            all_features = np.concatenate((all_features, hus_features))
            # print("hus_features.shape:", hus_features.shape)
        if "hog" in features_list:
            # hog_features = calc_feature_multiprocess(cut_imgs, extract_hog_features, step=patch_step_size)
            hog_features = calc_feature(cut_imgs, extract_hog_features, step=patch_step_size)
            # print("hog_features.shape:", hog_features.shape)
            all_features = np.concatenate((all_features, hog_features))
        if "orb" in features_list:
            # orb_feature =  calc_feature_multiprocess(cut_imgs, extract_orb_features, step=patch_step_size)
            orb_feature =  calc_feature(cut_imgs, extract_orb_features, step=patch_step_size)
            all_features = np.concatenate((all_features, orb_feature))
        all_conbine_features = np.concatenate((image_mean_features, image_var_features, all_features))
        total_features_embedding.append(all_conbine_features)
    # Output: texture + structural feature vectors, one row per image.
    return np.asarray(total_features_embedding)


def extract_hus_features(image_data):
    """Hu moment invariants of an image patch (structural shape features).

    Returns:
        (hu_moments, normalized_moments): the 7 Hu invariants plus the
        normalized central moments they were derived from.
    """
    central = moments_central(image_data)
    normalized = moments_normalized(central)
    return moments_hu(normalized), normalized


class ClassicalFeaturesEncoder(nn.Module):
    """Encoder that maps image batches to hand-crafted (classical) feature
    vectors — GLCM/mean/variance plus the families named at construction —
    instead of learned convolutional features.
    """

    def __init__(self, classical_features_list):
        super().__init__()
        # Feature-family names understood by calc_all_feature,
        # e.g. ["lbp", "Hus", "hog", "orb"].
        self.classical_features_list = classical_features_list

    def forward(self, x):
        """Extract classical features for a batch of single-channel images."""
        features = calc_all_feature(
            x,
            config.IMG_WIDTH,
            self.classical_features_list,
            config.PATCH_STEP_SIZE,
        )
        return features.astype(np.float32)


class ClassifierDecoder(nn.Module):
    """Classification head selected by name.

    Args:
        classifier_name: "rbf" or "linear" (case-insensitive); any other
            name falls back to the RBF head.
        device: device string forwarded to the selected head.
    """

    def __init__(self, classifier_name, device="cuda"):
        super().__init__()
        self.name = classifier_name
        self.device = device
        # BUG FIX: match case-insensitively — __main__ passes "Linear",
        # which previously fell through to the RBF default silently.
        if self.name.lower() == "linear":
            self.classifier_model = LinearLogits(embedding_shape[1], config.NUM_CLASSES, device)
        else:
            # "rbf" and any unrecognized name use the RBF head
            # (scale=35, gamma=16 as before).
            self.classifier_model = RBFLogits(embedding_shape[1], config.NUM_CLASSES, 35, 16, device)

    def forward(self, x):
        """Return logits from the selected classifier head."""
        logits = self.classifier_model(x)
        return logits


if __name__ == "__main__":
    # Smoke test: encode one grayscale image twice and classify the result.
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    if torch.cuda.is_available():
        print("GPU Availaible moving models to GPU")
    else:
        print("Moving models to CPU")

    image = cv2.imread("../data/data_oil_for_classification/images/liefeng/0000.png", cv2.IMREAD_GRAYSCALE)
    img_input_size = (250, 250)
    image = cv2.resize(image, img_input_size)
    img_random = np.array(image, dtype=numpy.uint8)
    # The image is grayscale: add channel and batch axes, (H, W) -> (1, 1, H, W).
    img_random = np.expand_dims(img_random, axis=0)
    img_random = np.expand_dims(img_random, axis=0)
    print("img_random.shape:", img_random.shape)
    img_random = torch.from_numpy(img_random)
    img_random2 = img_random
    print("img_random.shape:", img_random.shape)

    enc = ClassicalFeaturesEncoder(config.feature_list)
    enc.to(device)
    enc.eval()
    start_time = time.time()
    enc_out = enc(img_random)
    end_time = time.time()
    print("time use: {:.5f} s".format(end_time - start_time))
    enc_out2 = enc(img_random2)
    print("enc_out_length:", len(enc_out))
    print("enc_out2_length:", len(enc_out2))
    print("enc_out.shape:", enc_out.shape)
    print("enc_out2.shape:", enc_out2.shape)

    enc_out = torch.from_numpy(enc_out)
    enc_out2 = torch.from_numpy(enc_out2)
    emb = torch.cat((enc_out, enc_out2), 0)
    print("emb.shape:", emb.shape)

    embedding = torch.randn(config.EMBEDDING_SHAPE)
    print("embedding.shape:", embedding.shape)

    # rbf | linear.  BUG FIX: the name must be lowercase — "Linear" silently
    # fell through to ClassifierDecoder's RBF default.
    dec = ClassifierDecoder("linear", device=device)
    dec.to(device)
    dec.eval()
    # BUG FIX: Tensor.to() is not in-place — the result must be reassigned,
    # otherwise enc_out stays on the CPU when device is "cuda".
    enc_out = enc_out.to(device)

    dec_prob_out = dec(enc_out)
    print("dec_prob_out.shape:", dec_prob_out.shape)
    print("dec_prob_out:", dec_prob_out)

    # Index of the maximum logit = predicted class.
    pred = dec_prob_out.data.max(1, keepdim=True)[
        1
    ]

    embedding_shape = config.EMBEDDING_SHAPE
    embedding_dim = embedding_shape[1]
    embedding = torch.randn(embedding_dim)
    print("embedding.shape:", embedding.shape)
    print("embedding.numpy().shape:", embedding.numpy().shape)

