import os
import warnings

import cv2
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from pylab import mpl
from tensorflow import keras
from tensorflow.keras import Model
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Conv2DTranspose, UpSampling2D  # 处理平面数据
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.layers import Flatten  # 处理神经网络数据
from tensorflow.keras.models import load_model

# Silence noisy framework output during training runs.
warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # hide TF C++ log messages below level 2 (INFO/WARNING)
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True  # let TensorFlow allocate GPU memory on demand
# NOTE(review): `sess` exists only for the side effect of applying `config`;
# it is never referenced again in this file.
sess = tf.compat.v1.Session(config=config)
# Directories scanned by read_image() for training / test images.
path_train = "./data/train/"
path_test = "./data/test/"
# Class-name -> label id, used by read_image() for files whose names do not
# start with a digit.
name_ind = {'airplane': 10, "automobile": 11, "bird": 12, "cat": 13, "deer": 14, "dog": 15, "frog": 16, "horse": 17,
            "ship": 18, "truck": 19}


def set_ch():
    """Configure matplotlib for Chinese text rendering.

    Selects the FangSong font for labels/titles and disables the Unicode
    minus glyph so negative axis ticks render with a plain ASCII dash.
    """
    chinese_settings = {
        'font.sans-serif': ['FangSong'],
        'axes.unicode_minus': False,
    }
    mpl.rcParams.update(chinese_settings)


set_ch()


def euc_dist_keras(x, y):
    """Half mean-squared-error loss built from Keras backend ops.

    Averages the squared differences over the last axis and halves the
    result.  Available as an alternative reconstruction loss; the current
    training code compiles with the string "mse" instead.
    """
    squared_diff = K.square(x - y)
    return K.mean(squared_diff, axis=-1) / 2


def build_enco_deco_model():
    """Build the convolutional auto-encoder used for hashing (CAEH).

    The encoder downsamples a 28x28x1 image through two strided
    conv + max-pool stages, flattens, and projects to a ``latentFC``-dim
    dense code.  The decoder reshapes that code to a 1x1x``latentFC``
    feature map and upsamples back to a 28x28x1 sigmoid reconstruction.

    :return: (auto, enco) - the full auto-encoder and the encoder-only
        model; both share the same input layer, so training ``auto`` also
        trains the encoder weights saved from ``enco``.
    """
    latentFC = 50  # latent code size (also the source length for the hash)
    input_img = Input(shape=(28, 28, 1))
    # Encoder: 28x28 -> 14x14 -> 7x7 -> 4x4 -> 2x2x64.
    x = Conv2D(32, 3, padding='Same', activation="relu", strides=2)(input_img)
    x = MaxPooling2D((2, 2))(x)
    x = Conv2D(64, 3, padding='Same', activation="relu", strides=2)(x)
    x = MaxPooling2D((2, 2))(x)
    x = Flatten()(x)
    x = Dense(latentFC)(x)  # linear latent code (no activation)
    # Decoder: treat the latent vector as a 1x1 feature map.  The channel
    # count was previously hard-coded as 50; use latentFC so the two stay
    # in sync if the latent size changes.
    y = tf.reshape(x, (-1, 1, 1, latentFC))
    # Valid-padded transposed convs + upsampling: 1 -> 3 -> 6 -> 13 -> 26 -> 28.
    y = Conv2DTranspose(filters=64, kernel_size=(3, 3), activation="relu", strides=2)(y)
    y = UpSampling2D((2, 2))(y)
    y = Conv2DTranspose(filters=32, kernel_size=(3, 3), activation="relu", strides=2)(y)
    y = UpSampling2D((2, 2))(y)
    y = Conv2DTranspose(filters=1, kernel_size=(3, 3), activation="sigmoid")(y)
    enco = Model(input_img, x)  # image -> latent code
    auto = Model(input_img, y)  # image -> reconstruction
    enco.summary()
    auto.summary()
    return auto, enco


def read_image(path):
    """Load every image under *path* as a 28x28 grayscale array plus a label.

    The label is derived from the file name: a leading digit 0-9 is used
    directly; otherwise the name must start with one of the class names in
    ``name_ind`` (e.g. "cat0001.png", "airplane_3.jpg").

    :param path: directory containing the image files
    :return: (x, y) - uint8 images of shape (n, 28, 28) and int labels (n,)
    :raises KeyError: if a file name neither starts with a digit nor a
        known class name
    """
    x_orig = []
    y = []
    for fname in os.listdir(path):
        img = cv2.imread(path + fname)
        # BUG FIX: cv2.imread returns BGR, so BGR2GRAY (not RGB2GRAY)
        # applies the standard luminance weights to the right channels.
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        x_orig.append(cv2.resize(img_gray, (28, 28)))
        first = fname[:1]
        if first.isdigit():
            # Previously int(fname[:1]) ran before the range check and
            # raised ValueError for any non-digit name.
            y.append(int(first))
        else:
            # BUG FIX: the dict was *called* (name_ind(...)) instead of
            # indexed, and a fixed 3-char prefix only matched 3-letter
            # class names ("cat", "dog").  Match by full class-name prefix.
            for cname, ind in name_ind.items():
                if fname.startswith(cname):
                    y.append(ind)
                    break
            else:
                raise KeyError(f"cannot derive a label from file name: {fname!r}")
    return np.array(x_orig), np.array(y)


def get_data():
    """Load train/test images, scale pixels to [0, 1], and add a channel axis.

    :return: (x_train, y_train, x_test, y_test) with float32 images shaped
        (n, 28, 28, 1) or (n, 1, 28, 28) depending on the Keras
        image_data_format.
    """
    img_rows = img_cols = 28
    x_train_orig, y_train = read_image(path_train)
    x_test_orig, y_test = read_image(path_test)
    x_train = x_train_orig.astype('float32') / 255.0
    x_test = x_test_orig.astype('float32') / 255.0
    # Place the single gray channel wherever the backend expects it.
    if keras.backend.image_data_format() == "channels_first":
        sample_shape = (1, img_rows, img_cols)
    else:
        sample_shape = (img_rows, img_cols, 1)
    x_train = x_train.reshape((x_train.shape[0],) + sample_shape)
    x_test = x_test.reshape((x_test.shape[0],) + sample_shape)
    return x_train, y_train, x_test, y_test


def show_decoder_images(x_train):
    """Plot random originals next to their auto-encoder reconstructions.

    Loads the trained model from "CAEH_auto.h5", reconstructs all of
    *x_train*, and shows ``num`` randomly chosen (original,
    reconstruction) pairs side by side in a single figure.

    :param x_train: images shaped (n, 28, 28, 1), values in [0, 1]
    """
    decoded_images = load_model("CAEH_auto.h5").predict(x_train)
    decoded_images_all = decoded_images.reshape((decoded_images.shape[0], 28, 28))
    print(decoded_images_all.shape)
    print(np.max(decoded_images_all))
    print(np.max(x_train))
    num = 8
    for i in range(num):
        pos = i * 2 + 1  # left cell of row i in the num x 2 grid (was `id`, shadowing the builtin)
        rand_ind = np.random.randint(low=0, high=x_train.shape[0])
        plt.subplot(num, 2, pos)
        plt.imshow(x_train[rand_ind].reshape((28, 28)), cmap="gray")
        plt.subplot(num, 2, pos + 1)
        plt.imshow(decoded_images_all[rand_ind], cmap="gray")
    # BUG FIX: plt.show() was inside the loop, blocking after every pair and
    # discarding the partially built grid; show the complete figure once.
    plt.show()


def get_CNEH_hash(x_t, le=48):
    """Binary hash codes from the trained CAEH encoder.

    Encodes *x_t* with the saved "CAEH_encoder.h5" model, keeps the first
    ``le`` latent components, and thresholds them at 0.5 into {0, 1}.

    :param x_t: images shaped (n, 28, 28, 1), as produced by get_data()
    :param le: hash length in bits (must be <= the latent size, 50)
    :return: float64 array of shape (n, le) containing 0.0 / 1.0
    """
    features = load_model("CAEH_encoder.h5").predict(x_t)
    print(features.shape)
    print(np.max(features))
    print(np.min(features))
    # Vectorized threshold replaces the previous per-element Python loop;
    # the result dtype (float64 zeros/ones) matches the old np.zeros buffer.
    return (features[:, :le].astype(np.float32) > 0.5).astype(np.float64)


def get_phash(x_t, le=48):
    """Perceptual-hash-style binary codes computed directly from pixels.

    Flattens each image, applies a DCT to the first ``le`` flattened pixel
    values, and thresholds each coefficient against the mean coefficient.

    :param x_t: image batch shaped (n, rows, cols, channels)
    :param le: hash length in bits
    :return: float64 array of shape (n, le) containing 0.0 / 1.0
    """
    print(x_t.shape)
    n, r, l, c = x_t.shape
    # NOTE(review): reshape assumes a single channel (c == 1); for c > 1
    # the element count would not match n * r * l and this would raise.
    x_t = x_t.reshape(n, r * l).astype(np.float32)
    y_t = np.zeros((n, le))
    for i in range(n):
        img = x_t[i][0: le]
        img = cv2.dct(img)  # step 2: discrete cosine transform of the 1-D slice
        avg = np.mean(img)  # step 4: mean coefficient used as the bit threshold
        for j in range(img.shape[0]):
            if img[j] > avg:
                img[j] = 1
            else:
                img[j] = 0
        y_t[i] = img.reshape(le)
    return y_t


def fit_CAEH(epo=20):
    """Train the CAEH auto-encoder on its own inputs and save both models.

    Writes the full auto-encoder to "CAEH_auto.h5" and the encoder half to
    "CAEH_encoder.h5".

    :param epo: number of training epochs
    """
    model_full, model_enc = build_enco_deco_model()
    x_train, y_train, x_test, y_test = get_data()
    print(x_train.shape)
    print(x_test.shape)
    # Plain MSE reconstruction loss; euc_dist_keras is an available alternative.
    model_full.compile(loss="mse", optimizer="adam")
    model_full.fit(x_train, x_train,
                   epochs=epo,
                   batch_size=256,
                   shuffle=True,
                   validation_data=(x_test, x_test))
    model_full.save("CAEH_auto.h5")
    model_enc.save("CAEH_encoder.h5")


if __name__ == '__main__':
    # NOTE(review): get_data() is also called inside fit_CAEH(), so the
    # image directories are read twice per run.
    x_train, y_train, x_test, y_test = get_data()
    fit_CAEH(epo=20)  # train the CAEH model
    show_decoder_images(x_train)  # visualize the CAEH reconstructions
