# -*- coding: utf-8 -*-
"""
Created by edc on 2020/7/27
"""

import tensorflow as tf
from tensorflow import keras
from imutils import paths
import numpy as np
import cv2
import os
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.utils import to_categorical  # 相当于one-hot
import random
from sklearn.model_selection import train_test_split
from hdfs import InsecureClient
from minio import Minio


def testTrain(json=""):
    """Build a ResNet50 model under MirroredStrategy, save it, and upload
    the artifacts (SavedModel + architecture JSON) to HDFS and MinIO.

    @param json: unused placeholder parameter, kept for interface compatibility
    """
    print(tf.__version__)
    # NOTE(review): credentials are hard-coded — move to env vars/config.
    minioClient = Minio('172.16.2.116:29000',
                        access_key='AKIAIOSFODNN7EXAMPLE',
                        secret_key='wJalrXUtnFEMIGK7MDENGFFbPxRfiCYEXAMPLEKEY',
                        secure=False
                        )

    client = InsecureClient("http://172.16.1.127:50070",
                            root="/",
                            user="root",
                            )
    strategy = tf.distribute.MirroredStrategy()
    print("Number of devices: {}".format(strategy.num_replicas_in_sync))

    with strategy.scope():
        model = get_model('ResNet50', include_top=False, weights='imagenet', input_tensor=None, input_shape=(32, 32, 3),
                          pooling='max', classes=10, )
        model.compile(
            optimizer=tf.keras.optimizers.Adam(
                learning_rate=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False,
                name='Adam'
            ),
            loss='binary_crossentropy',
            metrics=['accuracy'],
            loss_weights=None,
            sample_weight_mode=None,
            weighted_metrics=None,
        )
        model.summary()

    # Load the dataset
    data_path = '/root/dog_vs_cat_raw'
    datas, labels = load_dataset(data_path)

    # Split into training and test sets
    (trainX, testX, trainY, testY) = train_test_split(datas, labels, test_size=0.25, random_state=42)

    # model.fit(x=trainX, y=trainY, epochs=10, batch_size=1000, verbose=2, validation_data=(testX, testY))

    # NOTE(review): save_format='tf' writes a *directory*; fput_object below
    # expects a single file — confirm this actually works against MinIO.
    model.save(filepath="/workspace/modesave.tf",
               overwrite=True,
               include_optimizer=True,
               save_format='tf',
               signatures=None,
               options=None)

    # Upload modesave.tf to HDFS under /DL
    client.upload(hdfs_path="/DL", local_path="/workspace/modesave.tf", overwrite=True)

    # BUGFIX: create the bucket (only if missing) *before* uploading into it.
    # The original uploaded first and then unconditionally called
    # make_bucket, which raises once the bucket already exists.
    if not minioClient.bucket_exists("dlmodel"):
        minioClient.make_bucket("dlmodel", location='cn-north-1')
    # Upload modesave.tf to the MinIO bucket 'dlmodel'
    minioClient.fput_object(bucket_name='dlmodel', object_name='modesave.tf', file_path='/workspace/modesave.tf')

    # Serialize the model architecture to JSON and store it in MinIO
    model_json = model.to_json()

    # BUGFIX: the original opened the file in the default read mode ('r'),
    # so f.write() raised io.UnsupportedOperation (or FileNotFoundError if
    # the file did not yet exist). Open for writing.
    with open('/workspace/modesave.json', 'w') as f:
        f.write(model_json)
    minioClient.fput_object(bucket_name='dlmodel', object_name='modesave.json',
                            file_path='/workspace/modesave.json')


#####################        Main entry point        #########################
def train(json=""):
    """Main training entry point: build a ResNet50 model under
    MirroredStrategy, save it, and upload the artifacts to HDFS and MinIO.

    @param json: unused placeholder parameter, kept for interface compatibility
    """
    print(tf.__version__)
    # NOTE(review): credentials are hard-coded — move to env vars/config.
    minioClient = Minio('172.16.2.116:29000',
                        access_key='AKIAIOSFODNN7EXAMPLE',
                        secret_key='wJalrXUtnFEMIGK7MDENGFFbPxRfiCYEXAMPLEKEY',
                        secure=False
                        )

    client = InsecureClient("http://172.16.1.127:50070",
                            root="/",
                            user="root",
                            )
    strategy = tf.distribute.MirroredStrategy()
    print("Number of devices: {}".format(strategy.num_replicas_in_sync))

    with strategy.scope():
        model = get_model('ResNet50', include_top=False, weights='imagenet', input_tensor=None, input_shape=(32, 32, 3),
                          pooling='max', classes=10, )
        model.compile(
            optimizer=tf.keras.optimizers.Adam(
                learning_rate=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False,
                name='Adam'
            ),
            loss='binary_crossentropy',
            metrics=['accuracy'],
            loss_weights=None,
            sample_weight_mode=None,
            weighted_metrics=None,
        )
        model.summary()

    # Load the dataset
    data_path = '/root/dog_vs_cat_raw'
    datas, labels = load_dataset(data_path)

    # Split into training and test sets
    (trainX, testX, trainY, testY) = train_test_split(datas, labels, test_size=0.25, random_state=42)

    # model.fit(x=trainX, y=trainY, epochs=10, batch_size=1000, verbose=2, validation_data=(testX, testY))

    model.save(filepath="/workspace/modesave.tf",
               overwrite=True,
               include_optimizer=True,
               save_format='tf',
               signatures=None,
               options=None)

    # BUGFIX: MinIO bucket names must be DNS-compatible (lowercase); the
    # original 'dlModelJson' is rejected by the server, and make_bucket was
    # called unconditionally (raises once the bucket exists). Also, the
    # serialized model_json was never uploaded — the SavedModel directory was
    # re-uploaded under a misleading 'modesave.h5' object name instead.
    # Write the JSON to disk and upload that, mirroring testTrain().
    model_json = model.to_json()
    with open('/workspace/modesave.json', 'w') as f:
        f.write(model_json)
    if not minioClient.bucket_exists("dlmodeljson"):
        minioClient.make_bucket("dlmodeljson", location='cn-north-1')
    minioClient.fput_object(bucket_name='dlmodeljson', object_name='modesave.json',
                            file_path='/workspace/modesave.json')

    # Upload the SavedModel to HDFS under /DL
    client.upload(hdfs_path="/DL", local_path="/workspace/modesave.tf", overwrite=True)

    # Upload the SavedModel to the MinIO bucket 'deeplearn'
    minioClient.fput_object(bucket_name='deeplearn', object_name='modesave.tf', file_path='/workspace/modesave.tf')


##########################################################################


def load_dataset(data_path, weight=32, hight=32, verbose=1000):
    """Load all images under *data_path*, resize them, and build label arrays.

    The class label is taken from the parent directory name of each image
    and must be an integer string (e.g. ".../0/img.jpg").

    @param data_path: dataset root directory
    @param weight: target image width in pixels (name kept for backward
        compatibility; means "width")
    @param hight: target image height in pixels (sic; means "height")
    @param verbose: log progress every *verbose* images (0 disables logging)
    @return: (data, labels) — data is a float32 array scaled to [0, 1],
        labels is an integer array
    """
    image_paths = sorted(list(paths.list_images(data_path)))
    data = []
    labels = []

    random.seed(0)  # fixed seed so the shuffle order is reproducible
    random.shuffle(image_paths)  # shuffle all file paths

    for (i, imagePath) in enumerate(image_paths):

        image = cv2.imread(imagePath)
        # BUGFIX: cv2.imread returns None for unreadable/corrupt files and
        # the subsequent resize would raise; skip such files instead.
        if image is None:
            print("[WARN] skipping unreadable image: {}".format(imagePath))
            continue
        image = cv2.resize(image, (weight, hight))
        image = img_to_array(image)
        # the parent directory name encodes the integer class label
        label = int(imagePath.split(os.path.sep)[-2])

        data.append(image)
        labels.append(label)

        if verbose > 0 and i > 0 and (i + 1) % verbose == 0:
            print("[INFO] processed {}/{}".format(i + 1, len(image_paths)))

    # scale pixel values to [0, 1]
    data = np.array(data, dtype='float32') / 255.0

    labels = np.array(labels)

    return data, labels


def load_dataset_2(data_path, num_classes=10, verbose=10):
    """Load all images under *data_path* (without resizing) and one-hot
    encode the labels.

    The class label is the parent directory name of each image; labels are
    mapped to contiguous integer indices (sorted order) before encoding.

    @param num_classes: declared number of classes. NOTE(review): as in the
        original, the one-hot width actually used is the number of distinct
        labels found on disk, not this parameter — confirm intended behavior.
    @param data_path: dataset root directory
    @param verbose: log progress every *verbose* images (0 disables logging)
    @return: (data, labels) — data is a float32 array scaled to [0, 1],
        labels is a one-hot encoded array
    """
    image_paths = sorted(list(paths.list_images(data_path)))
    data = []
    labels = []

    random.seed(0)  # fixed seed so the shuffle order is reproducible
    random.shuffle(image_paths)  # shuffle all file paths

    for (i, imagePath) in enumerate(image_paths):

        image = cv2.imread(imagePath)
        # cv2.imread returns None for unreadable files; skip them
        if image is None:
            print("[WARN] skipping unreadable image: {}".format(imagePath))
            continue
        image = img_to_array(image)
        label = imagePath.split(os.path.sep)[-2]

        data.append(image)
        labels.append(label)

        if verbose > 0 and i > 0 and (i + 1) % verbose == 0:
            print("[INFO] processed {}/{}".format(i + 1, len(image_paths)))

    # float32 (not float64) for consistency with load_dataset and half the memory
    data = np.array(data, dtype='float32') / 255.0

    # BUGFIX: to_categorical requires integer class indices; the original
    # passed raw directory-name strings, which fails for non-numeric names
    # and mis-indexes when numeric labels are not contiguous from 0.
    classes = sorted(set(labels))
    label_to_index = {label: idx for idx, label in enumerate(classes)}
    indices = np.array([label_to_index[label] for label in labels])
    labels = to_categorical(indices, num_classes=len(classes))

    return data, labels


def get_model(model_name, input_shape=None, include_top=True, weights='imagenet', input_tensor=None,
              pooling=None, classes=1000, classifier_activation='softmax',
              alpha=1.0, depth_multiplier=1, dropout=0.001,
              **kwargs):
    """Return an (unbuilt) Keras application model selected by name.

    @param model_name: name of the model to construct (e.g. 'ResNet50')
    @param input_shape: optional input shape tuple
    @param include_top: whether to include the fully-connected top layers
    @param weights: pretrained weights to load ('imagenet' or None)
    @param input_tensor: optional Keras tensor used as model input
    @param pooling: optional global pooling mode when include_top is False
    @param classes: number of output classes
    @param classifier_activation: activation of the top layer (only for
        models whose constructor accepts it)
    @param alpha: width multiplier (MobileNet / MobileNetV2 only)
    @param depth_multiplier: depthwise multiplier (MobileNet only)
    @param dropout: dropout rate (MobileNet only)
    @param kwargs: extra keyword arguments forwarded to constructors that
        accept them
    @return: the uncompiled model
    @raise ValueError: if model_name does not match any supported model
    """
    if model_name == 'DenseNet121':
        model = tf.keras.applications.DenseNet121(
            input_shape=input_shape, include_top=include_top, weights=weights, input_tensor=input_tensor,
            pooling=pooling, classes=classes
        )
    elif model_name == 'DenseNet169':
        model = tf.keras.applications.DenseNet169(
            input_shape=input_shape, include_top=include_top, weights=weights, input_tensor=input_tensor,
            pooling=pooling, classes=classes
        )
    elif model_name == 'DenseNet201':
        model = tf.keras.applications.DenseNet201(
            input_shape=input_shape, include_top=include_top, weights=weights, input_tensor=input_tensor,
            pooling=pooling, classes=classes
        )
    elif model_name == 'NASNetLarge':
        model = tf.keras.applications.NASNetLarge(
            input_shape=input_shape, include_top=include_top, weights=weights, input_tensor=input_tensor,
            pooling=pooling, classes=classes
        )
    elif model_name == 'NASNetMobile':
        model = tf.keras.applications.NASNetMobile(
            input_shape=input_shape, include_top=include_top, weights=weights, input_tensor=input_tensor,
            pooling=pooling, classes=classes
        )
    elif model_name == 'InceptionResNetV2':
        model = tf.keras.applications.InceptionResNetV2(
            input_shape=input_shape, include_top=include_top, weights=weights, input_tensor=input_tensor,
            pooling=pooling, classes=classes, classifier_activation=classifier_activation,
            **kwargs
        )
    elif model_name == 'InceptionV3':
        model = tf.keras.applications.InceptionV3(
            input_shape=input_shape, include_top=include_top, weights=weights, input_tensor=input_tensor,
            pooling=pooling, classes=classes, classifier_activation=classifier_activation
        )
    elif model_name == 'MobileNet':
        model = tf.keras.applications.MobileNet(
            input_shape=input_shape, include_top=include_top, weights=weights, input_tensor=input_tensor,
            pooling=pooling, classes=classes, classifier_activation=classifier_activation,
            alpha=alpha, depth_multiplier=depth_multiplier, dropout=dropout, **kwargs
        )
    elif model_name == 'MobileNetV2':
        model = tf.keras.applications.MobileNetV2(
            input_shape=input_shape, include_top=include_top, weights=weights, input_tensor=input_tensor,
            pooling=pooling, classes=classes, classifier_activation=classifier_activation,
            alpha=alpha,
            **kwargs
        )
    elif model_name == 'ResNet101':
        model = tf.keras.applications.ResNet101(
            input_shape=input_shape, include_top=include_top, weights=weights, input_tensor=input_tensor,
            pooling=pooling, classes=classes,
            **kwargs
        )
    elif model_name == 'ResNet50':
        model = tf.keras.applications.ResNet50(
            input_shape=input_shape, include_top=include_top, weights=weights, input_tensor=input_tensor,
            pooling=pooling, classes=classes,
            **kwargs
        )
    elif model_name == 'ResNet152':
        model = tf.keras.applications.ResNet152(
            input_shape=input_shape, include_top=include_top, weights=weights, input_tensor=input_tensor,
            pooling=pooling, classes=classes,
            **kwargs
        )
    elif model_name == 'ResNet152V2':
        model = tf.keras.applications.ResNet152V2(
            input_shape=input_shape, include_top=include_top, weights=weights, input_tensor=input_tensor,
            pooling=pooling, classes=classes, classifier_activation=classifier_activation
        )
    elif model_name == 'ResNet101V2':
        model = tf.keras.applications.ResNet101V2(
            input_shape=input_shape, include_top=include_top, weights=weights, input_tensor=input_tensor,
            pooling=pooling, classes=classes, classifier_activation=classifier_activation
        )
    elif model_name == 'ResNet50V2':
        model = tf.keras.applications.ResNet50V2(
            input_shape=input_shape, include_top=include_top, weights=weights, input_tensor=input_tensor,
            pooling=pooling, classes=classes, classifier_activation=classifier_activation
        )
    elif model_name == 'VGG16':
        model = tf.keras.applications.VGG16(
            input_shape=input_shape, include_top=include_top, weights=weights, input_tensor=input_tensor,
            pooling=pooling, classes=classes, classifier_activation=classifier_activation
        )
    elif model_name == 'VGG19':
        model = tf.keras.applications.VGG19(
            input_shape=input_shape, include_top=include_top, weights=weights, input_tensor=input_tensor,
            pooling=pooling, classes=classes, classifier_activation=classifier_activation
        )
    elif model_name == 'Xception':
        model = tf.keras.applications.Xception(
            input_shape=input_shape, include_top=include_top, weights=weights, input_tensor=input_tensor,
            pooling=pooling, classes=classes, classifier_activation=classifier_activation
        )
    elif model_name == 'LeNet':
        model = LeNet.build(width=64, height=64, depth=3, classes=10)
    else:
        # BUGFIX: the original had no else branch, so an unknown name caused
        # an UnboundLocalError on `return model`; fail loudly and clearly.
        raise ValueError("Unknown model name: {!r}".format(model_name))

    return model


class LeNet:
    """Classic LeNet architecture builder (Keras Sequential)."""

    @staticmethod
    def build(width, height, depth, classes):
        """Build an uncompiled LeNet model.

        @param width: image width in pixels
        @param height: image height in pixels
        @param depth: number of image channels
        @param classes: number of output classes
        @return: an uncompiled Keras Sequential LeNet model
        """
        from tensorflow.keras.models import Sequential
        from tensorflow.keras.layers import Conv2D
        from tensorflow.keras.layers import MaxPooling2D
        from tensorflow.keras.layers import Activation
        from tensorflow.keras.layers import Flatten
        from tensorflow.keras.layers import Dense
        from tensorflow.keras import backend as K

        # BUGFIX: the original computed the channels-first shape and then
        # unconditionally overwrote it with the channels-last shape, so the
        # backend's data format was ignored; use if/else so it is respected.
        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
        else:
            inputShape = (height, width, depth)

        model = Sequential()
        model.add(Conv2D(20, (5, 5), padding="same", input_shape=inputShape))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        model.add(Conv2D(50, (5, 5), padding="same"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        model.add(Flatten())
        model.add(Dense(500))
        model.add(Activation("relu"))

        model.add(Dense(classes))
        model.add(Activation("softmax"))

        return model


if __name__ == '__main__':
    # Script entry point — the actual training call is currently disabled.
    # train()
    print()