# coding: utf-8
from keras.applications.mobilenet import MobileNet
from keras.layers import GlobalAveragePooling2D, Dense
from scipy import misc
import numpy as np
import time
import os
import cv2
from keras import Model
import argparse
from matplotlib import pyplot as plt
from keras.utils import multi_gpu_model

plt.switch_backend('agg')  # headless backend so figures can be rendered without a display
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow C++ log spam

flags = argparse.ArgumentParser()
# A GPU index is an integer; the original type=float would turn "1" into 1.0
# and str(gpun) below into "1.0", which is not a valid CUDA_VISIBLE_DEVICES value.
flags.add_argument('--gpu', default=0, type=int, help='gpu number')
FLAGS = flags.parse_args()

# GPU selection
gpun = FLAGS.gpu
# os.environ['CUDA_VISIBLE_DEVICES'] = str(gpun)

from keras import utils
from keras.utils import np_utils
from keras.models import Sequential
from keras.optimizers import RMSprop
import keras.callbacks
from keras.callbacks import Callback, EarlyStopping
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from keras.utils import multi_gpu_model
import tensorflow as tf
import numpy as np


alphas = ['ONE', 'TWO', 'THREE', 'FOUR', 'FIVE', 'SIX', 'SEVEN', 'EIGHT']
# source data dir
original_base_dir = '/data/station/FATP_FB'
# Sort the directory listing: os.listdir order is filesystem-dependent, so
# without sorting the class-name -> label-index assignment could silently
# change between runs/machines (a reproducibility bug in the original).
original_dirs = [os.path.join(original_base_dir, d) + '/'
                 for d in sorted(os.listdir(original_base_dir))]
# Keep the eight individual names: the path-list builders below refer to them.
(original_dataset_ONE_dir, original_dataset_TWO_dir, original_dataset_THREE_dir,
 original_dataset_FOUR_dir, original_dataset_FIVE_dir, original_dataset_SIX_dir,
 original_dataset_SEVEN_dir, original_dataset_EIGHT_dir) = original_dirs[:8]
for name, class_dir in zip(alphas, original_dirs):
    print(name, class_dir)
# original_dataset_OTHER_dir = r'/data/station/OTHER/'

for i, class_dir in enumerate(original_dirs):
    print(f'original {i} images:', len(os.listdir(class_dir)))
# print('original other images:',len(os.listdir(original_dataset_OTHER_dir)))

# Build the per-class lists of absolute image file paths. Each directory
# string already ends with '/', so plain concatenation forms a full path.
data_ONE = [(original_dataset_ONE_dir + i) for i in os.listdir(original_dataset_ONE_dir)]
data_TWO = [(original_dataset_TWO_dir + i) for i in os.listdir(original_dataset_TWO_dir)]
data_THREE = [(original_dataset_THREE_dir + i) for i in os.listdir(original_dataset_THREE_dir)]
data_FOUR = [(original_dataset_FOUR_dir + i) for i in os.listdir(original_dataset_FOUR_dir)]
data_FIVE = [(original_dataset_FIVE_dir + i) for i in os.listdir(original_dataset_FIVE_dir)]
data_SIX = [(original_dataset_SIX_dir + i) for i in os.listdir(original_dataset_SIX_dir)]
data_SEVEN = [(original_dataset_SEVEN_dir + i) for i in os.listdir(original_dataset_SEVEN_dir)]
data_EIGHT = [(original_dataset_EIGHT_dir + i) for i in os.listdir(original_dataset_EIGHT_dir)]
# data_OTHER = [(original_dataset_OTHER_dir + i) for i in os.listdir(original_dataset_OTHER_dir)]

# Network input geometry: 180x480 single-channel (grayscale) images.
ROWS = 180
COLS = 480
CHANNELS = 1

print(time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(time.time())))


# Concatenate the per-class file lists into one flat training list.
# Referencing the lists directly replaces the original eval('data_' + name)
# lookups, which were an unnecessary (and fragile) dynamic-name anti-pattern.
class_file_lists = [data_ONE, data_TWO, data_THREE, data_FOUR,
                    data_FIVE, data_SIX, data_SEVEN, data_EIGHT]
train = []
for file_list in class_file_lists:
    train += file_list
# train += data_OTHER

print("train lens: ", len(train))
# Integer label i for every file of class i, in the same order as `train`.
target = np.concatenate([np.repeat(i, len(file_list))
                         for i, file_list in enumerate(class_file_lists)])
# t.append(np.repeat(len(alphas),len(data_OTHER)))
print('target:', target[[2, 5, 10001]])

# Hold out 10% of the data for the final test evaluation (fixed seed).
train_X, test_X, train_y, test_y = train_test_split(train,
                                                    target,
                                                    test_size=0.1,
                                                    random_state=0)

# Pair each file path with its integer label, as consumed by prep_data().
train_images = [(path, int(label)) for path, label in zip(train_X, train_y)]
test_images = [(path, int(label)) for path, label in zip(test_X, test_y)]


def read_image(tuple_set):
    """Load one sample as grayscale and resize it to the network input size.

    Parameters
    ----------
    tuple_set : tuple
        (file_path, label) pair as built in train_images/test_images.

    Returns
    -------
    tuple
        (image, label) where image is a (ROWS, COLS) uint8 grayscale array.
    """
    file_path, label = tuple_set
    # scipy.misc.imread/imresize were deprecated in SciPy 1.0 and removed in
    # SciPy 1.3; cv2 (already imported at the top of the file) replaces them.
    # NOTE(review): cv2's grayscale conversion weights differ marginally from
    # PIL's mode="L" luma conversion — pixel values may shift by ~1.
    img = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)
    if img is None:
        # misc.imread raised on unreadable files; cv2 returns None instead.
        raise IOError(f'could not read image: {file_path}')
    # cv2.resize takes dsize as (width, height), hence (COLS, ROWS).
    return cv2.resize(img, (COLS, ROWS), interpolation=cv2.INTER_LINEAR), label


def regular(x_train):
    """Standardize an array to zero mean and unit variance."""
    centered = x_train - np.mean(x_train)
    return centered / np.std(x_train)


def prep_data(images):
    """Load every (path, label) pair into one uint8 image tensor.

    Returns (data, labels): data has shape (N, ROWS, COLS, CHANNELS) and
    dtype uint8; labels is a plain Python list of the integer class labels.
    """
    count = len(images)
    data = np.ndarray((count, ROWS, COLS, CHANNELS), dtype=np.uint8)
    labels = []
    for idx, pair in enumerate(images):
        img, label = read_image(pair)
        # Append the trailing channel axis expected by the network input.
        data[idx] = np.expand_dims(img, axis=2)
        labels.append(label)
    return data, labels


# convert class vector to binary class matrices
x_train, y_train = prep_data(train_images)
x_test, y_test = prep_data(test_images)

print('x_train length', len(x_train))
optimizer = RMSprop(lr=1e-3)
objective = 'categorical_crossentropy'
# One-hot encoding of the 8 class labels, as required by the
# categorical_crossentropy loss chosen above.
y_train = np_utils.to_categorical(y_train, 8)
y_test = np_utils.to_categorical(y_test, 8)

with tf.device("/cpu:0"):
    # Instantiate the template model on the CPU: the CPU handles lighter work
    # (e.g. moving training images into GPU memory) while the GPUs carry the
    # heavy lifting; multi_gpu_model below replicates this template onto them.
    model = MobileNet(weights=None,
                  input_shape=(ROWS, COLS,CHANNELS),
                  include_top=True,dropout=0.7,
                  classes=8, alpha=2)

# NOTE(review): this assumes at least 2 visible GPUs at runtime — confirm.
model = multi_gpu_model(model,gpus=2)


def add_new_last_layer(base_model, nb_classes):
    """Append a pooled, fully-connected classification head to base_model.

    GlobalAveragePooling2D collapses the MxNxC feature map to a 1xC vector
    (C = channel count); that feeds a 1024-unit ReLU layer and finally a
    softmax output over nb_classes classes.
    """
    features = GlobalAveragePooling2D()(base_model.output)
    hidden = Dense(1024, activation='relu')(features)
    outputs = Dense(nb_classes, activation='softmax')(hidden)
    return Model(inputs=base_model.input, outputs=outputs)


# 迁移学习
def setup_to_transfer_learn(model, base_model):
    """Freeze every base-model layer, then compile model for transfer learning.

    Freezing matters because the randomly initialized head would otherwise
    produce large gradient updates that could destroy the learned
    convolutional weights.
    """
    for frozen in base_model.layers:
        frozen.trainable = False
    model.compile(optimizer="rmsprop",
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])


# Training hyperparameters.
nb_epoch = 35
batch_size = 16
log_dir = "/hadoop/station_FATP_FB/tensor_log_dir"
# NOTE(review): tb_cb is created here but never added to the callbacks list
# passed to model.fit below, so no TensorBoard logs are written — confirm
# whether that omission is intentional.
tb_cb = keras.callbacks.TensorBoard(log_dir=log_dir,write_images=1, histogram_freq=1)


class LossHistory(Callback):
    """Record per-epoch loss/accuracy curves for plotting after training."""

    def on_train_begin(self, logs={}):
        # Start every fit() call with fresh, empty history lists.
        self.losses, self.val_losses = [], []
        self.acces, self.val_acces = [], []

    def on_epoch_end(self, epoch, logs={}):
        # NOTE(review): 'acc'/'val_acc' are the metric keys in older Keras;
        # newer releases report 'accuracy' — confirm against the installed
        # version, otherwise these entries will all be None.
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))
        self.acces.append(logs.get('acc'))
        self.val_acces.append(logs.get('val_acc'))


early_stopping = EarlyStopping(monitor='val_loss', patience=50, verbose=0, mode='auto')
history = LossHistory()
y_train = np.array(y_train)
y_test = np.array(y_test)

# model = add_new_last_layer(base_model,3)
# setup_to_transfer_learn(model,base_model)
model.compile(optimizer=optimizer,
              loss=objective,
              metrics=['accuracy'])
print(model.summary())

# NOTE(review): patience=50 exceeds nb_epoch=35, so EarlyStopping can never
# actually stop training; tb_cb defined above is also absent from this list.
model.fit(x=x_train, y=y_train, batch_size=batch_size, epochs=nb_epoch, validation_split=0.2, verbose=0, shuffle=True,
          callbacks=[history, early_stopping])
# NOTE(review): `predictions` is computed but never used afterwards.
predictions = model.predict(x_test, verbose=0)

loss, accuracy = model.evaluate(x=x_test, y=y_test, batch_size=batch_size)
print('test loss: ', loss)
print('test accuracy: ', accuracy)

# Per-epoch curves captured by the LossHistory callback during fit().
loss = history.losses
val_loss = history.val_losses
acc = history.acces
val_acc = history.val_acces

print('train loss:', loss)
print('val loss:', val_loss)
print('train acc:', acc)
print('val acc:', val_acc)

print(time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(time.time())))

# Save the trained model.
# NOTE(review): `model` is the multi_gpu_model wrapper, so this presumably
# saves the multi-GPU wrapper graph rather than the single-device template —
# confirm that downstream consumers can load it.
model_save_dir = '/hadoop/station_FATP_FB/model_bak'
model.save(model_save_dir+"/FATP_0126_tmp.h5")

# One x-axis point per recorded epoch.
x = [i + 1 for i in range(len(loss))]

plt.figure()
plt.plot(x, loss, label="train_loss", color="red", linewidth=2)
plt.plot(x, val_loss, label="val_loss", color="green", linewidth=2)
plt.legend()
plt.xlabel("step")
plt.ylabel("loss")
plt.title("train")
# NOTE(review): with the 'agg' backend selected at the top of the file,
# plt.show() displays nothing; presumably plt.savefig(...) was intended.
plt.show()

