# coding: utf-8
from scipy import misc
import numpy as np
import datetime
import time
import os
import argparse
import math
from matplotlib import pyplot as plt
from numpy import *
from scipy.ndimage import filters
import cv2

# Use the non-interactive Agg backend so plotting works without a display.
plt.switch_backend('agg')
# Silence TensorFlow C++ logging (3 = errors only).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

flags = argparse.ArgumentParser()
# GPU index must be an integer: with the original type=float a value passed
# on the command line became e.g. "1.0", which is not a valid entry for
# CUDA_VISIBLE_DEVICES.
flags.add_argument('--gpu', default=0, type=int, help='gpu number')
FLAGS = flags.parse_args()

# Pin this process to the selected GPU before any framework import.
gpun = FLAGS.gpu
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpun)

from keras import utils, Model, Input
from keras.utils import np_utils
from keras.models import load_model
from keras.layers import Flatten, Convolution2D, MaxPooling2D, Dense, AveragePooling2D, \
    ZeroPadding2D, add, BatchNormalization
from keras.optimizers import RMSprop
from keras.callbacks import Callback, EarlyStopping
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from keras import regularizers

# Class names in label order: integer label i corresponds to alphas[i].
alphas = ['ONE', 'TWO', 'THREE', 'FOUR', 'FIVE', 'SIX', 'SEVEN', 'EIGHT']

# Source data directory.  os.listdir() returns entries in an arbitrary,
# filesystem-dependent order, so sort it to make the class-index ->
# directory mapping deterministic across runs and machines.
original_base_dir = '/data/station/FATP_FB'
original_dirs = [os.path.join(original_base_dir, i) + '/'
                 for i in sorted(os.listdir(original_base_dir))]

(original_dataset_ONE_dir, original_dataset_TWO_dir,
 original_dataset_THREE_dir, original_dataset_FOUR_dir,
 original_dataset_FIVE_dir, original_dataset_SIX_dir,
 original_dataset_SEVEN_dir, original_dataset_EIGHT_dir) = original_dirs[:8]

# Show which directory was mapped to each class name.
for name, class_dir in zip(alphas, original_dirs):
    print(name, class_dir)

for i, idir in enumerate(original_dirs):
    print(f'original {i} images:', len(os.listdir(idir)))


def _list_files(directory):
    # Absolute paths of every entry directly under *directory*.
    return [directory + f for f in os.listdir(directory)]


# Per-class lists of source image paths.
(data_ONE, data_TWO, data_THREE, data_FOUR,
 data_FIVE, data_SIX, data_SEVEN, data_EIGHT) = [
    _list_files(d) for d in original_dirs[:8]]

# Directory holding the augmented data, one sub-directory per class.
base_dir = '/data1/mydata/'
(Augument_dataset_ONE_dir, Augument_dataset_TWO_dir,
 Augument_dataset_THREE_dir, Augument_dataset_FOUR_dir,
 Augument_dataset_FIVE_dir, Augument_dataset_SIX_dir,
 Augument_dataset_SEVEN_dir, Augument_dataset_EIGHT_dir) = [
    os.path.join(base_dir, a + '/') for a in alphas]

# Target image geometry fed to the network.
ROWS = 180
COLS = 480
CHANNELS = 1

# Report the combined (original + augmented) sample count per class.
_original_dir_list = [
    original_dataset_ONE_dir, original_dataset_TWO_dir, original_dataset_THREE_dir,
    original_dataset_FOUR_dir, original_dataset_FIVE_dir, original_dataset_SIX_dir,
    original_dataset_SEVEN_dir, original_dataset_EIGHT_dir]
_augment_dir_list = [
    Augument_dataset_ONE_dir, Augument_dataset_TWO_dir, Augument_dataset_THREE_dir,
    Augument_dataset_FOUR_dir, Augument_dataset_FIVE_dir, Augument_dataset_SIX_dir,
    Augument_dataset_SEVEN_dir, Augument_dataset_EIGHT_dir]
for a, odir, adir in zip(alphas, _original_dir_list, _augment_dir_list):
    print('total %s images:' % a, len(os.listdir(odir)) + len(os.listdir(adir)))

print(time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(time.time())))

# Augmented file paths per class.
augu_ONE = [(Augument_dataset_ONE_dir + i) for i in os.listdir(Augument_dataset_ONE_dir)]
augu_TWO = [(Augument_dataset_TWO_dir + i) for i in os.listdir(Augument_dataset_TWO_dir)]
augu_THREE = [(Augument_dataset_THREE_dir + i) for i in os.listdir(Augument_dataset_THREE_dir)]
augu_FOUR = [(Augument_dataset_FOUR_dir + i) for i in os.listdir(Augument_dataset_FOUR_dir)]
augu_FIVE = [(Augument_dataset_FIVE_dir + i) for i in os.listdir(Augument_dataset_FIVE_dir)]
augu_SIX = [(Augument_dataset_SIX_dir + i) for i in os.listdir(Augument_dataset_SIX_dir)]
augu_SEVEN = [(Augument_dataset_SEVEN_dir + i) for i in os.listdir(Augument_dataset_SEVEN_dir)]
augu_EIGHT = [(Augument_dataset_EIGHT_dir + i) for i in os.listdir(Augument_dataset_EIGHT_dir)]

# Explicit per-class (original, augmented) list pairs in label order,
# replacing the original eval('data_' + name) / eval('augu_' + name)
# lookups -- same iteration order, no dynamic evaluation.
_per_class = [
    (data_ONE, augu_ONE), (data_TWO, augu_TWO), (data_THREE, augu_THREE),
    (data_FOUR, augu_FOUR), (data_FIVE, augu_FIVE), (data_SIX, augu_SIX),
    (data_SEVEN, augu_SEVEN), (data_EIGHT, augu_EIGHT)]

# `train` lists every file path; `target` holds the matching integer label.
train = []
t = []
for label, (orig_files, aug_files) in enumerate(_per_class):
    train += orig_files
    train += aug_files
    t.append(np.repeat(label, len(orig_files) + len(aug_files)))

print("train lens: ", len(train))
target = np.concatenate(t)
# Debug probe: only use indices that actually exist (the original indexed
# [2, 5, 10001] unconditionally, which raises IndexError on small data sets).
probe = [i for i in (2, 5, 10001) if i < len(target)]
print('target:', target[probe])

train_X, test_X, train_y, test_y = train_test_split(train,
                                                    target,
                                                    test_size=0.2,
                                                    random_state=0)
# Pair every path with its label as a plain Python int.
train_images = [(path, int(lbl)) for path, lbl in zip(train_X, train_y)]
test_images = [(path, int(lbl)) for path, lbl in zip(test_X, test_y)]

print("prepare data done!")


def read_image(tuple_set):
    """Load one sample as a grayscale image resized to (ROWS, COLS).

    Args:
        tuple_set: a ``(file_path, label)`` pair.

    Returns:
        ``(image, label)`` where image is a uint8 array of shape (ROWS, COLS).

    Raises:
        IOError: if the file cannot be read as an image.
    """
    file_path, label = tuple_set
    # scipy.misc.imread / imresize were removed in SciPy >= 1.2; use the
    # already-imported cv2 equivalents instead.
    img = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)
    if img is None:
        # cv2.imread silently returns None on failure; fail loudly instead.
        raise IOError('cannot read image: %s' % file_path)
    # Note: cv2.resize takes dsize as (width, height), i.e. (COLS, ROWS).
    return cv2.resize(img, (COLS, ROWS), interpolation=cv2.INTER_LINEAR), label


def regular(data):
    """Min-max normalize *data* into the [0, 1] range.

    Args:
        data: array-like of numbers.

    Returns:
        Array with the minimum mapped to 0 and the maximum to 1.  A
        constant input maps to all zeros instead of dividing by zero
        (the original raised/warned on ``max == min``).
    """
    mina = np.min(data)
    maxa = np.max(data)
    span = maxa - mina
    if span == 0:
        # All values equal: normalization is undefined; return zeros.
        return np.zeros_like(data, dtype=float)
    return (data - mina) / span


def prep_data(images):
    """Read every (path, label) pair into a single uint8 image tensor.

    Args:
        images: iterable of ``(file_path, label)`` pairs.

    Returns:
        ``(data, labels)`` where data has shape (N, ROWS, COLS, CHANNELS)
        and labels is a plain Python list.
    """
    data = np.ndarray((len(images), ROWS, COLS, CHANNELS), dtype=np.uint8)
    labels = []
    for idx, pair in enumerate(images):
        img, lbl = read_image(pair)
        # Append the trailing channel axis expected by the network input.
        data[idx] = np.expand_dims(img, axis=2)
        labels.append(lbl)
    return data, labels


# Convert the file lists into image tensors and integer label lists.
x_train, y_train = prep_data(train_images)
x_test, y_shit = prep_data(test_images)

print('x train length', len(x_train))
print('x shape', x_train.shape)

optimizer = RMSprop(lr=1e-4)
objective = 'categorical_crossentropy'

# Derive the class count from `alphas` instead of repeating the magic
# number 8, so the one-hot width stays in sync with the label list.
num_classes = len(alphas)
y_train = np_utils.to_categorical(y_train, num_classes)
y_shit = np_utils.to_categorical(y_shit, num_classes)

print("start build model")


# 卷积+batch normation
def Conv2d_BN(x, nb_filter, kernel_size, strides=(1, 1), padding="same", name=None):
    """Convolution (with ReLU) followed by batch normalization.

    When *name* is given, the layers are named ``<name>_conv`` and
    ``<name>_bn``; otherwise Keras assigns automatic names.
    """
    conv_name = name + '_conv' if name is not None else None
    bn_name = name + '_bn' if name is not None else None
    out = Convolution2D(nb_filter, kernel_size, padding=padding, strides=strides,
                        activation='relu', name=conv_name)(x)
    # axis=3: normalize over the channels-last dimension.
    return BatchNormalization(axis=3, name=bn_name)(out)


# building block 算一个层
def identity_block(inpt, nb_filter, kernel_size, strides=(1, 1), with_conv_shortcut=False):
    """Two-convolution residual building block.

    With ``with_conv_shortcut=True`` the skip connection is projected
    through its own convolution (used when strides/filters change the
    feature-map shape); otherwise the input is added back unchanged.
    """
    out = Conv2d_BN(inpt, nb_filter=nb_filter, kernel_size=kernel_size,
                    strides=strides, padding="same")
    out = Conv2d_BN(out, nb_filter=nb_filter, kernel_size=kernel_size, padding='same')
    if not with_conv_shortcut:
        # Plain identity skip connection.
        return add([out, inpt])
    shortcut = Conv2d_BN(inpt, nb_filter=nb_filter, strides=strides,
                         kernel_size=kernel_size)
    return add([out, shortcut])


# 两种结构之一
def bottleneck_Block(inpt, nb_filters, strides=(1, 1), with_conv_shortcut=False):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block.

    Args:
        nb_filters: ``(k1, k2, k3)`` filter counts for the three convs.
        strides: downsampling stride, applied once on the main path.
        with_conv_shortcut: project the skip connection through a 1x1 conv
            (required whenever strides/filters change the shape).

    Bug fix: the stride is applied only to the first convolution (and the
    shortcut projection).  The original passed *strides* to all three
    convolutions, so any stride > 1 downsampled the main path strides^3
    times while the shortcut was downsampled once, making add() fail on
    mismatched shapes.
    """
    k1, k2, k3 = nb_filters
    x = Conv2d_BN(inpt, nb_filter=k1, kernel_size=1, strides=strides)
    x = Conv2d_BN(x, nb_filter=k2, kernel_size=3, padding='same')
    x = Conv2d_BN(x, nb_filter=k3, kernel_size=1, padding='same')
    if with_conv_shortcut:
        shortcut = Conv2d_BN(inpt, nb_filter=k3, strides=strides, kernel_size=1)
        return add([x, shortcut])
    return add([x, inpt])


def resnet_34(width, height, channel, classes):
    """Build a ResNet-34-style classifier for (width, height, channel) inputs.

    Returns an uncompiled Keras Model ending in a softmax over *classes*.
    """
    inpt = Input(shape=(width, height, channel))
    x = ZeroPadding2D((3, 3))(inpt)

    # Stem: 7x7/2 convolution ('valid' = no padding) + 3x3/2 max-pool.
    x = Conv2d_BN(x, nb_filter=64, kernel_size=(7, 7), strides=(2, 2), padding='valid')
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)

    # Residual stages as (filters, block count).  Every stage after the
    # first opens with a strided block whose shortcut is projected.
    stage_plan = [(64, 3), (128, 4), (256, 6), (512, 3)]
    for stage, (filters, blocks) in enumerate(stage_plan):
        for b in range(blocks):
            if stage > 0 and b == 0:
                x = identity_block(x, nb_filter=filters, kernel_size=(3, 3),
                                   strides=(2, 2), with_conv_shortcut=True)
            else:
                x = identity_block(x, nb_filter=filters, kernel_size=(3, 3))

    x = AveragePooling2D(pool_size=(6, 6))(x)
    x = Flatten()(x)

    # Classification head with L1 kernel / L2 activity regularization.
    x = Dense(classes, kernel_regularizer=regularizers.l1(0.02),
              activity_regularizer=regularizers.l2(0.03),
              activation="softmax")(x)

    return Model(inputs=inpt, outputs=x)


def check_print():
    """Construct, compile and summarize the ResNet model.

    Returns the compiled Keras model.  The class count is taken from
    ``alphas`` rather than the magic number 8 so it cannot drift out of
    sync with the label list.
    """
    model = resnet_34(ROWS, COLS, CHANNELS, len(alphas))

    model.compile(loss=objective, optimizer=optimizer, metrics=['accuracy'])

    print(model.summary())

    print('Model Compiled')
    return model


# Training hyper-parameters.
nb_epoch = 10  # number of passes over the training set
batch_size = 16  # samples per gradient update


class LossHistory(Callback):
    """Keras callback that records per-epoch loss/accuracy history."""

    def on_train_begin(self, logs=None):
        # Start fresh history lists for every training run.
        # (logs=None replaces the original mutable default `logs={}`.)
        self.losses = []
        self.val_losses = []
        self.acces = []
        self.val_acces = []

    def on_epoch_end(self, epoch, logs=None):
        # Keras passes the epoch index as the first positional argument;
        # the original parameter was misleadingly named `batch`.
        logs = logs or {}
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))
        self.acces.append(logs.get('acc'))
        self.val_acces.append(logs.get('val_acc'))


early_stopping = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')
history = LossHistory()
y_train = np.array(y_train)
y_shit = np.array(y_shit)

# Resume from a local checkpoint when one exists, otherwise build fresh.
if os.path.exists('resnet_34.h5'):
    model = load_model('resnet_34.h5')
else:
    model = check_print()

model.fit(x=x_train, y=y_train, batch_size=batch_size, epochs=nb_epoch, validation_split=0.2, verbose=0, shuffle=True,
          callbacks=[history, early_stopping])
predictions = model.predict(x_test, verbose=0)

loss, accuracy = model.evaluate(x=x_test, y=y_shit, batch_size=batch_size)
print('test loss: ', loss)
print('test accuracy: ', accuracy)

# Per-epoch curves recorded by the LossHistory callback.
loss = history.losses
val_loss = history.val_losses
acc = history.acces
val_acc = history.val_acces

print('train loss:', loss)
print('val loss:', val_loss)
print('train acc:', acc)
print('val acc:', val_acc)

print(time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(time.time())))

# Save the trained model.  Create the target directory first so
# model.save() does not fail on a machine where it is missing, and use
# os.path.join instead of string concatenation for the path.
model_save_dir = '/hadoop/station_FATP_FB/model_bak'
os.makedirs(model_save_dir, exist_ok=True)
model.save(os.path.join(model_save_dir, "FATP_0128_resnet.h5"))