# coding: utf-8
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF
from keras.applications.xception import Xception
from keras.layers import GlobalAveragePooling2D, Dense
from scipy import misc
import numpy as np
import time
import os
import cv2
from keras import Model
import argparse
from matplotlib import pyplot as plt
plt.switch_backend('agg')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Command-line flags: select which GPU this job runs on.
flags = argparse.ArgumentParser()
# FIX: the GPU index must be an int. With type=float, "--gpu 1" parsed to 1.0
# and CUDA_VISIBLE_DEVICES was set to "1.0", which is not a valid device id.
flags.add_argument('--gpu', default=0, type=int, help='gpu number')
FLAGS = flags.parse_args()

# Restrict TensorFlow to the selected GPU only.
gpun = FLAGS.gpu
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpun)

from keras import utils
from keras.utils import np_utils
from keras.models import Sequential
from keras.optimizers import RMSprop
from keras.callbacks import  Callback, EarlyStopping
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img

# TF1-style session setup: cap the per-process GPU memory so other jobs can
# share the card, and register the session as Keras' backend session.
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.8  # use at most 80% of GPU memory (original comment said 40%, but the value is 0.8)
session = tf.Session(config=config)
KTF.set_session(session)
# Source data directories: one folder of raw images per class label.
original_dataset_ONE_dir = '/data/station/ONE/'
original_dataset_TWO_dir = '/data/station/TWO/'
original_dataset_THREE_dir = '/data/station/THREE/'
original_dataset_FOUR_dir = '/data/station/FOUR/'
original_dataset_FIVE_dir = '/data/station/FIVE/'
original_dataset_OTHER_dir = '/data/station/OTHER/'

# Report how many raw images each class folder contains.
print('original ONE images:', len(os.listdir(original_dataset_ONE_dir)))
print('original TWO images:', len(os.listdir(original_dataset_TWO_dir)))
print('original THREE images:', len(os.listdir(original_dataset_THREE_dir)))
print('original FOUR images:', len(os.listdir(original_dataset_FOUR_dir)))
print('original FIVE images:', len(os.listdir(original_dataset_FIVE_dir)))
print('original OTHER images:', len(os.listdir(original_dataset_OTHER_dir)))


# Short aliases for the six class folders.
DIR1 = original_dataset_ONE_dir
DIR2 = original_dataset_TWO_dir
DIR3 = original_dataset_THREE_dir
DIR4 = original_dataset_FOUR_dir
DIR5 = original_dataset_FIVE_dir
DIR6 = original_dataset_OTHER_dir


def _list_image_paths(directory):
    # Full path of every entry; the directory strings already end in '/'.
    return [directory + entry for entry in os.listdir(directory)]


data_ONE = _list_image_paths(DIR1)
data_TWO = _list_image_paths(DIR2)
data_THREE = _list_image_paths(DIR3)
data_FOUR = _list_image_paths(DIR4)
data_FIVE = _list_image_paths(DIR5)
data_OTHER = _list_image_paths(DIR6)

# Directory holding the augmented (synthetically expanded) data set.
base_dir = '/data/station/augument_data/'
#base_dir = '/data/station/photobase/'
Augument_dataset_ONE_dir = os.path.join(base_dir, 'ONE/')
Augument_dataset_TWO_dir = os.path.join(base_dir, 'TWO/')
Augument_dataset_THREE_dir = os.path.join(base_dir, 'THREE/')
Augument_dataset_FOUR_dir = os.path.join(base_dir, 'FOUR/')
Augument_dataset_FIVE_dir = os.path.join(base_dir, 'FIVE/')
Augument_dataset_OTHER_dir = os.path.join(base_dir, 'OTHER/')

# Create the augmentation folders when missing.
# FIX: os.makedirs(..., exist_ok=True) also creates a missing base_dir and
# avoids the check-then-create race in the previous six copy-pasted
# `if not os.path.exists(...): os.mkdir(...)` blocks.
for _aug_dir in (Augument_dataset_ONE_dir, Augument_dataset_TWO_dir,
                 Augument_dataset_THREE_dir, Augument_dataset_FOUR_dir,
                 Augument_dataset_FIVE_dir, Augument_dataset_OTHER_dir):
    os.makedirs(_aug_dir, exist_ok=True)

# Image geometry used throughout the pipeline: height, width, channels.
ROWS = 297
COLS = 396
CHANNELS = 3

# Report combined (original + augmented) image counts per class.
print('total ONE images:', len(os.listdir(original_dataset_ONE_dir)) + len(os.listdir(Augument_dataset_ONE_dir)))
print('total TWO images:', len(os.listdir(original_dataset_TWO_dir)) + len(os.listdir(Augument_dataset_TWO_dir)))
print('total THREE images:', len(os.listdir(original_dataset_THREE_dir)) + len(os.listdir(Augument_dataset_THREE_dir)))
print('total FOUR images:', len(os.listdir(original_dataset_FOUR_dir)) + len(os.listdir(Augument_dataset_FOUR_dir)))
print('total FIVE images:', len(os.listdir(original_dataset_FIVE_dir)) + len(os.listdir(Augument_dataset_FIVE_dir)))
print('total OTHER images:', len(os.listdir(original_dataset_OTHER_dir)) + len(os.listdir(Augument_dataset_OTHER_dir)))
# Timestamp so the run's data-loading phase can be timed from the log.
print(time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(time.time())))

# Short aliases for the augmentation folders.
DIR11 = Augument_dataset_ONE_dir
DIR22 = Augument_dataset_TWO_dir
DIR33 = Augument_dataset_THREE_dir
DIR44 = Augument_dataset_FOUR_dir
DIR55 = Augument_dataset_FIVE_dir
DIR66 = Augument_dataset_OTHER_dir

# Absolute paths of every augmented image (folder strings end in '/').
augu_ONE = [DIR11 + fname for fname in os.listdir(DIR11)]
augu_TWO = [DIR22 + fname for fname in os.listdir(DIR22)]
augu_THREE = [DIR33 + fname for fname in os.listdir(DIR33)]
augu_FOUR = [DIR44 + fname for fname in os.listdir(DIR44)]
augu_FIVE = [DIR55 + fname for fname in os.listdir(DIR55)]
augu_OTHER = [DIR66 + fname for fname in os.listdir(DIR66)]


# Training pool: original + augmented file paths, in class order
# ONE, TWO, THREE, FOUR, FIVE, OTHER.
train = (data_ONE + augu_ONE + data_TWO + augu_TWO
         + data_THREE + augu_THREE + data_FOUR + augu_FOUR
         + data_FIVE + augu_FIVE + data_OTHER + augu_OTHER)
print("train lens: ", len(train))

# Class-label vector aligned with `train`: label c repeated once per image
# of class c (0..5).
_class_sizes = [
    len(data_ONE) + len(augu_ONE),
    len(data_TWO) + len(augu_TWO),
    len(data_THREE) + len(augu_THREE),
    len(data_FOUR) + len(augu_FOUR),
    len(data_FIVE) + len(augu_FIVE),
    len(data_OTHER) + len(augu_OTHER),
]
target = np.concatenate([np.repeat(float(cls), size)
                         for cls, size in enumerate(_class_sizes)])

# Hold out 2% of the data as the final test set; fixed seed keeps the
# split reproducible across runs.
train_X, test_X, train_y, test_y = train_test_split(train,
                                                    target,
                                                    test_size=0.02,
                                                    random_state=0)

# Pair every image path with its integer class label.
train_images = [(path, int(label)) for path, label in zip(train_X, train_y)]
test_images = [(path, int(label)) for path, label in zip(test_X, test_y)]


def read_image(tuple_set):
    """Load one (file_path, label) pair and resize the image.

    Returns a (ROWS, COLS, CHANNELS) uint8 RGB array and the label.

    FIX: scipy.misc.imread/imresize were removed from SciPy (1.2/1.3), so
    this now uses cv2, which is already imported at the top of the file.
    cv2 decodes to BGR, so we convert to RGB to keep the channel order
    that scipy.misc produced; INTER_LINEAR matches interp='bilinear'.
    """
    file_path, label = tuple_set
    img = cv2.imread(file_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # NOTE: cv2.resize takes (width, height), i.e. (COLS, ROWS).
    resized = cv2.resize(img, (COLS, ROWS), interpolation=cv2.INTER_LINEAR)
    return resized, label


def regular(x_train):
    """Standardize an array to zero mean and unit variance (z-score)."""
    centered = x_train - np.mean(x_train)
    return centered / np.std(x_train)


def prep_data(images):
    """Read every (path, label) pair into one uint8 batch array.

    Returns (data, labels): data has shape
    (len(images), ROWS, COLS, CHANNELS); labels is a plain list.
    """
    count = len(images)
    data = np.ndarray((count, ROWS, COLS, CHANNELS), dtype=np.uint8)
    labels = []
    for idx, pair in enumerate(images):
        img, label = read_image(pair)
        # Pixels are stored raw; per-image standardization (regular) was
        # deliberately left disabled in the original pipeline.
        data[idx] = img
        labels.append(label)
    return data, labels


# Load all images into memory and one-hot encode the labels
# (6 classes: ONE..FIVE plus OTHER).
x_train, y_train = prep_data(train_images)
x_test, y_shit = prep_data(test_images)  # NOTE(review): y_shit holds the test labels

print('x_train length',len(x_train))
optimizer = RMSprop(lr=1e-5)  # NOTE(review): unused — compile() below passes the "rmsprop" string instead
objective = 'categorical_crossentropy'  # NOTE(review): also unused; compile() repeats the literal
# Related to the loss choice: categorical_crossentropy needs one-hot targets.
y_train =np_utils.to_categorical(y_train, 6)
y_shit = np_utils.to_categorical(y_shit,6)

"""
weights:None,随机权重
       默认：imagenet的权重
       或自己定义权重的路径
"""
# (Note above: weights=None means random init; the default is the ImageNet
# weights; a file path may also be given.)
#base_model = ResNet50(weights="/data/soft/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5",include_top=False)

# Full Xception trained from scratch (weights=None), with its own top
# classifier sized for the 6 classes; input matches (ROWS, COLS, CHANNELS).
model = Xception(weights=None,input_shape=(297,396,3),include_top=True,classes=6)


"去除ImageNet网络的全连接层权重，因为这是针对ImageNet竞赛的1000种日常对象预先训练好的网络权重。"
"因此，我们将添加一个新的全连接层，并进行初始化。"
# (Note above, for the unused transfer-learning path: drop the ImageNet
# fully-connected head — it targets the 1000 ImageNet classes — and add a
# freshly initialized one instead.)


def add_new_last_layer(base_model, nb_classes):
    """Append a fresh classification head to a headless base network.

    GlobalAveragePooling2D collapses the MxNxC feature map into a 1xC
    vector; a 1024-unit ReLU layer and an nb_classes-way softmax follow.
    Returns a Model from base_model's input to the new predictions.
    """
    features = GlobalAveragePooling2D()(base_model.output)
    hidden = Dense(1024, activation='relu')(features)
    predictions = Dense(nb_classes, activation='softmax')(hidden)
    return Model(inputs=base_model.input, outputs=predictions)


# Transfer learning
def setup_to_transfer_learn(model, base_model):
    """Freeze every base-model layer, then compile the model.

    Freezing matters because large gradient updates from the randomly
    initialized head could otherwise wreck the learned convolutional
    weights of the base network.
    """
    for frozen_layer in base_model.layers:
        frozen_layer.trainable = False
    model.compile(optimizer="rmsprop",
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])


# Training schedule: up to 800 epochs in mini-batches of 16 (the
# EarlyStopping callback below normally ends training much sooner).
nb_epoch = 800
batch_size = 16


class LossHistory(Callback):
    """Keras callback recording per-epoch loss/accuracy curves.

    After training, `losses`, `val_losses`, `acces`, and `val_acces` each
    hold one value per completed epoch (entries are None when the metric
    is absent from `logs`).
    """

    def on_train_begin(self, logs=None):
        # FIX: replaced the mutable default argument `logs={}` with the
        # `None` idiom; `logs` is not read here anyway.
        self.losses = []
        self.val_losses = []
        self.acces = []
        self.val_acces = []

    def on_epoch_end(self, epoch, logs=None):
        # FIX: the first positional argument of on_epoch_end is the epoch
        # index — renamed from the misleading `batch`.
        logs = logs or {}
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))
        self.acces.append(logs.get('acc'))  # old-Keras metric key 'acc'
        self.val_acces.append(logs.get('val_acc'))


# Stop training once val_loss has not improved for 50 consecutive epochs.
early_stopping = EarlyStopping(monitor='val_loss', patience=50, verbose=0, mode='auto')
history = LossHistory()
y_train = np.array(y_train)
y_shit = np.array(y_shit)
print(type(x_train))
print(type(y_train))

# Transfer-learning path kept for reference; here the model trains from scratch.
# model = add_new_last_layer(base_model,3)
# setup_to_transfer_learn(model,base_model)
model.compile(optimizer="rmsprop",
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

# verbose=0 keeps epoch progress silent; the curves come from the
# LossHistory callback. 20% of the training data is held out for validation.
model.fit(x=x_train,y=y_train, batch_size=batch_size, epochs=nb_epoch,validation_split=0.2, verbose=0, shuffle=True, callbacks=[history, early_stopping])
predictions = model.predict(x_test, verbose=0)
print(predictions)

# Final evaluation on the held-out test split.
loss, accuracy = model.evaluate(x=x_test, y=y_shit,batch_size=batch_size)
print('test loss: ', loss)
print('test accuracy: ', accuracy)

# Per-epoch curves recorded by the callback.
loss = history.losses
val_loss = history.val_losses
acc = history.acces
val_acc = history.val_acces

print('train loss:', loss)
print('val loss:', val_loss)
print('train acc:', acc)
print('val acc:', val_acc)
print(model.summary())  # NOTE(review): summary() prints itself and returns None

print(time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(time.time())))

# Persist the trained model.
model.save('ws6_keras_xception_model_1107_no_tl.h5')

# Plot train/validation loss against epoch number.
x = [i+1 for i in range(len(loss))]

plt.figure()
plt.plot(x, loss, label="train_loss", color="red", linewidth=2)
plt.plot(x, val_loss, label="val_loss", color="green", linewidth=2)
plt.legend()
plt.xlabel("step")
plt.ylabel("loss")
plt.title("train")
# FIX: the backend is switched to 'agg' at the top of the file, so
# plt.show() displays nothing and the figure was lost — save it to disk.
plt.savefig('train_val_loss.png')
plt.show()
