import numpy as np 
import tensorflow as tf 
import cv2 as cv 
import os
import matplotlib.pyplot as plt
import sklearn
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt 
from keras.layers import Input,Dense,Activation,Dropout,MaxPooling2D,Softmax,Conv2D,Flatten,BatchNormalization
from keras.models import Model,Sequential,load_model
from keras.callbacks import ModelCheckpoint
from keras.preprocessing.image import img_to_array,load_img
from keras import optimizers
from keras.utils.generic_utils import get_custom_objects
import keras.backend as K 
from keras.callbacks import LearningRateScheduler
from PIL import Image
from resnet import resnet_model,conv_block,identity_block
import random
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# "A long march begins with a single step" — the function below reads every file in a folder and resizes them all to a uniform size.
# def read_img(path):
#     arr=[]
#     for filename in os.listdir(path):

#         # img=cv.imread(path+"/"+filename)
#         img=load_img(path+"/"+filename,target_size=(500,500))
#         img=img_to_array(img)
#         img=np.expand_dims(img,axis=0)

#         # img=tf.image.convert_image_dtype(image=img,dtype=tf.float32)
#         # img=tf.image.resize(img,[500,500],method=0)

#         arr.append(img)
#         #img=np.concatenate(x for x in arr)
#     return img

# read_img("D:/facial/premiere/jpg/training")

def tanh10(x):
    """Custom activation: tanh applied to the input scaled down by 100.

    Compressing the input (0.01 * x) keeps tanh in its near-linear
    region for a much wider input range than plain tanh.
    """
    scaled = 0.01 * x
    return K.tanh(scaled)

def read_img(imageName):
    """Read one image file and return it as a 224x224 BGR numpy array.

    Args:
        imageName: path to a single image file readable by OpenCV.

    Returns:
        numpy array of shape (224, 224, 3) — cv.imread yields
        channels-last BGR data.

    Raises:
        IOError: if the file cannot be decoded as an image.
    """
    im = cv.imread(imageName)
    if im is None:
        # cv.imread silently returns None on failure; fail loudly here
        # instead of letting cv.resize raise a cryptic assertion.
        raise IOError("cannot read image: %s" % imageName)
    # 224x224 matches the nn_model() input_shape.
    im = cv.resize(im, (224, 224))
    return np.array(im)

def read_dir_image(path):
    """Read every image under *path* and append the resized arrays to the
    module-level ``images`` list (filled in place; nothing is returned).

    Filenames are sorted first: os.listdir returns entries in arbitrary,
    platform-dependent order, and the labels loaded by read_label() are
    in a fixed row order, so a deterministic image order is required for
    images and labels to line up.
    """
    for fn in sorted(os.listdir(path)):
        images.append(read_img(os.path.join(path, fn)))


def read_label(path):
    """Load the target matrix from ``<path>/second_152.txt``.

    Args:
        path: directory containing the label file.

    Returns:
        numpy array parsed by np.loadtxt — presumably one row of 152
        regression targets per training image (TODO: confirm against the
        label file's producer).
    """
    # os.path.join instead of string concatenation for portable paths.
    return np.loadtxt(os.path.join(path, 'second_152.txt'))

# def generate_batch_data_random(x,y,batch_size):
#     ylen=len(y)
#     loopcount=ylen//batch_size
#     while(True):
#         i=randint(0,loopcount)
#         yield x[i * batch_size:(i + 1) * batch_size], y[i * batch_size:(i + 1) * batch_size]



def prepocess():
    """Load the training images and their labels.

    Returns:
        (x, y): x — array of 224x224x3 images read from the training
        directory in sorted filename order; y — label matrix from
        read_label().

    Note: the previous version passed the *directory* path straight to
    read_img(), but read_img handles a single file only (cv.imread on a
    directory returns None), so it always failed.
    """
    img_dir = "D:/facial/premiere/ayan/training"
    # Sorted order keeps images aligned with the fixed label row order.
    x = np.array([read_img(os.path.join(img_dir, fn))
                  for fn in sorted(os.listdir(img_dir))])
    y = read_label("D:")
    return x, y



# def nn_model():
#     model=Sequential()
#     model.add(Dense(32,input_shape=(500,500,),activation="sigmoid"))#表示又32个神经元，输入尺寸是500*500
#     model.add(MaxPooling2D(pool_size=(2,2)))#最大值池化
#     model.add(Dropout(0.3,noise_shape=None,seed=None))#增加一个dropout层，每次会有30%的神经元失效
#     model.add(Dense(50,activation="sigmoid"))#隐藏层中不需要输入尺寸，会自动计算
#     model.add(Dropout(0.2,noise_shape=None,seed=None))
#     model.add(MaxPooling2D(pool_size=(5,5)))#最大值池化
#     model.add(Dense(235,activation="sigmoid"))
#     model.summary()
#     model.compile(optimizer="adam",loss="mean_squared_error",metrics=["accuracy"])
#     return model
# nn_model()

# def nn_model():
#     model=Sequential()
#     model.add(Conv2D(filters=32,kernel_size=(3,3),input_shape=(1,720,1280,),activation="relu",padding="same"))#表示又32个神经元，输入尺寸是500*500
#     #model.add(MaxPooling2D(pool_size=(2,2)))#最大值池化
#     model.add(Dropout(0.3,noise_shape=None,seed=None))#增加一个dropout层，每次会有30%的神经元失效
#     model.add(Conv2D(64,kernel_size=(3,3),activation="relu",padding="same"))#隐藏层中不需要输入尺寸，会自动计算
#     model.add(Dropout(0.2,noise_shape=None,seed=None))
#     #model.add(MaxPooling2D(pool_size=(2,2)))#最大值池化
#     model.add(Flatten())
#     model.add(Dropout(rate=0.25))
#     model.add(Dense(1024,activation="relu"))
#     model.add(Dropout(rate=0.25))
#     model.add(Dense(235,activation="sigmoid"))
#     model.summary()
#     model.compile(optimizer="adam",loss="msle",metrics=["accuracy"])
#     return model
# nn_model()


def nn_model():
    """Build a VGG16-style CNN regressor: (224, 224, 3) input -> 152 outputs.

    Original author's note: accuracy was around 50% after ~394 epochs.

    Fixes vs. the previous version:
      * dropped ``data_format="channels_first"`` — it contradicted the
        channels-last ``input_shape=(224, 224, 3)`` and the HWC arrays
        that cv.imread/cv.resize produce;
      * dropped ``mode=0`` from BatchNormalization — that argument was
        removed in Keras 2 and raises a TypeError;
      * removed the unused local Adam optimizer (compile uses "nadam").
    """
    model = Sequential()

    # Block 1
    model.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=(224, 224, 3),
                     activation="tanh", padding="same"))
    model.add(Conv2D(filters=64, kernel_size=(3, 3), activation="elu", padding="same"))
    model.add(Dropout(0.2))
    model.add(MaxPooling2D(strides=(2, 2)))

    # Block 2
    model.add(Conv2D(filters=128, kernel_size=(3, 3), activation="tanh", padding="same"))
    model.add(Conv2D(filters=128, kernel_size=(3, 3), activation="selu", padding="same"))
    model.add(BatchNormalization(axis=-1))
    model.add(MaxPooling2D(strides=(2, 2)))

    # Block 3
    model.add(Conv2D(filters=256, kernel_size=(3, 3), activation="tanh", padding="same"))
    model.add(Conv2D(filters=256, kernel_size=(3, 3), activation="elu", padding="same"))
    model.add(Conv2D(filters=256, kernel_size=(3, 3), activation="selu", padding="same"))
    model.add(Dropout(0.3))
    model.add(MaxPooling2D(strides=(2, 2)))

    # Block 4
    model.add(Conv2D(filters=512, kernel_size=(3, 3), activation="tanh", padding="same"))
    model.add(Conv2D(filters=512, kernel_size=(3, 3), activation="elu", padding="same"))
    model.add(Conv2D(filters=512, kernel_size=(3, 3), activation="selu", padding="same"))
    model.add(Dropout(0.4))
    model.add(MaxPooling2D(strides=(2, 2)))

    # Block 5
    model.add(Conv2D(filters=512, kernel_size=(3, 3), activation="tanh", padding="same"))
    model.add(Conv2D(filters=512, kernel_size=(3, 3), activation="elu", padding="same"))
    model.add(Conv2D(filters=512, kernel_size=(3, 3), activation="selu", padding="same"))
    model.add(BatchNormalization(axis=-1))
    model.add(MaxPooling2D(strides=(2, 2)))

    # Regression head: 152 linear outputs (one per landmark coordinate —
    # presumably; confirm against second_152.txt).
    model.add(Flatten())
    model.add(Dense(4096, activation="selu"))
    model.add(Dense(4096, activation="tanh"))
    model.add(Dense(152))

    model.summary()
    model.compile(optimizer="nadam", loss="mse", metrics=["accuracy"])
    return model


def scheduler(epoch):
    """LearningRateScheduler callback for the module-level ``model``.

    Every 30 epochs (skipping epoch 0) the learning rate is multiplied
    by a random factor in [0.1, 0.2].  Multiples of 30 are always
    multiples of 5, so the old redundant ``epoch % 5`` gate is dropped
    without changing the decay schedule.

    Args:
        epoch: current epoch index supplied by Keras.

    Returns:
        The (possibly updated) learning rate to use for this epoch.
    """
    if epoch % 30 == 0 and epoch != 0:
        lr = K.get_value(model.optimizer.lr)
        ra = random.uniform(0.1, 0.2)
        K.set_value(model.optimizer.lr, lr * ra)
        # Report the *new* rate; the old code printed the stale value,
        # and printed "lr changed" on epochs where nothing changed.
        print("lr changed to {}".format(lr * ra))
        print("random=%lf" % ra)
    return K.get_value(model.optimizer.lr)


# ---- Data loading -----------------------------------------------------
# Module-level list filled in place by read_dir_image().
images=[]
read_dir_image("D:/facial/premiere/ayan/training/")
# x: stacked image arrays (presumably (n, 224, 224, 3) given read_img —
# TODO confirm), y: label rows from second_152.txt.
x=np.array(images)
y=np.array(read_label("D:"))
# Fixed random_state so the 70/30 split is reproducible across runs.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.30, random_state=30)
print(x)
print(y)
# print("1")

# x=read_img("D:/facial/premiere/jpg/training")
# y=read_label("D:")
# x,y=prepocess()
# train_x=x[100:]
# test_x=x[:49]
# train_y=y[100:]
# test_y=y[:49]
#print(train_x[0].shape)
#print(test_x[0].shape)
# print(test_y.shape)
# print(test_y.shape)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
# model=nn_model()

# reduce_lr = LearningRateScheduler(scheduler)
# model=resnet_model(152,(3,224,224))
# NOTE: `model` must stay a module-level name — scheduler() reads it as
# a global if the LearningRateScheduler callback is re-enabled.
model=nn_model()
# model = load_model('my_model -res_2000.h5')
# filepath="weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"
filepath="weights-best.hdf5"
# Keep only the checkpoint with the lowest validation loss.
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True,mode='min')
callbacks_list = [checkpoint]
# callbacks_list.append(reduce_lr)
# Long-running training; early results come from the checkpoint file.
results=model.fit(x_train,y_train,epochs=15000,batch_size=25,validation_data=(x_test,y_test),callbacks=callbacks_list)

# ---- Save final weights and plot the training curves ------------------
model.save('my_model.h5')


loss = results.history['loss']
val_loss = results.history['val_loss']

epochs = range(1, len(loss) + 1)

plt.plot(epochs, loss, 'bo', label = 'Training loss')
plt.plot(epochs, val_loss, 'b', label = 'Validation loss')
plt.title('Training And Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

plt.clf()

# The accuracy history key depends on the Keras flavor: standalone keras
# records 'acc'/'val_acc' while tf.keras records 'accuracy'/'val_accuracy'.
# Look up both so this works either way (the old code hard-coded 'acc'
# and raised a KeyError under tf.keras).
acc = results.history.get('acc', results.history.get('accuracy'))
val_acc = results.history.get('val_acc', results.history.get('val_accuracy'))

plt.plot(epochs, acc, 'bo', label = 'Training acc')
plt.plot(epochs, val_acc, 'b', label = 'Validation acc')
plt.title('Training And Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

# print(np.mean(results.history["mse"]))
