import numpy as np 
import tensorflow as tf 
import cv2 as cv 
import os
import matplotlib.pyplot as plt
import sklearn
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt 
from keras.layers import Input,Dense,Activation,Dropout,MaxPooling2D,Softmax,Conv2D,Flatten,BatchNormalization,Average,Permute,Reshape
from keras.models import Model,Sequential,load_model
from keras.callbacks import ModelCheckpoint
from keras.preprocessing.image import img_to_array,load_img
from keras import optimizers
from keras.utils.generic_utils import get_custom_objects
import keras.backend as K 
from keras.callbacks import LearningRateScheduler
from PIL import Image
from resnet import resnet_model,conv_block,identity_block
import random
from resnet import *

def read_input(path):
    """Load the pre-merged landmark feature array stored under *path*.

    Expects a file named ``merge_train_152_81.npy`` directly below
    *path* (Windows-style separator, matching the rest of the script).
    """
    feature_file = path + '\\merge_train_152_81.npy'
    return np.load(feature_file)

def read_img(imageName):
    """Read one image file and return it as a channels-first array.

    The image is resized to 224x224 and its axes are swapped from
    OpenCV's (H, W, C) layout to (C, W, H) via ``swapaxes(0, 2)``,
    which is the layout the resnet branch expects.

    Raises:
        IOError: if the file cannot be read as an image.
    """
    im = cv.imread(imageName)
    # cv.imread returns None (no exception) for missing/corrupt files;
    # fail early with a clear message instead of crashing in resize.
    if im is None:
        raise IOError('cannot read image: %s' % imageName)
    im = cv.resize(im, (224, 224))
    # (224, 224, 3) -> (3, 224, 224); np.asarray avoids the two
    # redundant copies the original made with np.array.
    return np.asarray(im).swapaxes(0, 2)

def read_dir_image(path):
    """Load every file in *path* and append each image to the global ``images`` list.

    Keeps the original side effect (appending to the module-level
    ``images``) for existing callers, but also returns the list so new
    code does not have to reach for the global. Iteration order follows
    ``os.listdir`` (arbitrary), unchanged from the original so the
    image/label pairing is not disturbed.
    """
    for fn in os.listdir(path):
        images.append(read_img(os.path.join(path, fn)))
    return images


def read_label(path):
    """Load the label matrix from ``second_152.txt`` located under *path*."""
    label_file = path + '/second_152.txt'
    return np.loadtxt(label_file)

def plot_result(results):
    """Plot training curves from a Keras ``History`` object.

    Shows two figures in sequence: training/validation loss, then
    training/validation accuracy.
    """
    history = results.history

    loss = history['loss']
    val_loss = history['val_loss']

    epochs = range(1, len(loss) + 1)

    plt.plot(epochs, loss, 'r', label='Training loss')
    plt.plot(epochs, val_loss, 'b', label='Validation loss')
    plt.title('Training And Validation Loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.show()

    plt.clf()

    # BUG FIX: Keras >= 2.3 / tf.keras records the metric under
    # 'accuracy' when the model is compiled with metrics=["accuracy"]
    # (as done below); older standalone Keras used 'acc'. Accept either
    # instead of raising KeyError on modern versions.
    acc = history.get('acc', history.get('accuracy'))
    val_acc = history.get('val_acc', history.get('val_accuracy'))

    plt.plot(epochs, acc, 'r', label='Training acc')
    plt.plot(epochs, val_acc, 'b', label='Validation acc')
    plt.title('Training And Validation Accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.legend()
    plt.show()

def creat_resnet(out_class, input_shape):
    """Build a ResNet-50-style model for channels-first input.

    Args:
        out_class: size of the final Dense layer (number of outputs).
        input_shape: channels-first input shape, e.g. ``(3, 224, 224)``.

    Returns:
        An uncompiled ``Model`` mapping the image input to *out_class*
        linear outputs (no final activation).
    """
    inputs = Input(shape=input_shape)

    # conv1: (3,224,224) -> (64,112,112)
    x = Conv2D(64, (7, 7), strides=2, padding='same',
               data_format="channels_first")(inputs)

    # BUG FIX: the data is channels-first, so the channel axis is 1,
    # not -1 — axis=-1 would normalize over the width dimension.
    x = BatchNormalization(axis=1)(x)  # bn_conv1
    x = Activation('tanh')(x)  # conv1 activation (tanh, per original)

    # BUG FIX: use the explicitly imported MaxPooling2D name (MaxPool2D
    # was never imported at the top of the file) and pass
    # data_format="channels_first" — Keras pooling defaults to
    # channels-last and would otherwise pool over the wrong axes.
    x = MaxPooling2D(pool_size=(3, 3), strides=2,
                     data_format="channels_first")(x)  # -> (64,56,56)

    # NOTE(review): conv_block/identity_block come from the project's
    # resnet module; assumed to operate channels-first — confirm there.

    # block1 (64,64,256) x3, in: (256?,56,56) -> out: (256,56,56)
    x = conv_block(x, [64, 64, 256])
    x = identity_block(x, [64, 64, 256])
    x = identity_block(x, [64, 64, 256])

    # block2 (128,128,512) x4 -> out: (512,28,28)
    x = conv_block(x, [128, 128, 512])
    x = identity_block(x, [128, 128, 512])
    x = identity_block(x, [128, 128, 512])
    x = identity_block(x, [128, 128, 512])

    # block3 (256,256,1024) x6 -> out: (1024,14,14)
    x = conv_block(x, [256, 256, 1024])
    x = identity_block(x, [256, 256, 1024])
    x = identity_block(x, [256, 256, 1024])
    x = identity_block(x, [256, 256, 1024])
    x = identity_block(x, [256, 256, 1024])
    x = identity_block(x, [256, 256, 1024])

    # block4 (512,512,2048) x3 -> out: (2048,7,7)
    x = conv_block(x, [512, 512, 2048])
    x = identity_block(x, [512, 512, 2048])
    x = identity_block(x, [512, 512, 2048])

    # Global 7x7 max-pool: (2048,7,7) -> (2048,1,1). data_format fix as above.
    x = MaxPooling2D(pool_size=(7, 7), strides=1,
                     data_format="channels_first")(x)

    x = Flatten()(x)

    # Head adapted from the original 1000-way ImageNet classifier to
    # *out_class* linear outputs (activation intentionally omitted).
    x = Dense(out_class)(x)

    model = Model(inputs=inputs, outputs=x)
    return model

def dense(model_input):
    """Build the fully-connected branch over the landmark feature vector.

    Args:
        model_input: input shape tuple, e.g. ``(162,)``.

    Returns:
        An uncompiled ``Model`` mapping the feature input to 152
        tanh-activated outputs.

    BUG FIX: the original ignored *model_input* and hard-coded
    ``Input(shape=(162,))``; it also created an unused ``Sequential()``.
    Using the parameter keeps the existing call ``dense((162,))``
    working while allowing other input sizes.
    """
    inputs = Input(shape=model_input)
    x = Dense(512, activation='elu')(inputs)
    x = Dropout(0.2)(x)
    x = Dense(512, activation='selu')(x)
    x = Dropout(0.3)(x)
    x = Dense(512, activation='elu')(x)
    x = Dropout(0.4)(x)
    x = Dense(512, activation='tanh')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='tanh')(x)
    x = Dropout(0.6)(x)
    x = Dense(152, activation='tanh')(x)
    return Model(inputs, x)
    

# def merge_nn(res,den):


#     # opt = Adam(lr=1e-3, decay=1e-3 / 200)
    
#     return Model


# ---------------------------------------------------------------------------
# Training script: loads face images and landmark features, builds the two
# branches, averages their 152-dim outputs and fits the combined model.
# NOTE(review): paths are hard-coded to a Windows layout — adjust as needed.
# ---------------------------------------------------------------------------

# Global list filled in-place by read_dir_image().
images = []
read_dir_image("D:/facial/premiere/ayan/training/")
x = np.array(images)
y = np.array(read_label("D:"))
# BUG FIX: both splits below must share the SAME random_state. The original
# called train_test_split twice without one, so the image rows (x_train) and
# the landmark rows (x_train_l) were shuffled independently — the two model
# branches received mismatched samples and y_train only matched the images.
# With a fixed seed and equal-length inputs, both splits pick identical
# index permutations, keeping image/landmark/label rows aligned.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.30, random_state=30)

x = np.array(read_input('D:\\facial\\neural_network'))
y = np.array(read_label('D:'))
x_train_l, x_test_l, y_train_l, y_test_l = train_test_split(x, y, test_size=0.30, random_state=30)
x_train_l = x_train_l.reshape((len(x_train_l), 162))
x_test_l = x_test_l.reshape((len(x_test_l), 162))

# Two branches: dense net over the 162-dim landmark vector, ResNet over the
# (3,224,224) channels-first image. Both emit 152 values.
den = dense((162,))
res = creat_resnet(152, (3, 224, 224))

# Combine the branch outputs by element-wise averaging.
combineInput = Average()([den.output, res.output])
model = Model(inputs=[res.input, den.input], outputs=combineInput)
model.summary()
model.compile(optimizer='nadam', loss="mae", metrics=["accuracy"])

# Checkpoint keeps only the weights with the lowest validation loss.
filepath = "mix-weights-best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]

result = model.fit([x_train, x_train_l], y_train,
                   validation_data=([x_test, x_test_l], y_test),
                   epochs=5000, batch_size=5, callbacks=callbacks_list)
model.save('mix_model.hdf5')
plot_result(result)





