import numpy as np
import tensorflow as tf
from tensorflow import keras
from  tensorflow.keras import layers
from load_data import load_cifar10_data
#from keras.optimizers import SGD
#from keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, add, Flatten, Activation
#from keras.layers.normalization import BatchNormalization
#from keras.models import Model
import matplotlib.pyplot as plt
from tensorflow.keras.utils import plot_model
from tensorflow.keras import utils
from solve_cudnn_error import *

solve_cudnn_error()  # project helper: works around cuDNN init failures on some GPUs

# Hyper-parameters / input configuration
img_rows, img_cols = 224, 224 # h,w — ResNet50's default ImageNet input size
channel = 3  # RGB
num_classes = 10   # CIFAR-10 has 10 classes
batch_size = 32
nb_epoch = 10

# Load CIFAR-10 via the project helper; presumably it resizes the 32x32
# images up to (img_rows, img_cols) and one-hot encodes labels — TODO
# confirm against load_data.py.
X_train, Y_train, X_valid, Y_valid = load_cifar10_data(img_rows, img_cols)


# ResNet50 backbone pre-trained on ImageNet, without its classification head.
base_model = keras.applications.ResNet50(
    weights='imagenet',
    include_top=False,
    input_shape=(img_rows, img_cols, channel),
)
# base_model.trainable = False  # uncomment to freeze the backbone (feature extraction only)

# New classification head: a 7x7 average pool collapses the backbone's final
# feature map (7x7 spatial for a 224x224 input) to 1x1, then flatten + softmax.
# Use the num_classes constant instead of a hard-coded 10 so the head stays
# consistent with the rest of the script.
model = tf.keras.Sequential([
    base_model,
    layers.AveragePooling2D((7, 7)),
    layers.Flatten(),
    layers.Dense(num_classes, activation='softmax'),
])

# Save a diagram of the assembled model (requires pydot + graphviz installed).
plot_model(model, to_file='resnet-cifar.png', show_shapes=True)

# SGD with Nesterov momentum. `learning_rate` replaces the deprecated `lr`
# argument. NOTE(review): `decay` was removed from the non-legacy Keras
# optimizers in TF >= 2.11 — if this raises, switch to
# tf.keras.optimizers.legacy.SGD or a LearningRateSchedule.
sgd = tf.keras.optimizers.SGD(learning_rate=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

# Fine-tune the whole network (backbone + new head) on the resized CIFAR-10 data.
H = model.fit(
    X_train, Y_train,
    batch_size=batch_size,
    epochs=nb_epoch,
    shuffle=True,
    verbose=1,
    validation_data=(X_valid, Y_valid),
)
# Collect the training curves once. The original code appended each history
# list to itself (e.g. `acc += H.history['accuracy']`), which duplicated every
# curve and doubled the x-axis of the plots below — a leftover from a removed
# two-phase fine-tuning loop. A single fit produces a single history.
acc = H.history['accuracy']
val_acc = H.history['val_accuracy']
loss = H.history['loss']
val_loss = H.history['val_loss']

# Plot accuracy (top) and loss (bottom) curves for training vs. validation,
# then persist the trained model.
fig = plt.figure(figsize=(12, 12))

ax_acc = fig.add_subplot(2, 1, 1)
ax_acc.plot(acc, label='Training Accuracy')
ax_acc.plot(val_acc, label='Validation Accuracy')
ax_acc.set_ylim([0, 1])
ax_acc.legend(loc='lower right')
ax_acc.set_title('Training and Validation Accuracy')

ax_loss = fig.add_subplot(2, 1, 2)
ax_loss.plot(loss, label='Training Loss')
ax_loss.plot(val_loss, label='Validation Loss')
ax_loss.set_ylim([0, 2.0])
ax_loss.legend(loc='upper right')
ax_loss.set_title('Training and Validation Loss')
ax_loss.set_xlabel('epoch')

plt.tight_layout()
plt.show()

# Save the trained model (TensorFlow SavedModel directory).
model.save("resnet_model")

