# -*- coding:utf8 -*-

from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD, Adadelta, Adagrad
from keras.utils import np_utils, generic_utils
from data_prepare.load_data import load_hdf5_data_train,load_hdf5_data_test
import matplotlib.pyplot as plt
from my_core_layer.PLRelu import *
import os
import csv

batch_size = 128
nb_classes = 40
nb_epoch = 50
data_augmentation = False

# shape of the image (SHAPE x SHAPE)
img_rows, img_cols = 64, 64

# the CIFAR10 images are RGB
img_channels = 3

print('ConVNet_offline')

# the data, shuffled and split between tran and test sets
(X_train, y_train) = load_hdf5_data_train(dataset='/media/dell/cb552bf1-c649-4cca-8aca-3c24afca817b/dell/wxm/Data/JD/ProData/64/submit/trainVec')
X_test = load_hdf5_data_test(dataset='/media/dell/cb552bf1-c649-4cca-8aca-3c24afca817b/dell/wxm/Data/JD/ProData/64/submit/testVec')

print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)

model = Sequential()
model.add(Convolution2D(96, 11, 11, input_shape=(img_channels, img_rows, img_cols),subsample=4))
model.add(relu())

model.add(Convolution2D(256, 5, 5))
model.add(relu())
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Convolution2D(384, 3, 3))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(relu())

model.add(Convolution2D(384, 3, 3))
model.add(relu())

model.add(Convolution2D(384, 3, 3))
model.add(relu())

model.add(Flatten())

model.add(Dense(4096))
model.add(relu())
model.add(Dropout(0.5))

model.add(Dense(2048))
model.add(relu())
model.add(Dropout(0.5))

model.add(Dense(nb_classes))
model.add(Activation('softmax'))

# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=0.01, decay=0.0005, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)

if not data_augmentation:
    print("Not using data augmentation or normalization")
    print('using relu activation')

    X_train = X_train.astype("float32")
    X_test = X_test.astype("float32")
    X_train /= 255
    X_test /= 255
    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,show_accuracy=True)
    Pred = model.predict_classes(X_test)
    model.save_weights('/media/dell/cb552bf1-c649-4cca-8aca-3c24afca817b/dell/wxm/Data/JD/Weights/submit/224/Test1_Noise_relu.hdf5',overwrite=True)
    # answer
    answerFile = '/home/dell/wxm/Code/JD/log_records/submit/baseline/224/answer4_Noise_relu.csv'
    f = file(answerFile,'wb')
    csvwriter = csv.writer(f)
    names = []
    print Pred
    for img in os.listdir('/media/dell/cb552bf1-c649-4cca-8aca-3c24afca817b/dell/wxm/Data/JD/test224'):
        names.append(img)
    if len(Pred) != len(names):
        print 'submit fail'
    print "Pred length: " + str(len(Pred))
    print "names Length:" + str(len(names))
    for i in range(len(Pred)):
        answer = [names[i] , str(Pred[i])]
        csvwriter.writerow(answer)
    f.close()

else:
    print("Using real time data augmentation")

    trainAccu = []
    trainLoss = []
    # this will do preprocessing and realtime data augmentation
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=20,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.2,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.2,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images

    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    datagen.fit(X_train)
    i = 1
    for e in range(nb_epoch):
        print('-'*40)
        print('Epoch', e)
        print('-'*40)
        print("Training...")
        train_flag = 0
        trainAcAll = 0
        trainLoAll = 0
        # batch train with realtime data augmentation
        progbar = generic_utils.Progbar(X_train.shape[0])
        for X_batch, Y_batch in datagen.flow(X_train, Y_train,batch_size=batch_size):
            train_flag += 1
            trainLo,trainAc = model.train_on_batch(X_batch, Y_batch,accuracy=True,)
            trainAcAll += trainAc
            trainLoAll += trainLo
            progbar.add(X_batch.shape[0], values=[("train loss", trainLo),("train accu", trainAc)])
        trainAccu.append(trainAcAll/train_flag)
        trainLoss.append(trainLoAll/train_flag)

    Pred = model.predict_classes(X_test)

    plt.plot(trainAccu,color = 'red')
    plt.savefig('/home/dell/wxm/Code/JD/log_records/submit/Aug/224/TestE50Accu.png', dpi=128)
    plt.figure()
    plt.plot(trainLoss,color = 'red')
    plt.savefig('/home/dell/wxm/Code/JD/log_records/submit/Aug/224/TestE50Loss.png', dpi=128)
    model.save_weights('/media/dell/cb552bf1-c649-4cca-8aca-3c24afca817b/dell/wxm/Data/JD/Weights/submit/64/TestE50_noise.hdf5',overwrite=True)
    # answer
    answerFile = '/home/dell/wxm/Code/JD/log_records/submit/Aug/224/answerE50_noise.csv'
    f = open(answerFile,'w')
    names = []
    for img in os.listdir('/media/dell/cb552bf1-c649-4cca-8aca-3c24afca817b/dell/wxm/Data/JD/test224'):
        names.append(img)
    if len(Pred) != len(names):
        print 'submit fail'
    for i in len(Pred):
        answer = names[i] + ',' + Pred[i] + '\n'
        f.write(answer)
    f.close()