#coding=utf-8


import Config as conf
import DataProcessor as dp
import keras
import keras_model.CustomModelCreator as cm
import keras_callback.CustomCallBack as cb
from keras.optimizers import Adam
from keras import losses
import time
import os
import math

#dp.getAd()
#dp.getUserProfile()
#dp.getAppCategory()

#exit()



def runModel(modelCreator, xTrain, yTrain, xTest, yTest):
    """Build, train and evaluate one model produced by *modelCreator*.

    Parameters
    ----------
    modelCreator : model-creator object (any subclass of BaseModel) exposing
        getModel(shape) and the path attributes used below
        (tensorBoardLogPath, trainLossFilePath, trainLossProgressFilePath,
        modelFilePath, weightsFilePath, testLossFilePath).
    xTrain, yTrain : training features / labels.
    xTest, yTest : held-out features / labels for the final evaluation.

    Side effects: trains the model, writes checkpoints and loss curves via
    the ModelSave callback, logs to TensorBoard, and writes the final test
    loss to modelCreator.testLossFilePath.
    """
    mc = modelCreator

    # Build the network; the training-data shape drives the input layer size.
    print('creating model')
    model = mc.getModel(xTrain.shape)

    # Binary classification setup: binary cross-entropy loss, Adam optimizer.
    model.compile(Adam(), loss=losses.binary_crossentropy, metrics=['accuracy'])

    tbCallBack = keras.callbacks.TensorBoard(
        log_dir=mc.tensorBoardLogPath, histogram_freq=0,
        batch_size=conf.BATCH_SIZE, write_graph=True, write_grads=False,
        write_images=False, embeddings_freq=0, embeddings_layer_names=None,
        embeddings_metadata=None)

    # train
    print('training')
    model.fit(
        xTrain, yTrain,
        batch_size=conf.BATCH_SIZE,
        epochs=conf.EPOCHS,
        callbacks=[cb.ModelSave(lossSavePath=mc.trainLossFilePath,
                                lossProgressSavePath=mc.trainLossProgressFilePath,
                                modelSavePath=mc.modelFilePath,
                                weightsSavePath=mc.weightsFilePath),
                   tbCallBack])

    # Evaluate on the held-out set. evaluate() returns [loss, accuracy];
    # only the loss is persisted.
    print('testing')
    testLoss = model.evaluate(xTest, yTest, batch_size=conf.BATCH_SIZE)
    with open(mc.testLossFilePath, 'w') as f:
        f.write(str(testLoss[0]))

# Mode toggle: True -> rebuild a model and write a submission CSV;
# False -> train and evaluate a series of models.
#isPredict = True
isPredict = False

if isPredict:

    # data
    print("preparing predict data")
    #ids, data = dp.getPredictData(useDb=True)
    ids, data = dp.getPredictData(useDb=False)

    # Rebuild the same architecture that was trained.
    # NOTE(review): getModel() appears to only build the architecture;
    # confirm it loads the trained weights (e.g. from mc.weightsFilePath)
    # before predicting, otherwise predictions come from an untrained model.
    mc = cm.DynamicLayerModelCreator(layerCount=2, nx=64, dropout=0.1)
    model = mc.getModel(data.shape)

    # predict
    print("predicting")
    results = model.predict(data)

    # Every instance id must have exactly one predicted probability.
    assert len(ids) == len(results)

    # Save results. BUGFIX: the file is opened in text mode, so writing
    # os.linesep would be re-translated to '\r\r\n' on Windows; write '\n'
    # and let the text layer handle platform newline translation.
    saveFilePath = mc.modelDir + '/sample_submission.csv'
    with open(saveFilePath, 'w') as f:
        f.write('instance_id,prob\n')
        for instanceId, result in zip(ids, results):
            f.write(instanceId + ',' + str(result[0]) + '\n')

else:

    # get data
    print("preparing for data")
    start = time.time()

    #xTrain, yTrain, xTest, yTest = dp.getData(useDb=True)
    xTrain, yTrain, xTest, yTest = dp.getData(useDb=False)

    print("preparing data cost %s seconds" % (time.time() - start))

    # Train the same architecture at several depths for comparison.
    # Any creator derived from BaseModel can be substituted here
    # (e.g. ThreeLayerModelCreator variants).
    for layerCount in (2, 5, 10):
        mc = cm.DynamicLayerModelCreator(layerCount=layerCount, nx=64,
                                         dropout=0.2)
        runModel(mc, xTrain, yTrain, xTest, yTest)


