import os
import shutil
import sys
import importlib
import Utils.get_data
from sklearn.metrics.ranking import roc_auc_score
from keras.utils import np_utils
import numpy as np
import pandas as pd
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape, Layer
from keras.layers.convolutional import Convolution2D, MaxPooling2D, UpSampling2D
from keras import backend as K
from Tested_models.sigle_layer_autoencoder.custom import Dense_dec

# Root directory holding one sub-directory per experiment.
EXPERIMENTS_DIR = 'Tested_models'
# Per-experiment python files, copied from the 'model_templet/' templates
# by create() and imported dynamically by run()/submit().
MODEL = 'model.py'
CUSTOM = 'custom.py'
PREPROCESS = 'preprocess.py'
POSTPROCESS = 'postprocess.py'
TRAINING = 'training.py'
# Per-experiment sub-directory for logs, weights and submission files.
META_DIR = 'metadata'
# File names written under META_DIR (MODEL_JSON unused in this chunk).
MODEL_JSON = 'model.json'
TRAINING_JSON = 'training.json'
MODEL_W = 'weights.hdf5'
# Empty package marker so the experiment directory is importable.
INIT = '__init__.py'

def import_module(experiment_name, file_name):
    """Dynamically import <experiment_name>/<file_name> as a module.

    Slashes in the path become dots and a trailing '.py' is stripped,
    e.g. ('Tested_models/exp1', 'model.py') -> 'Tested_models.exp1.model'.
    """
    package_path = experiment_name.replace('/', '.')
    module_name = file_name.replace('.py', '')
    return importlib.import_module('{0}.{1}'.format(package_path, module_name))

def create(experiment_name):
    """Scaffold a new experiment directory under EXPERIMENTS_DIR.

    Creates the directory, drops an empty __init__.py (so it is an
    importable package), copies the template .py files from
    'model_templet/' and creates the empty metadata/ sub-directory.

    Raises OSError if the experiment directory already exists.
    """
    experiment_path = os.path.join(EXPERIMENTS_DIR, experiment_name).replace('\\', '/')
    os.mkdir(experiment_path)
    # Empty package marker; 'with' guarantees the handle is closed.
    with open(experiment_path + '/' + INIT, 'wb'):
        pass
    for file_name in [MODEL, CUSTOM, PREPROCESS, POSTPROCESS, TRAINING]:
        shutil.copyfile('model_templet/' + file_name, experiment_path + '/' + file_name)
    os.mkdir(experiment_path + '/' + META_DIR)
    print('Experiment \'{0}\' created'.format(experiment_name))

def run(experiment_name):
    """Train the experiment's model and checkpoint the best weights.

    Loads the experiment's model.py / training.py / preprocess.py modules,
    fetches the supervised training data, splits off a 10% validation set
    and trains for nb_iter outer iterations of nb_epoch single-epoch fits,
    logging train/val AUC after every epoch and saving the weights whenever
    validation AUC improves. In 'autoencoder' mode each epoch first fits an
    unsupervised reconstruction pass, then trains a fresh classifier on the
    encoded features.

    Parameters:
        experiment_name: directory name under EXPERIMENTS_DIR.
    """
    print('Running experiment \'{0}\''.format(experiment_name))
    experiment_path = os.path.join(EXPERIMENTS_DIR, experiment_name).replace('\\', '/')

    print('compiling model')
    model_modual = import_module(experiment_path, MODEL)
    training_modual = import_module(experiment_path, TRAINING)
    model = model_modual.get_model()
    training_metadata = training_modual.get_training_metadata()
    model.compile(optimizer=training_metadata.optimizer, loss=training_metadata.loss)
    if training_metadata.mode == 'autoencoder':
        model_autoencode = model
    print('loading data')
    train_X, train_y = Utils.get_data.get_supervised_data('C:\DC_data')

    print('preprocessing data pre training')
    preprocess_modual = import_module(experiment_path, PREPROCESS)
    train_X, train_y = preprocess_modual.pre_train(train_X, train_y)

    print('split training set to train and val')
    split_ratio = 0.1
    nb_samples = len(train_X)
    # Slice indices must be ints: the original float products
    # (nb_samples * 0.9) raise on modern numpy.
    split_at = int(nb_samples * (1 - split_ratio))
    train_X_ = train_X[:split_at, :]
    val_X = train_X[split_at:, :]
    train_y_ = train_y[:split_at]
    val_y = train_y[split_at:]

    if training_metadata.mode in ('classify', 'autoencoder'):
        # Binary problem -> one-hot targets for the categorical losses.
        train_y_ = np_utils.to_categorical(train_y_, 2)
        val_y = np_utils.to_categorical(val_y, 2)
    print('training model')
    for i in range(training_metadata.nb_iter):
        print('-' * 50)
        print('Iteration {0}'.format(i))
        print('-' * 50)

        print('pre-processing per iteration')
        train_X_i = preprocess_modual.per_iter(train_X_)
        val_X_i = preprocess_modual.per_iter(val_X)

        print('fitting model')
        # AUC is higher-is-better: start at 0 and checkpoint on improvement.
        # (The original initialised a minimum at 1.0 and saved when
        # val_auc < min, i.e. it kept the *worst* epoch's weights.)
        val_auc_best = 0.
        for e in range(training_metadata.nb_epoch):
            print('-' * 50)
            print('Epoch {0}'.format(e))
            print('-' * 50)
            train_X_e = preprocess_modual.per_epoch(train_X_i)
            val_X_e = preprocess_modual.per_epoch(val_X_i)
            if training_metadata.mode == 'autoencoder':
                # Unsupervised pass: learn to reconstruct the inputs.
                model_autoencode.fit(train_X_e, train_X_e, shuffle=training_metadata.shuffle, verbose=1,
                                     validation_data=(val_X_e, val_X_e),
                                     batch_size=training_metadata.batch_size,
                                     nb_epoch=1)
                model_autoencode.save_weights(experiment_path + '/' + META_DIR + '/autoencoder_W', overwrite=True)

                # Encode train/val through the decoder layer, then train the
                # classifier on the encoded features.
                # NOTE(review): weights_path='' — presumably Dense_dec loads
                # the autoencoder weights internally; confirm against custom.py.
                model_dec = Sequential()
                model_dec.add(Dense_dec(500, input_shape=(1138,), activation='sigmoid', weights_path=''))
                dec_X_train = K.eval(model_dec(K.variable(train_X_e)))
                dec_X_val = K.eval(model_dec(K.variable(val_X_e)))

                # Fresh classifier every epoch, trained on the encodings.
                model = model_modual.get_classify_model()
                model.compile(optimizer=training_metadata.optimizer, loss='categorical_crossentropy')
                print('fitting classify model')
                history_ = model.fit(dec_X_train, train_y_, shuffle=training_metadata.shuffle, verbose=1,
                                     validation_data=(dec_X_val, val_y),
                                     batch_size=training_metadata.batch_size,
                                     nb_epoch=1, class_weight=training_metadata.class_weights)
                print('evaluating AUC')
                train_predict = model.predict(dec_X_train, batch_size=training_metadata.batch_size, verbose=0)
                val_predict = model.predict(dec_X_val, batch_size=training_metadata.batch_size, verbose=0)
                train_auc = roc_auc_score(train_y_, train_predict)
                val_auc = roc_auc_score(val_y, val_predict)
                print('train_AUC: ' + str(train_auc) + ' | ' + 'val_AUC: ' + str(val_auc))
            else:
                history_ = model.fit(train_X_e, train_y_, shuffle=training_metadata.shuffle, verbose=1,
                                     validation_data=(val_X_e, val_y),
                                     batch_size=training_metadata.batch_size,
                                     nb_epoch=1, class_weight=training_metadata.class_weights)

                print('evaluating AUC')
                train_predict = model.predict(train_X_e, batch_size=training_metadata.batch_size, verbose=0)
                val_predict = model.predict(val_X_e, batch_size=training_metadata.batch_size, verbose=0)
                train_auc = roc_auc_score(train_y_, train_predict)
                val_auc = roc_auc_score(val_y, val_predict)
                print('train_AUC: ' + str(train_auc) + ' | ' + 'val_AUC: ' + str(val_auc))

            print('Updating performance log...')
            training_metadata.update_performance(history_.history, [train_auc.tolist()], [val_auc.tolist()])
            training_metadata.save_to_json(experiment_path + '/' + META_DIR + '/' + TRAINING_JSON)

            if val_auc > val_auc_best:
                print('saving weights')
                val_auc_best = val_auc
                model.save_weights(experiment_path + '/' + META_DIR + '/best_auc_' + MODEL_W, overwrite=True)

def submit(experiment_name):
    """Predict on the test set and write metadata/submit.csv.

    Loads the experiment's model with the best-AUC weights checkpointed by
    run(), predicts scores for the raw test data and writes a two-column
    (uid, score) CSV under the experiment's metadata directory.

    Parameters:
        experiment_name: directory name under EXPERIMENTS_DIR.
    """
    print('Creating submission for experiment \'{0}\''.format(experiment_name))
    experiment_path = os.path.join(EXPERIMENTS_DIR, experiment_name).replace('\\', '/')

    print('Loading models...')
    model_module = import_module(experiment_path, MODEL)
    training_module = import_module(experiment_path, TRAINING)
    training_metadata = training_module.get_training_metadata()
    model = model_module.get_model()

    print('loading weights')
    model.load_weights(experiment_path + '/' + META_DIR + '/best_auc_' + MODEL_W)
    print('Compiling models...')
    model.compile(optimizer=training_metadata.optimizer, loss=training_metadata.loss)

    # Imported for their side effects / later use.
    # NOTE(review): neither module is applied to the test data below — if
    # run() preprocessed the training data, predictions here see a
    # different input distribution. Confirm intended.
    preprocess_module = import_module(experiment_path, PREPROCESS)
    postprocess_module = import_module(experiment_path, POSTPROCESS)

    print('loading test data')
    test_data = Utils.get_data.get_submit_raw_data('C:\DC_data')
    test_X = test_data[:, 1:]                                # feature columns
    test_uid = np.asarray(test_data[:, 0], dtype=np.int32)   # first column: uid

    print('predicting test data')
    predict = model.predict(test_X)

    print('saving submit csv file')
    test_result = pd.DataFrame(columns=['uid', 'score'])
    test_result.uid = test_uid     # need to be 1-dim np array
    test_result.score = predict    # need to be 1-dim np array
    file_path = experiment_path + '/' + META_DIR + '/submit.csv'
    test_result.to_csv(file_path, index=False, encoding='utf-8')

if __name__ == '__main__':
    # CLI: <script> create|run|submit <experiment_name>
    if len(sys.argv) > 1:
        # The original used `assert` here, which is stripped under -O and
        # gives an opaque traceback; exit with a usage message instead.
        if len(sys.argv) < 3:
            sys.exit('usage: {0} create|run|submit <experiment_name>'.format(sys.argv[0]))
        command = sys.argv[1]
        if command == 'create':
            create(sys.argv[2])
        elif command == 'run':
            run(sys.argv[2])
        elif command == 'submit':
            submit(sys.argv[2])