# -*- coding: utf-8 -*-
"""
Created on Sun Sep  2 08:45:05 2018

@author: Administrator
"""

import numpy as np
import tensorflow as tf
import random as rn

import os
# --- Reproducibility setup (must run before Keras builds its session) ---
# Fix the hash seed and the NumPy / stdlib `random` seeds so repeated runs
# produce identical train/validation results.
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(42)
rn.seed(12345)
# Single-threaded op execution: multi-threaded reductions are a source of
# non-determinism in TF 1.x.  NOTE(review): this is the TF 1.x API
# (ConfigProto/Session); it will not run unmodified on TF 2.x.
session_conf = tf.ConfigProto(device_count={"CPU": 4},intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)

from keras import backend as K


# Seed the TF graph-level RNG and install the configured session as the
# one Keras uses for all subsequent model building/training.
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)



import pandas as pd

import pickle
from sklearn.model_selection import PredefinedSplit

from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation

from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
from keras import regularizers
from keras import initializers
from sklearn.metrics import mean_squared_error
# Project root for data, checkpoints and saved models (Windows path).
Path = 'D:\\APViaML'
from keras.callbacks import ModelCheckpoint
from keras.models import load_model

from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
# Pandas console-display preferences only; no effect on computations.
pd.set_option('display.max_columns', 50)
pd.set_option('display.max_rows', 100)
pd.set_option('display.float_format', lambda x: '%.3f' % x)


def get_demo_dict_data():
    """Load the pickled top-1000 demo dataset from the project data folder.

    Returns:
        The unpickled object; callers index it as a dict with 'X' and 'Y'
        keys (pandas objects, judging by the `.loc` usage downstream).
    """
    # Use a context manager so the file handle is closed even if
    # pickle.load raises; the original open/close pair leaked the
    # handle on error.
    with open(Path + '\\data\\alldata_demo_top1000.pkl', 'rb') as file:
        raw_data = pickle.load(file)
    return raw_data

# Load features (X) and targets (Y) once at module import; everything
# downstream slices these two pandas objects by year-string index.
data = get_demo_dict_data()

top_1000_data_X = data['X']
top_1000_data_Y = data['Y']
# Drop the container dict so only the two frames stay resident.
del data

def creat_data(num,df_X=top_1000_data_X,df_Y=top_1000_data_Y):
    '''
    Data providing function.

    Kept separate from the model function so hyperopt does not reload
    the data on every evaluation run.

    Slices an expanding training window (1958 .. num+1987), a 12-year
    validation window (num+1976 .. num+1987) and a single test year
    (num+1988) out of the year-indexed frames, returning numpy arrays.

    NOTE(review): the validation window lies inside the training window
    (both end at num+1987) — confirm this overlap is intentional.
    '''
    # Year labels as strings, matching the frames' string index.
    train_start = str(1958)
    train_end = str(num + 1987)
    valid_start = str(num + 1976)
    valid_end = str(num + 1987)
    test_year = str(num + 1988)

    # Label-based slicing (.loc) is inclusive of both endpoints.
    X_traindata = np.array(df_X.loc[train_start:train_end])
    Y_traindata = np.array(df_Y.loc[train_start:train_end])
    X_vdata = np.array(df_X.loc[valid_start:valid_end])
    Y_vdata = np.array(df_Y.loc[valid_start:valid_end])
    X_testdata = np.array(df_X.loc[test_year])
    Y_testdata = np.array(df_Y.loc[test_year])

    return X_traindata, Y_traindata, X_vdata, Y_vdata, X_testdata, Y_testdata


def Evaluation_fun(predict_array,real_array):
    """Out-of-sample R-squared, in percent, rounded to 3 decimals.

    Computes 100 * (1 - SSE / sum(real^2)), i.e. an R^2 benchmarked
    against a zero forecast rather than the mean.

    Args:
        predict_array: sequence of predicted values.
        real_array: sequence of realized values, same length.

    Returns:
        Rounded score (float).

    Raises:
        ValueError: if the two sequences differ in length.  (The original
        printed a message and then crashed with NameError because
        ``result`` was never assigned on that branch.)
    """
    if len(predict_array) != len(real_array):
        raise ValueError('predict_array and real_array must have the same length')
    # Vectorized replacement for the original element-wise append loops.
    pred = np.asarray(predict_array, dtype=float)
    real = np.asarray(real_array, dtype=float)
    sse = np.sum(np.square(pred - real))
    sst = np.sum(np.square(real))
    result = round(100 * (1 - sse / sst), 3)
    return result

# Hyperopt search space for the Adam optimizer and training loop.
# NOTE: each hp.* label string must byte-match its dict key — hyperopt
# reports results under the label, and f_NN1 reads params by key.
space = {'ll_float':hp.uniform('ll_float',0.01,0.2),
         'lr': hp.loguniform('lr',np.log(0.005),np.log(0.2)),
         'beta_1_float':hp.uniform('beta_1_float',0.8,0.95),
         'beta_2_float':hp.uniform('beta_2_float',0.98,0.9999),
         'epsilon_float':hp.uniform('epsilon_float',1e-09,1e-07), ##note
         'batch_size': hp.quniform('batch_size',10,500,1),
         'epochs': hp.quniform('epochs',20,50,1)
         }
    
# Number of hyperopt evaluations.  NOTE(review): the original comment
# said "when set 50, something will be wrong" — cause unknown; verify.
try_num1 = int(50)




# Window offset: i=10 selects test year 1998 (i + 1988) with training
# data from 1958 through 1997.
i=10
print(i)
# Split data into train / validation / test arrays (module-level, used
# as globals inside f_NN1).
X_traindata, Y_traindata, X_vdata, Y_vdata, X_testdata, Y_testdata = creat_data(num=i)

#define NN1
def f_NN1(params):
    """Hyperopt objective: train NN1 once and score it.

    Builds a one-hidden-layer network (Dense(32) -> ReLU -> BatchNorm ->
    Dense(1)), trains it on the module-level train/validation arrays,
    reloads the checkpointed best-validation-loss model, and returns the
    hyperopt result dict.

    Args:
        params: dict sampled from ``space`` with keys 'll_float', 'lr',
            'beta_1_float', 'beta_2_float', 'epsilon_float',
            'batch_size', 'epochs'.

    Returns:
        dict with 'loss' (negated validation score, so hyperopt
        minimizes), 'status', 'y_pre_list' (test predictions),
        'test_score', and 'models' (the best Keras model).
    """
    ## unpack hyperparameters sampled by hyperopt
    ll_float = params["ll_float"]            # L1 regularization weight
    learn_rate_float = params["lr"]
    beta_1_float = params["beta_1_float"]
    beta_2_float = params["beta_2_float"]
    epsilon_float = params["epsilon_float"]
    batch_size_num = params['batch_size']    # quniform returns floats; cast below
    epochs_num = params['epochs']

    ## model structure
    model_NN1 = Sequential()
    init = initializers.he_normal(seed=100)  # fixed seed for reproducible init
    model_NN1.add(Dense(32, input_dim=len(X_traindata[0]),
                        kernel_initializer=init,
                        kernel_regularizer=regularizers.l1(ll_float)))
    model_NN1.add(Activation("relu"))
    model_NN1.add(BatchNormalization())
    model_NN1.add(Dense(1))

    ## compile model
    adam = Adam(lr=learn_rate_float, beta_1=beta_1_float,
                beta_2=beta_2_float, epsilon=epsilon_float)
    model_NN1.compile(loss='mse', optimizer=adam, metrics=['mse'])

    ## callbacks: stop early on stalled val_loss, checkpoint the best model
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.0001,
                                   patience=2, verbose=0, mode='auto')
    model_filepath = Path + '\\model\\NN1\\temp\\best_weights.h5'
    # BUG FIX: save_best_only expects a bool; the original passed the
    # string 'True' (truthy by accident — 'False' would also be truthy).
    checkpoint = ModelCheckpoint(filepath=model_filepath,
                                 save_weights_only=False,
                                 monitor='val_loss', mode='min',
                                 save_best_only=True)
    callback_lists = [early_stopping, checkpoint]

    ## fit model (shuffle=False keeps runs reproducible with the fixed seeds)
    model_NN1.fit(X_traindata, Y_traindata,
                  batch_size=int(batch_size_num),
                  epochs=int(epochs_num),
                  verbose=0,
                  validation_data=(X_vdata, Y_vdata),
                  callbacks=callback_lists,
                  shuffle=False)

    ## reload the best checkpointed model (not the last-epoch weights)
    best_model = load_model(model_filepath)

    ## validation score drives the hyperopt search
    Y_pre_v = best_model.predict(X_vdata, verbose=0)
    Y_pre_vlist = list(Y_pre_v[:, 0])
    v_score = Evaluation_fun(Y_pre_vlist, Y_vdata)

    ## test-set prediction and score, recorded for later analysis
    Y_pre = best_model.predict(X_testdata, verbose=1)
    Y_pre_list = list(Y_pre[:, 0])
    test_score = Evaluation_fun(Y_pre_list, Y_testdata)

    # hyperopt minimizes 'loss', so negate the validation score
    return {'loss': -v_score, 'status': STATUS_OK,
            'y_pre_list': Y_pre_list, 'test_score': test_score,
            'models': best_model}

# Run the TPE search; every trial's result dict is recorded in `trials`.
trials = Trials()
fmin(f_NN1, space, algo=tpe.suggest, max_evals=try_num1, trials=trials)

# Locate the trial with the lowest loss.  The original scanned
# range(try_num1) keeping the LAST index of the minimum; with continuous
# float losses ties are effectively impossible, so a direct first-index
# lookup is equivalent — and it cannot leave `key` unbound.
loss_list = trials.losses()
key = loss_list.index(min(loss_list))
best_results = trials.results[key]


# Persist the best model, named after its test year (i + 1988).
final_model = best_results['models']
final_model.save(Path + '\\model\\NN1\\' + str(i + 1988) + '_Model_NN1_Top1000_Prediction.h5')