# -*- coding: utf-8 -*-
"""
Created on Wed Aug 29 19:53:53 2018

@author: Administrator
"""

# -*- coding: utf-8 -*-
"""
Created on Wed Aug 29 14:26:55 2018

@author: Administrator
"""

import numpy as np
import tensorflow as tf
import random as rn

import os
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(42)
rn.seed(12345)
from keras import backend as K
 
K.clear_session()
tf.reset_default_graph()
tf.set_random_seed(1234)

import pandas as pd
import datetime
import pickle
from sklearn.model_selection import PredefinedSplit

from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation

from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
from keras import regularizers
from keras import initializers
from sklearn.metrics import mean_squared_error
Path = 'D:\\APViaML'
from keras.callbacks import ModelCheckpoint
from keras.models import load_model

from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
pd.set_option('display.max_columns', 50)
pd.set_option('display.max_rows', 100)
pd.set_option('display.float_format', lambda x: '%.3f' % x)


def get_demo_dict_data():
    """Load the pickled demo dataset from <Path>\\data\\alldata_demo_top1000.pkl.

    Returns
    -------
    The unpickled object; the call sites below read keys 'X' and 'Y' from it.

    The original opened the file with open()/close(); a ``with`` block is used
    instead so the handle is closed even if ``pickle.load`` raises.
    """
    # NOTE(review): pickle.load executes arbitrary code from the file — fine
    # for this local data file, but never point it at untrusted input.
    with open(Path + '\\data\\alldata_demo_top1000.pkl', 'rb') as file:
        return pickle.load(file)

# Load the demo dataset once at module import time.
data = get_demo_dict_data()

# 'X' holds the features and 'Y' the targets — presumably year-indexed
# pandas DataFrames, since creat_data() below slices them with .loc and
# year strings (TODO confirm against the pickle's producer).
top_1000_data_X = data['X']
top_1000_data_Y = data['Y']

def creat_data(num,df_X=top_1000_data_X,df_Y=top_1000_data_Y):
    '''
    Slice the full panel into train / validation / test windows for one
    rolling step.

    Kept separate from the model function so that hyperopt does not reload
    the data on every evaluation run.

    Parameters
    ----------
    num : int
        Rolling-step offset; step ``num`` tests on year ``1988 + num``.
    df_X, df_Y : pandas DataFrames with a year-string sliceable index
        (default to the module-level top-1000 data).

    Returns
    -------
    Six numpy arrays: X/Y train, X/Y validation, X/Y test.

    NOTE(review): the validation window (num+1976 .. num+1987) lies inside
    the training window (1958 .. num+1987) — presumably intentional for
    this setup, but worth confirming.
    '''
    train_start = '1958'
    train_end = str(1987 + num)
    valid_start = str(1976 + num)
    valid_end = str(1987 + num)
    test_year = str(1988 + num)

    X_train = np.array(df_X.loc[train_start:train_end])
    Y_train = np.array(df_Y.loc[train_start:train_end])
    X_valid = np.array(df_X.loc[valid_start:valid_end])
    Y_valid = np.array(df_Y.loc[valid_start:valid_end])
    X_test = np.array(df_X.loc[test_year])
    Y_test = np.array(df_Y.loc[test_year])

    return X_train, Y_train, X_valid, Y_valid, X_test, Y_test


def Evaluation_fun(predict_array, real_array):
    """Out-of-sample R^2-style score, in percent.

    Computes ``round(100 * (1 - SSE / SS_real), 3)`` where SSE is the sum
    of squared prediction errors and SS_real the sum of squared realized
    values (an R^2 measured against a zero benchmark, not the mean).

    Parameters
    ----------
    predict_array, real_array : equal-length sequences of numbers.

    Returns
    -------
    float rounded to 3 decimal places; 100.0 means a perfect fit.

    Raises
    ------
    ValueError
        If the inputs differ in length.  (The original printed a message
        and then crashed with UnboundLocalError because ``result`` was
        never assigned on that path.)
    """
    if len(predict_array) != len(real_array):
        raise ValueError('predict_array and real_array must have the same length')
    # Element-wise loop kept (rather than whole-array ops) to preserve the
    # original per-index broadcasting behavior exactly.
    squared_errors = [np.square(p - r) for p, r in zip(predict_array, real_array)]
    squared_reals = [np.square(r) for r in real_array]
    return round(100 * (1 - sum(squared_errors) / sum(squared_reals)), 3)

#define search space
# Hyperopt search space for the NN2 hyper-parameters:
#   ll_float      - L1 regularization strength applied to the Dense layers
#   lr            - Adam learning rate, sampled log-uniformly in [0.005, 0.2]
#   beta_1/beta_2 - Adam exponential-decay rates
#   epsilon_float - Adam numerical-stability constant
#   batch_size    - quniform returns floats, hence the int() cast at fit time
#   epochs        - likewise cast to int at fit time
space = {'ll_float':hp.uniform('ll_float',0.01,0.2),
         'lr': hp.loguniform('lr',np.log(0.005),np.log(0.2)),
         'beta_1_float':hp.uniform('beta_1_float',0.8,0.95),
         'beta_2_float':hp.uniform('beta_2_float',0.98,0.9999),
         'epsilon_float':hp.uniform('epsilon_float',1e-09,1e-07), ##note
         'batch_size': hp.quniform('batch_size',10,500,1),
         'epochs': hp.quniform('epochs',20,50,1)
         }
    
## set params random search time,when set 50,something will be wrong
# Number of hyperopt evaluations per rolling window.
# NOTE(review): the original comment claims "something will be wrong" at 50,
# yet 50 is the value used — unclear what failure was seen; confirm.
try_num1 = int(50)
# Accumulators filled across all 30 rolling windows below:
# concatenated best-trial test predictions, and one test score per window.
Y_pre_list_final= []
test_performance_score_list = []

# Rolling-window loop: one model per test year (step i tests on year
# 1988 + i).  Each pass re-splits the data, runs a hyperopt search over
# `space`, keeps the best trial by validation score, and records its
# test-year predictions and score.
for i in range(30):
    print(i)
    starttime = datetime.datetime.now()
    #split data
    X_traindata, Y_traindata, X_vdata, Y_vdata, X_testdata, Y_testdata = creat_data(num=i)

    #define NN2
    def f_NN2(params):
        """Hyperopt objective for one hyper-parameter draw.

        Builds and trains the NN2 net, then returns -validation_score as
        the loss (hyperopt minimizes, so higher v_score wins).  Reads the
        enclosing loop's X/Y arrays via closure, which is why it is
        redefined on every rolling step.
        """
        ## define params (trailing comments show typical reference values)
        ll_float= params["ll_float"]#0.1
        learn_rate_float= params["lr"] #0.01
        beta_1_float= params["beta_1_float"] # 0.9
        beta_2_float= params["beta_2_float"] #0.999
        epsilon_float= params["epsilon_float"] #1e-08
        batch_size_num = params['batch_size'] #
        epochs_num = params['epochs'] #50
        
        ## model structure: Dense(32)->ReLU->BN -> Dense(16)->ReLU->BN -> Dense(1)
        model_NN2 = Sequential()
        # Fixed initializer seed so each trial starts from the same weights.
        init = initializers.he_normal(seed=100)
        #init = initializers.RandomNormal(mean=0.0, stddev=0.05, seed=None)
        model_NN2.add(Dense(32, input_dim =len(X_traindata[0]),
                            kernel_initializer=init ,
                            kernel_regularizer=regularizers.l1(ll_float)))
        model_NN2.add(Activation("relu"))
        model_NN2.add(BatchNormalization())
        model_NN2.add(Dense(16, 
                            kernel_initializer=init ,
                            kernel_regularizer=regularizers.l1(ll_float)))
        model_NN2.add(Activation("relu"))
        model_NN2.add(BatchNormalization())
        model_NN2.add(Dense(1))
        
        ## comile model
        adam=Adam(lr=learn_rate_float, beta_1=beta_1_float, beta_2=beta_2_float, epsilon=epsilon_float)
        model_NN2.compile(loss='mse', optimizer=adam,metrics=['mse'])
        
        ## callback fun
        early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=2, verbose=0, mode='auto')
        # Every trial checkpoints to the same temp file, which is then
        # reloaded below — so each trial overwrites the previous one's file.
        model_filepath = Path + '\\model\\NN2\\temp\\best_weights.h5'
        # NOTE(review): save_best_only='True' is a string, not a bool; it is
        # truthy so it behaves as True, but should be the literal True.
        checkpoint = ModelCheckpoint(filepath=model_filepath,save_weights_only=False,monitor='val_loss',mode='min' ,save_best_only='True')
        callback_lists = [early_stopping,checkpoint]
        
        ## fit model
        model_NN2.fit(X_traindata, Y_traindata,
                  batch_size = int(batch_size_num) ,
                  epochs = int(epochs_num),
                  verbose = 0,
                  validation_data=(X_vdata, Y_vdata),
                  callbacks=callback_lists ,
                  shuffle=False)
    
        ##get the best model (best-val_loss epoch saved by the checkpoint)
        best_model = load_model(model_filepath)
        # validate model
        Y_pre_v = best_model.predict(X_vdata,verbose = 0)
    
        # Flatten the (n, 1) prediction matrix into a plain list.
        Y_pre_vlist=[]
        for x in Y_pre_v[:,0]:
            Y_pre_vlist.append(x)
    
        v_score = Evaluation_fun(Y_pre_vlist, Y_vdata)
    
        ## prediction & save
        Y_pre =best_model.predict(X_testdata,verbose = 0)
        
        Y_pre_list=[]
        for x in Y_pre[:,0]:
            Y_pre_list.append(x)
        test_score = Evaluation_fun(Y_pre_list, Y_testdata)
       # print('Performance:',v_score)
        # Negate v_score: hyperopt minimizes 'loss'.  The test-side results
        # ride along in the trial dict and are read back after fmin().
        return {'loss': -v_score , 'status': STATUS_OK, 
                'y_pre_list':Y_pre_list,'test_score':test_score,
                'models':best_model}

    trials = Trials()
    fmin(f_NN2, space, algo=tpe.suggest, max_evals=try_num1, trials=trials)

    # Locate the trial with the minimum loss.  NOTE(review): on ties this
    # keeps the LAST matching index (loss_list.index(min_loss) would give
    # the first) — presumably ties never occur with continuous losses.
    loss_list = trials.losses()
    min_loss = min(loss_list)
    for k in range(try_num1):
        if min_loss == loss_list[k]:
            key = k
    best_results = trials.results[key]
    
    # Append this window's test-year predictions to the running list.
    Y_pre_list_final= Y_pre_list_final + best_results['y_pre_list']
    
    test_performance_score_list.append(best_results['test_score'])
    
    # Persist this window's winning model, named by its test year.
    final_model =  best_results['models']
    final_model.save(Path + '\\model\\NN2\\'+ str(i+1988)+'_Model_NN2_Top1000_Prediction.h5')

    # Free graph/session memory before the next window (TF 1.x API).
    K.clear_session()
    tf.reset_default_graph()
    endtime = datetime.datetime.now()
    print ('time_cost',(endtime - starttime).seconds)
    
# save out my result
    
# Average of the per-window test scores across the 30 rolling windows.
print('Model Performance by Average:',np.mean(test_performance_score_list))

# Overall out-of-sample score of the concatenated predictions against all
# realized values from 1988 onward.
y_real = np.array(top_1000_data_Y.loc['1988':])
print('Model Performance:',Evaluation_fun(Y_pre_list_final, y_real))

# Persist the concatenated predictions.  A context manager replaces the
# original open()/close() pair so the handle closes even if dump raises.
with open(Path + '\\output\\data\\Model_NN2_Top1000_Prediction.pkl', 'wb') as file:
    pickle.dump(Y_pre_list_final, file)




    
    