#!/usr/bin/python
# -*-coding:utf-8-*-
import os
import pandas as pd
import numpy as np
import graphviz
import pickle
import gc
from datetime import datetime

from dir_info import logs_dir as logs_data_dir
from dir_info import records_dir
from dir_info import key_interaction_data_dir
from dir_info import existing_factor_record_dir

from base.setlogger import Logger

from base.utils import delete_file_list
from base.utils import save_txt_file
from base.utils import loadFile

# from gplearn_evolve.functions import make_function
from gplearn_evolve.fitness import make_fitness
from gplearn_evolve.genetic import SymbolicRegressor

import warnings
warnings.filterwarnings("ignore")

from base.GPModelSave import model_predict
from base.GPModelSave import save_gpmodel_v2

from base.GPFitness import def_keyfit_metric

from base.GPFunctionSelect import get_gp_functions
from base.XSelect import selectX

from base.utils import get_previous_trade_date
from base.GPPopulationFilter import filter_population_v1
# from base.config import self_defined_func_dict
from gplearn_evolve.base.config import self_defined_seq_func_dict

from base.KeyTransform import stock_code_map
from base.KeyTransform import trade_date_map
from existing_gp_factor_management.get_existing_gp_mining_factor_list import main as get_existing_factor_list

from factor_selection.gp_factor_selection_v1 import gp_factor_selection_v1
# from factor_selection.gp_factor_selection_v1 import get_gp_factor
from factor_selection.get_gp_factor import get_gp_factor

### 底层读取数据的依赖（不提供）
from zbc_factor_lib.base.factors_library_base import NewRQFactorLib as DataReader

# TODO - fixed parameters
ic_threshold = 0.1

label_data_dir = './zbc_gplearn_factor_mining/label_data'
label_data_filename = 'label_data'

test_data_dir = './zbc_gplearn_factor_mining/processed_X_data'
test_data_filename = 'processed_X_user_behavior_data_v1'

test_y_data_dir = './zbc_gplearn_factor_mining/y_label_data'
test_y_data_filename = 'y_10day_return_label'
# test_y_data_filename = 'y_5day_return_label'
# test_y_data_filename = 'y_3day_return_label'

# TODO - directory where fitted GP models are saved
save_model_dir = './zbc_gplearn_factor_mining/model/ths_user_behavior_factors'

# exist_ok avoids a check-then-create race when several mining jobs start together
os.makedirs(save_model_dir, exist_ok=True)

save_factor_db = 'gp_factors'
save_bfactor_db = 'gp_factors/barra_stripping'

##
# TODO - logging setup
# NOTE: the 'hehavior' spelling is kept on purpose — it is part of the
# existing on-disk record/log directory names.
factors_label = 'zbc_ths_user_hehavior_factor_mining_v2'

sub_records_dir = os.path.join(records_dir, factors_label)
os.makedirs(sub_records_dir, exist_ok=True)

# log_date_time = datetime.now() - pd.Timedelta(days=1)
log_date_time = datetime.now()
log_date_time = log_date_time.strftime('%Y%m%d_%H')  # one log file per hour

gp_logger = Logger(os.path.join(logs_data_dir,
                                factors_label + '_log_%s.log' % log_date_time),
                   level='info')

#### TODO - 读取数据
def load_data_v1(test_shift_days=60,
                 select_all=False,
                 selected_number=None,
                 selected_pct=None,
                 interaction_label=None,
                 **kwargs):
    """Load X/y HDF data and split it into train, two validation and two test sets.

    Required keyword arguments (dates comparable with the stored 'date' column):
    train_start_date/train_end_date, val_start_date_1/val_end_date_1,
    val_start_date_2/val_end_date_2, test_start_date_1/test_end_date_1,
    test_start_date_2/test_end_date_2.

    Parameters
    ----------
    test_shift_days : int
        Trade days each validation/test window is shifted back so time-series
        functions have warm-up history.
    select_all : bool
        If True use every candidate feature column; otherwise sample a subset.
    selected_number : int or None
        Number of feature columns to sample; derived from selected_pct if None.
    selected_pct : float or None
        Fraction of columns to sample when selected_number is None (default 0.5).
    interaction_label : str or None
        Prefix for the key files written to ``key_interaction_data_dir``.

    Returns
    -------
    tuple
        (train_data, validation1_data, validation2_data, test1_data,
        test2_data, whole_data, selected_X_list). Each ``*_data`` item is
        ``[X, y, neu_keys_path, metric_features_keys_path]``; ``whole_data``
        is ``[X, neu_keys_frame, index]``.
    """
    train_start_date = kwargs['train_start_date']
    train_end_date = kwargs['train_end_date']

    val_start_date_1 = kwargs['val_start_date_1']
    val_end_date_1 = kwargs['val_end_date_1']

    val_start_date_2 = kwargs['val_start_date_2']
    val_end_date_2 = kwargs['val_end_date_2']

    test_start_date_1 = kwargs['test_start_date_1']
    test_end_date_1 = kwargs['test_end_date_1']

    test_start_date_2 = kwargs['test_start_date_2']
    test_end_date_2 = kwargs['test_end_date_2']

    def _drop_y_nan(df, tag):
        # Drop rows whose label is missing, logging shapes before/after.
        # (fix: the original printed X_test_1's shape for both validation sets)
        print('%s - before drop y nan shape is' % tag, df.shape)
        df = df[~df['y'].isnull()]
        print('%s - after drop y nan shape is' % tag, df.shape)
        return df

    def _map_and_split(df):
        # Map (date, stock_code) to numeric codes and split off the label.
        if isinstance(df.index, pd.MultiIndex):
            df = df.reset_index()
        else:
            # drop=True (fix): the leftover positional index from filtering
            # must not leak into the feature matrix as an 'index' column.
            df = df.reset_index(drop=True)
        df = trade_date_map(df, key='date', reverse=False)
        df = stock_code_map(df, key='stock_code', reverse=False)
        df = df.set_index(['date', 'stock_code'])
        y = df['y'].copy()
        X = df.drop('y', axis=1)
        return X, y

    def _save_keys(label_data, index, tag):
        # Persist neutralisation keys and (date, stock_code) keys as .npy files.
        neu_keys = label_data.loc[index, ['ci1_code', 'cap']].copy()
        neu_keys = np.array(neu_keys.reset_index())
        feature_keys = np.array(pd.DataFrame(index=index).reset_index())

        neu_path = os.path.join(key_interaction_data_dir,
                                interact_key_filename + '_%s_neu_keys.npy' % tag)
        keys_path = os.path.join(key_interaction_data_dir,
                                 interact_key_filename + '_%s_keys.npy' % tag)

        np.save(neu_path, neu_keys)
        np.save(keys_path, feature_keys)
        return neu_path, keys_path

    # Load features and labels.
    test_X_data = pd.read_hdf(os.path.join(test_data_dir, test_data_filename + '.h5'))
    test_y_data = pd.read_hdf(os.path.join(test_y_data_dir, test_y_data_filename + '.h5'))

    # Randomly select a subset of the candidate feature columns.
    X_candidates = test_X_data.columns.tolist()

    if selected_pct is None:
        selected_pct = 0.5

    if not select_all:
        if selected_number is None:
            selected_number = int(len(X_candidates) * selected_pct)

        selected_X_list = selectX(X_candidates, selected_number=selected_number)
    else:
        selected_X_list = X_candidates

    test_X_data = test_X_data[selected_X_list]
    test_X_data['y'] = test_y_data.loc[test_X_data.index]
    test_X_data = test_X_data.reset_index()

    # Train window.
    X_train = test_X_data[(test_X_data['date'] >= train_start_date) &
                          (test_X_data['date'] <= train_end_date)].copy()
    X_train = X_train.set_index(['date', 'stock_code'])

    # Validation windows, shifted back for warm-up history.
    shift_val_start_date_1 = get_previous_trade_date(val_start_date_1, test_shift_days)
    X_validation_1 = test_X_data[(test_X_data['date'] >= shift_val_start_date_1) &
                                 (test_X_data['date'] <= val_end_date_1)].copy()

    shift_val_start_date_2 = get_previous_trade_date(val_start_date_2, test_shift_days)
    X_validation_2 = test_X_data[(test_X_data['date'] >= shift_val_start_date_2) &
                                 (test_X_data['date'] <= val_end_date_2)].copy()

    # Test windows (the second one also shifted for warm-up).
    X_test_1 = test_X_data[(test_X_data['date'] >= test_start_date_1) &
                           (test_X_data['date'] <= test_end_date_1)].copy()

    shift_test_start_date_2 = get_previous_trade_date(test_start_date_2, test_shift_days)
    X_test_2 = test_X_data[(test_X_data['date'] >= shift_test_start_date_2) &
                           (test_X_data['date'] <= test_end_date_2)].copy()

    # Drop rows with missing labels.
    X_train = _drop_y_nan(X_train, 'train data')
    X_validation_1 = _drop_y_nan(X_validation_1, 'validation data 1')
    X_validation_2 = _drop_y_nan(X_validation_2, 'validation data 2')
    X_test_1 = _drop_y_nan(X_test_1, 'test data 1')
    X_test_2 = _drop_y_nan(X_test_2, 'test data 2')

    # Map keys of the whole data set.
    test_X_data = trade_date_map(test_X_data, key='date', reverse=False)
    test_X_data = stock_code_map(test_X_data, key='stock_code', reverse=False)

    # Map keys and split labels per sub-sample.
    X_train, y_train = _map_and_split(X_train)
    X_validation_1, y_validation_1 = _map_and_split(X_validation_1)
    X_validation_2, y_validation_2 = _map_and_split(X_validation_2)
    X_test_1, y_test_1 = _map_and_split(X_test_1)
    X_test_2, y_test_2 = _map_and_split(X_test_2)

    #############################################
    # Load label/attribute data that carries the neutralisation keys.
    concat_label_data = pd.read_hdf(os.path.join(label_data_dir, label_data_filename + '.h5'))

    # Save keys to disk under a unique, timestamped prefix.
    if interaction_label is None:
        interaction_label = 'ths_user_behavior_v1'

    # fix: pd.datetime was removed in pandas >= 1.0; use datetime directly.
    interact_key_filename = interaction_label + '%s' % (datetime.now().strftime('%Y%m%d%H%M%S'))

    # whole sample (keys kept in memory, not written to disk)
    test_X_data = test_X_data.set_index(['date', 'stock_code'])

    whole_neu_keys = concat_label_data.loc[test_X_data.index, ['ci1_code', 'cap']].copy()
    whole_metric_features_keys = test_X_data.index

    train_neu_keys_path, train_metric_features_keys_path = \
        _save_keys(concat_label_data, X_train.index, 'train')
    validation1_neu_keys_path, validation1_metric_features_keys_path = \
        _save_keys(concat_label_data, X_validation_1.index, 'val1')
    validation2_neu_keys_path, validation2_metric_features_keys_path = \
        _save_keys(concat_label_data, X_validation_2.index, 'val2')
    test1_neu_keys_path, test1_metric_features_keys_path = \
        _save_keys(concat_label_data, X_test_1.index, 'test1')
    test2_neu_keys_path, test2_metric_features_keys_path = \
        _save_keys(concat_label_data, X_test_2.index, 'test2')

    # Release the large intermediates before returning.
    del concat_label_data
    # del test_X_data
    del test_y_data
    gc.collect()

    whole_data = [test_X_data, whole_neu_keys, whole_metric_features_keys]
    train_data = [X_train, y_train, train_neu_keys_path, train_metric_features_keys_path]

    validation1_data = [X_validation_1, y_validation_1,
                        validation1_neu_keys_path, validation1_metric_features_keys_path]
    validation2_data = [X_validation_2, y_validation_2,
                        validation2_neu_keys_path, validation2_metric_features_keys_path]

    test1_data = [X_test_1, y_test_1, test1_neu_keys_path, test1_metric_features_keys_path]
    test2_data = [X_test_2, y_test_2, test2_neu_keys_path, test2_metric_features_keys_path]

    return train_data, \
            validation1_data, \
            validation2_data, \
            test1_data, \
            test2_data, \
            whole_data, \
            selected_X_list

def load_data_v2(test_shift_days=60,
                 select_all=False,
                 selected_number=None,
                 selected_pct=None,
                 interaction_label=None,
                 **kwargs):
    """Load X/y HDF data and split it into train, one validation and two test sets.

    Required keyword arguments: train_start_date/train_end_date,
    val_start_date/val_end_date, test_start_date_1/test_end_date_1,
    test_start_date_2/test_end_date_2. Other parameters mirror
    ``load_data_v1``; this variant uses a single validation window and does
    not return the whole-sample data.

    Returns
    -------
    tuple
        (train_data, validation_data, test1_data, test2_data,
        selected_X_list). Each ``*_data`` item is
        ``[X, y, neu_keys_path, metric_features_keys_path]``.
    """
    train_start_date = kwargs['train_start_date']
    train_end_date = kwargs['train_end_date']

    val_start_date = kwargs['val_start_date']
    val_end_date = kwargs['val_end_date']

    test_start_date_1 = kwargs['test_start_date_1']
    test_end_date_1 = kwargs['test_end_date_1']

    test_start_date_2 = kwargs['test_start_date_2']
    test_end_date_2 = kwargs['test_end_date_2']

    def _drop_y_nan(df, tag):
        # Drop rows whose label is missing, logging shapes before/after.
        # (fix: the original printed X_test_1's shape for the validation set)
        print('%s - before drop y nan shape is' % tag, df.shape)
        df = df[~df['y'].isnull()]
        print('%s - after drop y nan shape is' % tag, df.shape)
        return df

    def _map_and_split(df):
        # Map (date, stock_code) to numeric codes and split off the label.
        if isinstance(df.index, pd.MultiIndex):
            df = df.reset_index()
        else:
            # drop=True (fix): the leftover positional index from filtering
            # must not leak into the feature matrix as an 'index' column.
            df = df.reset_index(drop=True)
        df = trade_date_map(df, key='date', reverse=False)
        df = stock_code_map(df, key='stock_code', reverse=False)
        df = df.set_index(['date', 'stock_code'])
        y = df['y'].copy()
        X = df.drop('y', axis=1)
        return X, y

    def _save_keys(label_data, index, tag):
        # Persist neutralisation keys and (date, stock_code) keys as .npy files.
        neu_keys = label_data.loc[index, ['ci1_code', 'cap']].copy()
        neu_keys = np.array(neu_keys.reset_index())
        feature_keys = np.array(pd.DataFrame(index=index).reset_index())

        neu_path = os.path.join(key_interaction_data_dir,
                                interact_key_filename + '_%s_neu_keys.npy' % tag)
        keys_path = os.path.join(key_interaction_data_dir,
                                 interact_key_filename + '_%s_keys.npy' % tag)

        np.save(neu_path, neu_keys)
        np.save(keys_path, feature_keys)
        return neu_path, keys_path

    # Load features and labels.
    test_X_data = pd.read_hdf(os.path.join(test_data_dir, test_data_filename + '.h5'))
    test_y_data = pd.read_hdf(os.path.join(test_y_data_dir, test_y_data_filename + '.h5'))

    # Randomly select a subset of the candidate feature columns.
    X_candidates = test_X_data.columns.tolist()

    if selected_pct is None:
        # selected_pct = 0.25
        selected_pct = 0.5

    if not select_all:
        if selected_number is None:
            selected_number = int(len(X_candidates) * selected_pct)

        selected_X_list = selectX(X_candidates, selected_number=selected_number)
    else:
        selected_X_list = X_candidates

    test_X_data = test_X_data[selected_X_list]
    test_X_data['y'] = test_y_data.loc[test_X_data.index]
    test_X_data = test_X_data.reset_index()

    # Train window.
    X_train = test_X_data[(test_X_data['date'] >= train_start_date) &
                          (test_X_data['date'] <= train_end_date)].copy()
    X_train = X_train.set_index(['date', 'stock_code'])

    # Validation window, shifted back for warm-up history.
    shift_val_start_date_1 = get_previous_trade_date(val_start_date, test_shift_days)
    X_validation = test_X_data[(test_X_data['date'] >= shift_val_start_date_1) &
                               (test_X_data['date'] <= val_end_date)].copy()

    # Test windows (the second one also shifted for warm-up).
    X_test_1 = test_X_data[(test_X_data['date'] >= test_start_date_1) &
                           (test_X_data['date'] <= test_end_date_1)].copy()

    shift_test_start_date_2 = get_previous_trade_date(test_start_date_2, test_shift_days)
    X_test_2 = test_X_data[(test_X_data['date'] >= shift_test_start_date_2) &
                           (test_X_data['date'] <= test_end_date_2)].copy()

    # Drop rows with missing labels.
    X_train = _drop_y_nan(X_train, 'train data')
    X_validation = _drop_y_nan(X_validation, 'validation data 1')
    X_test_1 = _drop_y_nan(X_test_1, 'test data 1')
    X_test_2 = _drop_y_nan(X_test_2, 'test data 2')

    # Map keys and split labels per sub-sample.
    X_train, y_train = _map_and_split(X_train)
    X_validation, y_validation = _map_and_split(X_validation)
    X_test_1, y_test_1 = _map_and_split(X_test_1)
    X_test_2, y_test_2 = _map_and_split(X_test_2)

    # Load label/attribute data that carries the neutralisation keys.
    concat_label_data = pd.read_hdf(os.path.join(label_data_dir, label_data_filename + '.h5'))

    # Save keys to disk under a unique, timestamped prefix.
    if interaction_label is None:
        interaction_label = 'ths_user_behavior_v1'

    # fix: pd.datetime was removed in pandas >= 1.0; use datetime directly.
    interact_key_filename = interaction_label + '%s' % (datetime.now().strftime('%Y%m%d%H%M%S'))

    train_neu_keys_path, train_metric_features_keys_path = \
        _save_keys(concat_label_data, X_train.index, 'train')
    validation_neu_keys_path, validation_metric_features_keys_path = \
        _save_keys(concat_label_data, X_validation.index, 'val1')
    test1_neu_keys_path, test1_metric_features_keys_path = \
        _save_keys(concat_label_data, X_test_1.index, 'test1')
    test2_neu_keys_path, test2_metric_features_keys_path = \
        _save_keys(concat_label_data, X_test_2.index, 'test2')

    # Release the large intermediates before returning.
    del concat_label_data
    del test_X_data
    del test_y_data
    gc.collect()

    train_data = [X_train, y_train, train_neu_keys_path, train_metric_features_keys_path]

    validation_data = [X_validation, y_validation,
                       validation_neu_keys_path, validation_metric_features_keys_path]

    test1_data = [X_test_1, y_test_1, test1_neu_keys_path, test1_metric_features_keys_path]
    test2_data = [X_test_2, y_test_2, test2_neu_keys_path, test2_metric_features_keys_path]

    return train_data, \
            validation_data, \
            test1_data, \
            test2_data, \
            selected_X_list

# no validation
def run_v1(**kwargs):
    ## TODO - 设置参数
    if 'n_jobs' in kwargs.keys():
        n_jobs = kwargs['n_jobs']
    else:
        n_jobs = 1

    if 'ts_base' in kwargs.keys():
        ts_base = kwargs['ts_base']
    else:
        ts_base = 5

    if 'population_size' in kwargs.keys():
        population_size = kwargs['population_size']
    else:
        population_size = 500

    if 'generations' in kwargs.keys():
        generations = kwargs['generations']
    else:
        generations = 2

    if 'tournament_size' in kwargs.keys():
        tournament_size = kwargs['tournament_size']
    else:
        tournament_size = 20

    if 'init_depth' in kwargs.keys():
        init_depth = kwargs['init_depth']
    else:
        init_depth = (2, 4)

    if 'init_method' in kwargs.keys():
        init_method = kwargs['init_method']
    else:
        init_method = 'full'
        # init_method='half and half'

    if 'parsimony_coefficient' in kwargs.keys():
        parsimony_coefficient = kwargs['parsimony_coefficient']
    else:
        parsimony_coefficient = 0.001

    if 'p_crossover' in kwargs.keys():
        p_crossover = kwargs['p_crossover']
    else:
        p_crossover = 0.6

    if 'p_subtree_mutation' in kwargs.keys():
        p_subtree_mutation = kwargs['p_subtree_mutation']
    else:
        p_subtree_mutation = 0.1

    if 'p_hoist_mutation' in kwargs.keys():
        p_hoist_mutation = kwargs['p_hoist_mutation']
    else:
        p_hoist_mutation = 0.01

    if 'p_point_mutation' in kwargs.keys():
        p_point_mutation = kwargs['p_point_mutation']
    else:
        p_point_mutation = 0.1

    # node replace prob independently
    if 'p_point_replace' in kwargs.keys():
        p_point_replace = kwargs['p_point_replace']
    else:
        p_point_replace = 0.4

    if 'gp_method' in kwargs.keys():
        gp_method = kwargs['gp_method']
    else:
        gp_method = 'weighted_rank_ic'
        # gp_method = 'simple_ic'

    if 'label' in kwargs.keys():
        label = kwargs['label']
    else:
        label = 'ths_user_hehavior_model'

    if 'factor_cover_rate' in kwargs.keys():
        factor_cover_rate = kwargs['factor_cover_rate']
    else:
        factor_cover_rate = 0.6

    if 'filter_duplicate_rate' in kwargs.keys():
        filter_duplicate_rate = kwargs['filter_duplicate_rate']
    else:
        filter_duplicate_rate = 0.1

    if 'multiprocess_backend' in kwargs.keys():
        multiprocess_backend = kwargs['multiprocess_backend']
    else:
        multiprocess_backend = 'loky'
        # multiprocess_backend = 'multiprocessing'

    if 'selected_number' in kwargs.keys():
        selected_number = kwargs['selected_number']
    else:
        selected_number = None

    if 'selected_pct' in kwargs.keys():
        selected_pct = kwargs['selected_pct']
    else:
        # selected_pct = 0.25
        selected_pct = 0.5

    if 'filter_population' in kwargs.keys():
        filter_population = kwargs['filter_population']
    else:
        filter_population = []

    if 'ts_d_range' in kwargs.keys():
        ts_d_range = kwargs['ts_d_range']
    else:
        ts_d_range = (0.1, 0.91)

    ''''''
    gp_logger.logger.info('ths user behavior factor mining starts...')

    # TODO - get data
    train_data, \
    val1_data, \
    val2_data, \
    test1_data, \
    test2_data, \
    whole_data, \
    selected_X_list = load_data_v1(test_shift_days=ts_base * 10,
                                   selected_number=selected_number,
                                   selected_pct=selected_pct,
                                   select_all=False,
                                   interaction_label=label,
                                   **kwargs)

    gpfunction_candidates = list(self_defined_seq_func_dict.keys())

    if 'selected_func_number' in kwargs.keys():
        selected_func_number = kwargs['selected_func_number']
    else:
        selected_func_number = len(gpfunction_candidates)//2

    # TODO - set up function sets
    my_function_set = get_gp_functions(gpfunction_candidates,
                                       selected_number=selected_func_number,
                                       keys=train_data[3],
                                       neu_keys=train_data[2],
                                       base=ts_base,
                                       key_by_path=True)

    # TODO - make fitness function
    # TODO - fitness只返回一个值
    my_metric = make_fitness(def_keyfit_metric(keys=train_data[3],
                                               method=gp_method,
                                               drop_first_nday=ts_base*10,
                                               split_date=None,
                                               neu_keys=train_data[2],
                                               factor_cover_rate=factor_cover_rate,
                                               filter_duplicate_rate=filter_duplicate_rate,
                                               is_cap_neu=False,
                                               is_ci1_neu=False,
                                               key_by_path=True,
                                               is_neu_y=False),
                             greater_is_better=True)

    """
    #####################TRAIN PERIOD######################
    """
    est_gp = SymbolicRegressor(
            population_size=population_size,
            generations=generations,
            tournament_size=tournament_size,
            stopping_criteria=3,
            const_range=None,
            init_depth=init_depth,
            init_method=init_method,
            function_set=my_function_set,
            metric=my_metric,
            parsimony_coefficient=parsimony_coefficient,
            p_crossover=p_crossover,
            p_subtree_mutation=p_subtree_mutation,
            p_hoist_mutation=p_hoist_mutation,
            p_point_mutation=p_point_mutation,
            p_point_replace=p_point_replace,    # node replace prob independently
            max_samples=1,
            feature_names=selected_X_list,
            warm_start=False,
            low_memory=True,
            n_jobs=n_jobs,
            # verbose=2,
            verbose=1,
            multiprocess_backend=multiprocess_backend,
            filter_population_func=filter_population_v1,
            filter_population=filter_population,
            ts_d_range=ts_d_range,
            random_state=None
    )

    est_gp.fit(train_data[0], train_data[1])

    program_depth = est_gp._program.depth_ + 1  # 获取树深度
    est_gp_program = est_gp._program.program
    est_gp_program = [selected_X_list[f] if isinstance(f, int) else f for f in est_gp_program]

    est_gp_program_print = [f.name if not isinstance(f, (int, str, float)) else f for f in est_gp_program]

    program = [est_gp_program, ts_base]

    print('program depth are', program_depth)
    gp_logger.logger.info('program depth are %s' % program_depth)

    # TODO - 计算train IC
    y_pred_train = model_predict(X=train_data[0],
                                 program=program,
                                 keys=train_data[3],
                                 neu_keys=train_data[2],
                                 by_name=True,
                                 key_by_path=True)

    used_func = def_keyfit_metric(keys=train_data[3],
                                  method='ic_mean',
                                  drop_first_nday=ts_base*10,
                                  neu_keys=None,
                                  is_cap_neu=False,
                                  is_ci1_neu=False,
                                  is_neu_y=False,
                                  key_by_path=True,
                                  verbose=True)

    train_ic = used_func(y=train_data[1].values, y_pred=y_pred_train, w=None)

    train_ic_direction = 1.0 if train_ic >= 0 else -1.0

    used_func = def_keyfit_metric(keys=train_data[3],
                                  method='weighted_rank_ic',
                                  drop_first_nday=ts_base*10,
                                  neu_keys=train_data[2],
                                  train_direction=train_ic_direction,
                                  is_cap_neu=True,
                                  is_ci1_neu=False,
                                  is_neu_y=True,
                                  factor_cover_rate=factor_cover_rate,
                                  filter_duplicate_rate=filter_duplicate_rate,
                                  verbose=True,
                                  key_by_path=True)

    train_IC = used_func(y=train_data[1].values, y_pred=y_pred_train, w=None)

    # TODO - 验证集预测
    y_pred_val1 = model_predict(X=val1_data[0],
                                 program=program,
                                 neu_keys=val1_data[2],
                                 keys=val1_data[3],
                                 by_name=True,
                                 key_by_path=True)

    used_func = def_keyfit_metric(keys=val1_data[3],
                                  method='weighted_rank_ic',
                                  drop_first_nday=ts_base*10,
                                  neu_keys=val1_data[2],
                                  train_direction=train_ic_direction,
                                  is_cap_neu=True,
                                  is_ci1_neu=False,
                                  is_neu_y=True,
                                  factor_cover_rate=factor_cover_rate,
                                  filter_duplicate_rate=filter_duplicate_rate,
                                  verbose=True,
                                  key_by_path=True)

    val1_IC = used_func(y=val1_data[1].values, y_pred=y_pred_val1, w=None)

    ###
    y_pred_val2 = model_predict(X=val2_data[0],
                                 program=program,
                                 neu_keys=val2_data[2],
                                 keys=val2_data[3],
                                 by_name=True,
                                 key_by_path=True)

    used_func = def_keyfit_metric(keys=val2_data[3],
                                  method='weighted_rank_ic',
                                  drop_first_nday=ts_base*10,
                                  neu_keys=val2_data[2],
                                  train_direction=train_ic_direction,
                                  is_cap_neu=True,
                                  is_ci1_neu=False,
                                  is_neu_y=True,
                                  factor_cover_rate=factor_cover_rate,
                                  filter_duplicate_rate=filter_duplicate_rate,
                                  verbose=True,
                                  key_by_path=True)

    val2_IC = used_func(y=val2_data[1].values, y_pred=y_pred_val2, w=None)

    # TODO - 样本外预测
    y_pred_test1 = model_predict(X=test1_data[0],
                                 program=program,
                                 neu_keys=test1_data[2],
                                 keys=test1_data[3],
                                 by_name=True,
                                 key_by_path=True)

    used_func = def_keyfit_metric(keys=test1_data[3],
                                  method='weighted_rank_ic',
                                  drop_first_nday=ts_base*10,
                                  neu_keys=test1_data[2],
                                  train_direction=train_ic_direction,
                                  is_cap_neu=True,
                                  is_ci1_neu=False,
                                  is_neu_y=True,
                                  factor_cover_rate=factor_cover_rate,
                                  filter_duplicate_rate=filter_duplicate_rate,
                                  verbose=True,
                                  key_by_path=True)

    test1_IC = used_func(y=test1_data[1].values, y_pred=y_pred_test1, w=None)

    ###
    y_pred_test2 = model_predict(X=test2_data[0],
                                 program=program,
                                 neu_keys=test2_data[2],
                                 keys=test2_data[3],
                                 by_name=True,
                                 key_by_path=True)

    used_func = def_keyfit_metric(keys=test2_data[3],
                                  method='weighted_rank_ic',
                                  drop_first_nday=ts_base*10,
                                  neu_keys=test2_data[2],
                                  train_direction=train_ic_direction,
                                  is_cap_neu=True,
                                  is_ci1_neu=False,
                                  is_neu_y=True,
                                  factor_cover_rate=factor_cover_rate,
                                  filter_duplicate_rate=filter_duplicate_rate,
                                  verbose=True,
                                  key_by_path=True)

    test2_IC = used_func(y=test2_data[1].values, y_pred=y_pred_test2, w=None)

    gp_logger.logger.info('\nprogram: %s\ntrain IC: %.4f\ntest 1 weight IC: %.4f\ntest 2 weight IC: %.4f\n' %
                          (est_gp_program_print, train_IC, test1_IC, test2_IC))

    # TODO - 综合两段预测效果以及附加条件
    num_X = sum([1.0 if isinstance(f, str) else 0 for f in est_gp_program])
    num_func = sum([1.0 if not isinstance(f, (str, int, float)) else 0 for f in est_gp_program])

    # TODO - 选择条件
    ic_cond = [
        train_IC > ic_threshold,
        val1_IC > ic_threshold,
        val2_IC > ic_threshold,
    ]

    is_save_model = (sum(ic_cond) >= 2) and \
                    (test1_IC > ic_threshold) and \
                    (test2_IC > ic_threshold) and \
                    (num_X >= 2 or num_func >= 2) and \
                    (program_depth >= 2)

    # TODO - 删掉keys交互文件
    to_del_filepath_list = [
        train_data[2],
        train_data[3],
        val1_data[2],
        val1_data[3],
        val2_data[2],
        val2_data[3],
        test1_data[2],
        test1_data[3],
        test2_data[2],
        test2_data[3],
    ]

    delete_file_list(to_del_filepath_list)

    # TODO - 保存模型
    if is_save_model:
        id = 1
        while True:
            save_filename = label + '_v%s' % id
            # if not os.path.exists(os.path.join(records_dir, save_filename+ '.pdf')):
            if not os.path.exists(os.path.join(sub_records_dir, save_filename+ '.pdf')):
                break

            id += 1

        is_selected = gp_factor_selection_v1(raw_data=whole_data[0],
                                             keys=whole_data[2],
                                             neu_keys=whole_data[1],
                                             model=est_gp_program,
                                             factor_name=save_filename,
                                             by_name=True,
                                             key_by_path=False,
                                             ts_base=ts_base,
                                             key_reverse=True)

        if is_selected:
            # TODO - 输出文件
            dot_data = est_gp._program.export_graphviz()
            graph = graphviz.Source(dot_data)

            graph.render(filename=save_filename,
                         # directory=records_dir,
                         directory=sub_records_dir,
                         cleanup=True)

            # TODO - 保存模型
            model_save_path = os.path.join(save_model_dir, '%s.pkl' % (save_filename))

            save_gpmodel_v2(program=est_gp_program,
                            base=ts_base,
                            path=model_save_path)

            # TODO - 保存记录数据
            train_start_date = kwargs['train_start_date']
            train_end_date = kwargs['train_end_date']
            val_start_date_1 = kwargs['val_start_date_1']
            val_end_date_1 = kwargs['val_end_date_1']
            val_start_date_2 = kwargs['val_start_date_2']
            val_end_date_2 = kwargs['val_end_date_2']
            test_start_date_1 = kwargs['test_start_date_1']
            test_end_date_1 = kwargs['test_end_date_1']
            test_start_date_2 = kwargs['test_start_date_2']
            test_end_date_2 = kwargs['test_end_date_2']

            # read_info = 'program: %s\ntrain IC: %.4f\ntest 1 weight IC: %.4f\ntest 2 weight IC: %.4f' % \
            #             (est_gp_program_print, train_IC, test1_IC, test2_IC)
            read_info = 'program(base:%s): %s\n' \
                        'train IC: %.4f (%s to %s)\n' \
                        'validation 1 weight IC: %.4f (%s to %s)\n' \
                        'validation 2 weight IC: %.4f (%s to %s)\n' \
                        'test 1 weight IC: %.4f (%s to %s)\n' \
                        'test 2 weight IC: %.4f (%s to %s)\n' % \
                        (ts_base, est_gp_program_print,
                        train_IC, train_start_date, train_end_date,
                        val1_IC, val_start_date_1, val_end_date_1,
                        val2_IC, val_start_date_2, val_end_date_2,
                        test1_IC, test_start_date_1, test_end_date_1,
                        test2_IC, test_start_date_2, test_end_date_2)

            # model_result_save_path = os.path.join(records_dir, '%s_result.txt' % (save_filename))
            model_result_save_path = os.path.join(sub_records_dir, '%s_result.txt' % (save_filename))

            save_txt_file(model_result_save_path, read_info)

            # TODO - 生成因子保存到db里面
            factor_data, \
            bfactor_data = get_gp_factor(raw_data=whole_data[0],
                                        keys=whole_data[2],
                                        neu_keys=whole_data[1],
                                        model=est_gp_program,
                                        factor_name=save_filename,
                                        by_name=True,
                                        key_by_path=False,
                                        ts_base=ts_base,
                                        key_reverse=True)

            if len(factor_data.index.shape) == 2:
                factor_data = factor_data.reset_index()
            else:
                factor_data = factor_data.reset_index(drop=True)

            if len(bfactor_data.index.shape) == 2:
                bfactor_data = bfactor_data.reset_index()
            else:
                bfactor_data = bfactor_data.reset_index(drop=True)

            data_reader = DataReader(db=save_factor_db)
            data_reader.update_data(factor_data)
            data_reader.create_factor_table(filename=save_filename, main_columns=['date', 'stock_code'])

            data_reader = DataReader(db=save_bfactor_db)
            data_reader.update_data(bfactor_data)
            data_reader.create_factor_table(filename=save_filename, main_columns=['date', 'stock_code'])

            del factor_data
            del bfactor_data
            gc.collect()

            print('program:{} saved!\n'.format(est_gp_program_print))
            gp_logger.logger.info('{} saved!\n'.format(est_gp_program_print))
        else:
            print('program:{} no selection!\n'.format(est_gp_program_print))
            gp_logger.logger.warn('{} no selection!\n'.format(est_gp_program_print))
    else:
        print('program:{} failed to save!\n'.format(est_gp_program_print))
        gp_logger.logger.warn('{} failed to save!\n'.format(est_gp_program_print))


def main(**kwargs):
    """Run one round of GP factor mining on a randomly chosen training year.

    Builds the train/validation/test date windows, loads the list of
    already-mined factor formulas to exclude from the search, assembles the
    GP hyper-parameters and delegates the actual mining to ``run_v1``.

    ``kwargs`` is currently unused; it is kept so callers may pass options
    without breaking the interface.
    """
    # Re-seed from OS entropy so repeated invocations do not reproduce the
    # same evolutionary path.
    np.random.seed(None)

    # Candidate training years; one is drawn at random per run.
    start_year = 2017
    end_year = 2019

    # np.random.randint's high bound is exclusive, hence the +1.
    selected_year = np.random.randint(start_year, end_year + 1)

    train_start_date = '%d-01-01' % (selected_year)
    train_end_date = '%d-12-31' % (selected_year)

    year_list = np.arange(start_year, end_year + 1).tolist()

    # The two candidate years NOT used for training become the two
    # validation windows (with 3 candidate years there are always exactly 2).
    test_year_list = list(filter(lambda y: selected_year != y, year_list))

    val_start_date_1 = '%d-01-01' % (test_year_list[0])
    val_end_date_1 = '%d-12-31' % (test_year_list[0])

    val_start_date_2 = '%d-01-01' % (test_year_list[1])
    val_end_date_2 = '%d-12-31' % (test_year_list[1])

    # Fixed out-of-sample windows, outside the train/validation year pool.
    test_start_date_1 = '2016-01-01'
    test_end_date_1 = '2016-12-31'

    test_start_date_2 = '2020-01-01'
    test_end_date_2 = '2020-03-31'

    print('train set from %s to %s' % (train_start_date, train_end_date))

    # Load formulas of factors already mined so evolution can filter out
    # duplicates.  A missing record file is expected on a first run, so this
    # remains best-effort — but catch Exception instead of a bare `except:`
    # so SystemExit / KeyboardInterrupt still propagate.
    try:
        get_existing_factor_list(factor_type=factors_label,
                                 model_dir=save_model_dir)

        filter_population_path = os.path.join(existing_factor_record_dir, factors_label + '_existing.pkl')

        filter_population = loadFile(filter_population_path)

        print('there are %s factors to filter...' % (len(filter_population)))
    except Exception:
        filter_population = []
        print('there are no factors to filter...')

    # GP hyper-parameters handed verbatim to run_v1.
    params = {
        'n_jobs': 1,
        'ts_base': 5,
        'population_size': 500,
        'generations': 2,
        'tournament_size': 40,
        'init_depth': (2, 4),
        'init_method': 'half and half',
        'parsimony_coefficient': 0.0001,
        'p_crossover': 0.6,
        'p_subtree_mutation': 0.1,
        'p_hoist_mutation': 0.01,
        'p_point_mutation': 0.1,
        'p_point_replace': 0.4,
        'factor_cover_rate': 0.6,
        'filter_duplicate_rate': 0.05,
        'label': factors_label,
        'filter_population': filter_population,
        # Date windows consumed inside run_v1 for IC reporting.
        'train_start_date': train_start_date,
        'train_end_date': train_end_date,
        'val_start_date_1': val_start_date_1,
        'val_end_date_1': val_end_date_1,
        'val_start_date_2': val_start_date_2,
        'val_end_date_2': val_end_date_2,
        'test_start_date_1': test_start_date_1,
        'test_end_date_1': test_end_date_1,
        'test_start_date_2': test_start_date_2,
        'test_end_date_2': test_end_date_2,
        'ts_d_range': (0.1, 0.69),
    }

    # Top-level boundary: log-and-continue so one failed round does not kill
    # the whole batch driven by __main__.
    try:
        run_v1(**params)
    except Exception as e:
        print('ths user behavior factor mining exception, info is', e)
        gp_logger.logger.error('ths user behavior factor mining exception, info is {}'.format(e))



if __name__ == '__main__':
    # Number of independent mining rounds per invocation; each round draws a
    # fresh random training year inside main().
    run_number = 2

    # Renamed loop variable (was `id`) to avoid shadowing the builtin id().
    for run_id in range(run_number):
        main()
        print(run_id, 'done!\n\n')