#!/usr/bin/python
# -*-coding:utf-8-*-
import os
import pandas as pd
import numpy as np
import graphviz
import pickle
import gc
from datetime import datetime

from dir_info import logs_dir as logs_data_dir
from dir_info import records_dir
from dir_info import key_interaction_data_dir
from dir_info import existing_factor_record_dir

from base.setlogger import Logger

from base.utils import delete_file_list
from base.utils import save_txt_file
from base.utils import loadFile

# from gplearn_evolve.functions import make_function
from gplearn_evolve.fitness import make_fitness
from gplearn_evolve.genetic import SymbolicRegressor

import warnings

warnings.filterwarnings("ignore")

from base.GPModelSave import model_predict
from base.GPModelSave import save_gpmodel_v2

from base.GPFitness import def_keyfit_cw_metric as def_keyfit_metric

from base.GPFunctionSelect import get_gp_functions
from base.XSelect import selectX

from base.utils import get_previous_trade_date

# from base.GPPopulationFilter import filter_population_v1 as filter_population_func
from base.GPPopulationFilter import filter_population_v2 as filter_population_func
from gplearn_evolve.base.config import self_defined_cw_used_func_dict

from base.KeyTransform import stock_code_map
from base.KeyTransform import trade_date_map

from existing_gp_factor_management.get_existing_gp_mining_factor_list import main as get_existing_factor_list

# Fixed run parameters
# ic_threshold = 0.06
ic_threshold = 0.05  # minimum IC a factor must exceed on train/val/test to be saved

# Input data locations (HDF5 files, filename without the '.h5' suffix)
label_data_dir = './zbc_gplearn_factor_mining/label_data'
# label_data_filename = 'label_data'
label_data_filename = 'financial_label_data'

test_data_dir = './zbc_gplearn_factor_mining/raw_X_data'
test_data_filename = 'X_financial_statement_data_v1'

test_y_data_dir = './zbc_gplearn_factor_mining/y_label_data'
test_y_data_filename = 'financial_statement_y_60d_return_label'
# test_y_data_filename = 'financial_statement_y_30d_return_label'

# Directory where accepted GP models are persisted
save_model_dir = './zbc_gplearn_factor_mining/model/financial_factors_v1'

if not os.path.exists(save_model_dir):
    os.makedirs(save_model_dir)

##
# Logger setup: one log file per wall-clock hour ('%Y%m%d_%H' suffix)
factors_label = 'zbc_financial_factor_mining_v1'

log_date_time = datetime.now()
log_date_time = log_date_time.strftime('%Y%m%d_%H')

gp_logger = Logger(os.path.join(logs_data_dir,
                                factors_label + '_log_%s.log' % log_date_time),
                   level='info')

#### Data loading
def _encode_split_keys(X):
    """Flag March report rows, then integer-encode the (date, stock_code) keys.

    Parameters
    ----------
    X : pd.DataFrame
        Frame with plain 'date' and 'stock_code' columns.

    Returns
    -------
    (X, season_flags)
        ``X`` re-indexed by the encoded ``['date', 'stock_code']`` keys, and a
        one-column ('drop') uint8 frame, positionally aligned with ``X``,
        flagging rows whose report month is March (presumably the annual-report
        season the fitness metric filters out — confirm against
        ``def_keyfit_metric``).
    """
    # bugfix: a plain reset_index() previously leaked a stale 'index' column
    # into the feature matrix for the validation/test splits
    X = X.reset_index(drop=True)

    season_flags = pd.DataFrame(
        (pd.DatetimeIndex(X['date']).month == 3).astype(np.uint8),
        columns=['drop'])

    X = trade_date_map(X, key='date', reverse=False, is_financial_date=True)
    X = stock_code_map(X, key='stock_code', reverse=False)

    X = X.set_index(['date', 'stock_code'])
    season_flags.index = X.index

    return X, season_flags


def _save_split_keys(X, filter_season_keys, concat_label_data,
                     interact_key_filename, tag):
    """Persist the neutralisation / metric / season-filter key arrays for one
    split as .npy files and return their paths.

    ``tag`` is 'train', 'val' or 'test' and only affects the file names.
    """
    neu_keys = concat_label_data.loc[X.index, ['ci1_code', 'cap']].copy()

    neu_keys = np.array(neu_keys.reset_index())
    metric_keys = np.array(pd.DataFrame(index=X.index).reset_index())
    season_keys = np.array(filter_season_keys.reset_index())

    neu_keys_path = os.path.join(
        key_interaction_data_dir, interact_key_filename + '_%s_neu_keys.npy' % tag)
    metric_keys_path = os.path.join(
        key_interaction_data_dir, interact_key_filename + '_%s_keys.npy' % tag)
    season_keys_path = os.path.join(
        key_interaction_data_dir, interact_key_filename + '_%s_filter_season_keys.npy' % tag)

    np.save(neu_keys_path, neu_keys)
    np.save(metric_keys_path, metric_keys)
    np.save(season_keys_path, season_keys)

    return neu_keys_path, metric_keys_path, season_keys_path


def load_data_v1(test_shift_seasons=12,
                 select_all=False,
                 selected_number=None,
                 selected_pct=None,
                 interaction_label=None,
                 **kwargs):
    """Load X/y data, split it into train/validation/test periods, and write
    the key/interaction arrays the GP fitness functions need to disk.

    Parameters
    ----------
    test_shift_seasons : int
        Seasons to shift the validation/test start dates backwards
        (approximated as ``test_shift_seasons * 3 * 31`` calendar days) so
        rolling operators have warm-up history.
    select_all : bool
        When True use every candidate X column; otherwise sample a subset.
    selected_number : int or None
        Number of X columns to sample; derived from ``selected_pct`` when None.
    selected_pct : float or None
        Fraction of X columns to sample when ``selected_number`` is None
        (defaults to 0.6).
    interaction_label : str or None
        Prefix for the temporary key files written to
        ``key_interaction_data_dir``.
    **kwargs
        Must contain 'train_start_date', 'train_end_date', 'val_start_date',
        'val_end_date', 'test_start_date', 'test_end_date' as date strings.

    Returns
    -------
    (train_data, validation_data, test_data, selected_X_list) where each
    *_data item is ``[X, y, neu_keys_path, keys_path, filter_season_keys_path]``.
    """
    train_start_date = kwargs['train_start_date']
    train_end_date = kwargs['train_end_date']

    val_start_date = kwargs['val_start_date']
    val_end_date = kwargs['val_end_date']

    test_start_date = kwargs['test_start_date']
    test_end_date = kwargs['test_end_date']

    # Load the raw feature matrix and the forward-return labels
    test_X_data = pd.read_hdf(os.path.join(test_data_dir, test_data_filename + '.h5'))
    test_y_data = pd.read_hdf(os.path.join(test_y_data_dir, test_y_data_filename + '.h5'))

    # Randomly sample a subset of the X columns (run-to-run diversity)
    X_candidates = test_X_data.columns.tolist()

    if selected_pct is None:
        selected_pct = 0.6

    if not select_all:
        if selected_number is None:
            selected_number = int(len(X_candidates) * selected_pct)

        selected_X_list = selectX(X_candidates, selected_number=selected_number)
    else:
        selected_X_list = X_candidates

    test_X_data = test_X_data[selected_X_list]

    # Align labels onto the feature rows; assumes both frames share the same
    # (report_date, stock_code) index — confirm against the data build step
    test_X_data['y'] = test_y_data.loc[test_X_data.index]

    test_X_data = test_X_data.reset_index()
    test_X_data = test_X_data.rename(columns={'report_date': 'date'})

    # -- train split --
    X_train = test_X_data[(test_X_data['date'] >= train_start_date) &
                          (test_X_data['date'] <= train_end_date)].copy()

    # -- validation split; start shifted back ~test_shift_seasons quarters --
    shift_val_start_date = pd.to_datetime(val_start_date) - pd.Timedelta(days=test_shift_seasons * 3 * 31)
    shift_val_start_date = shift_val_start_date.strftime('%Y-%m-%d')

    X_validation = test_X_data[(test_X_data['date'] >= shift_val_start_date) &
                               (test_X_data['date'] <= val_end_date)].copy()

    # -- test split, same backward shift --
    shift_test_start_date = pd.to_datetime(test_start_date) - pd.Timedelta(days=test_shift_seasons * 3 * 31)
    shift_test_start_date = shift_test_start_date.strftime('%Y-%m-%d')

    X_test = test_X_data[(test_X_data['date'] >= shift_test_start_date) &
                         (test_X_data['date'] <= test_end_date)].copy()

    # Drop rows whose label is missing
    print('train data - before drop y nan shape is', X_train.shape)
    X_train = X_train[~X_train['y'].isnull()]
    print('train data - after drop y nan shape is', X_train.shape)

    # bugfix: the two validation prints previously reported X_test.shape
    print('validation data - before drop y nan shape is', X_validation.shape)
    X_validation = X_validation[~X_validation['y'].isnull()]
    print('validation data - after drop y nan shape is', X_validation.shape)

    print('test data - before drop y nan shape is', X_test.shape)
    X_test = X_test[~X_test['y'].isnull()]
    print('test data - after drop y nan shape is', X_test.shape)

    # Encode keys and build season-filter flags for every split
    X_train, train_filter_season_keys = _encode_split_keys(X_train)
    X_validation, validation_filter_season_keys = _encode_split_keys(X_validation)
    X_test, test_filter_season_keys = _encode_split_keys(X_test)

    # Industry / market-cap labels used for neutralisation
    concat_label_data = pd.read_hdf(os.path.join(label_data_dir, label_data_filename + '.h5'))

    if interaction_label is None:
        interaction_label = 'financial_factor_v1'

    # bugfix: pd.datetime was removed in pandas 1.0+; use datetime directly
    interact_key_filename = interaction_label + '%s' % (datetime.now().strftime('%Y%m%d%H%M%S'))

    ## Drop financial-sector industries (CITIC level-1 codes 20/21/29) and
    ## rows with a missing industry label
    X_train['ci1_code'] = concat_label_data.loc[X_train.index, 'ci1_code'].copy()
    X_validation['ci1_code'] = concat_label_data.loc[X_validation.index, 'ci1_code'].copy()
    X_test['ci1_code'] = concat_label_data.loc[X_test.index, 'ci1_code'].copy()

    X_train = X_train[~X_train['ci1_code'].isin([20, 21, 29, np.nan])]
    X_validation = X_validation[~X_validation['ci1_code'].isin([20, 21, 29, np.nan])]
    X_test = X_test[~X_test['ci1_code'].isin([20, 21, 29, np.nan])]

    X_train = X_train.drop('ci1_code', axis=1)
    X_validation = X_validation.drop('ci1_code', axis=1)
    X_test = X_test.drop('ci1_code', axis=1)

    # Split off the targets
    y_train = X_train['y'].copy()
    X_train = X_train.drop('y', axis=1)

    y_validation = X_validation['y'].copy()
    X_validation = X_validation.drop('y', axis=1)

    y_test = X_test['y'].copy()
    X_test = X_test.drop('y', axis=1)

    # Persist the key arrays for each split.
    # NOTE(review): the season-filter flags were built before the industry
    # filter above, so they still cover the dropped rows (matching the
    # original behaviour) — confirm the fitness metric aligns them by key.
    train_neu_keys_path, train_metric_features_keys_path, train_filter_season_keys_path = \
        _save_split_keys(X_train, train_filter_season_keys, concat_label_data,
                         interact_key_filename, 'train')

    validation_neu_keys_path, validation_metric_features_keys_path, validation_filter_season_keys_path = \
        _save_split_keys(X_validation, validation_filter_season_keys, concat_label_data,
                         interact_key_filename, 'val')

    test_neu_keys_path, test_metric_features_keys_path, test_filter_season_keys_path = \
        _save_split_keys(X_test, test_filter_season_keys, concat_label_data,
                         interact_key_filename, 'test')

    # Release the large frames before the GP run allocates its population
    del concat_label_data
    del test_X_data
    del test_y_data
    gc.collect()

    train_data = [X_train,
                  y_train,
                  train_neu_keys_path,
                  train_metric_features_keys_path,
                  train_filter_season_keys_path]

    validation_data = [X_validation,
                       y_validation,
                       validation_neu_keys_path,
                       validation_metric_features_keys_path,
                       validation_filter_season_keys_path]

    test_data = [X_test,
                 y_test,
                 test_neu_keys_path,
                 test_metric_features_keys_path,
                 test_filter_season_keys_path]

    return train_data, \
           validation_data, \
           test_data, \
           selected_X_list


# no validation
def _weighted_rank_ic_score(data, filter_season_keys, y_pred,
                            drop_first_n_season, train_ic_direction,
                            factor_cover_rate, filter_duplicate_rate):
    """Cap-neutralised weighted rank-IC of ``y_pred`` for one data split.

    ``data`` is a ``[X, y, neu_keys_path, keys_path, season_keys_path]`` list
    as returned by ``load_data_v1``.
    """
    score_func = def_keyfit_metric(keys=data[3],
                                   method='weighted_rank_ic',
                                   drop_first_n_season=drop_first_n_season,
                                   neu_keys=data[2],
                                   filter_season_keys=filter_season_keys,
                                   train_direction=train_ic_direction,
                                   is_cap_neu=True,
                                   is_ci1_neu=False,
                                   is_neu_y=True,
                                   factor_cover_rate=factor_cover_rate,
                                   filter_duplicate_rate=filter_duplicate_rate,
                                   verbose=True,
                                   key_by_path=True)

    return score_func(y=data[1].values, y_pred=y_pred, w=None)


def run_v1(**kwargs):
    """Run one GP mining round: load data, evolve a program, score it on
    train/validation/test IC, and persist the model if it passes the
    acceptance conditions.

    Required kwargs: the six ``train/val/test`` ``*_start_date``/``*_end_date``
    strings (forwarded to ``load_data_v1``). All other kwargs override the
    defaults below; most are passed straight to ``SymbolicRegressor`` or the
    fitness construction.
    """
    ## Hyper-parameters: kwargs override the defaults below
    n_jobs = kwargs.get('n_jobs', 1)
    population_size = kwargs.get('population_size', 500)
    generations = kwargs.get('generations', 2)
    tournament_size = kwargs.get('tournament_size', 20)
    init_depth = kwargs.get('init_depth', (2, 4))
    init_method = kwargs.get('init_method', 'full')  # alt: 'half and half'
    parsimony_coefficient = kwargs.get('parsimony_coefficient', 0.001)
    p_crossover = kwargs.get('p_crossover', 0.6)
    p_subtree_mutation = kwargs.get('p_subtree_mutation', 0.1)
    p_hoist_mutation = kwargs.get('p_hoist_mutation', 0.01)
    p_point_mutation = kwargs.get('p_point_mutation', 0.1)
    # node replace prob independently
    p_point_replace = kwargs.get('p_point_replace', 0.4)
    gp_method = kwargs.get('gp_method', 'weighted_rank_ic')  # alt: 'simple_ic'
    label = kwargs.get('label', 'financial_factor_model_v1')
    factor_cover_rate = kwargs.get('factor_cover_rate', 0.6)
    filter_duplicate_rate = kwargs.get('filter_duplicate_rate', 0.1)
    multiprocess_backend = kwargs.get('multiprocess_backend', 'loky')  # alt: 'multiprocessing'
    selected_number = kwargs.get('selected_number', None)
    selected_pct = kwargs.get('selected_pct', 0.5)
    filter_population = kwargs.get('filter_population', [])
    drop_first_n_season = kwargs.get('drop_first_n_season', 12)

    gp_logger.logger.info('financial factors mining starts...')

    # -- data --
    train_data, \
    val_data, \
    test_data, \
    selected_X_list = load_data_v1(test_shift_seasons=drop_first_n_season,
                                   selected_number=selected_number,
                                   selected_pct=selected_pct,
                                   select_all=False,
                                   interaction_label=label,
                                   **kwargs)

    # gpfunction_candidates = list(self_defined_func_dict.keys())
    gpfunction_candidates = list(self_defined_cw_used_func_dict.keys())

    selected_func_number = kwargs.get('selected_func_number', len(gpfunction_candidates))
    # use the whole function set when the request covers (or exceeds) it
    selected_all = selected_func_number >= len(gpfunction_candidates)

    # -- build the GP function set --
    my_function_set = get_gp_functions(gpfunction_candidates,
                                       selected_number=selected_func_number,
                                       keys=train_data[3],
                                       neu_keys=train_data[2],
                                       base=12,
                                       key_by_path=True,
                                       selected_all=selected_all)

    train_filter_season_keys = train_data[4]
    val_filter_season_keys = val_data[4]
    test_filter_season_keys = test_data[4]

    # -- fitness function (returns a single scalar per program) --
    my_metric = make_fitness(def_keyfit_metric(keys=train_data[3],
                                               method=gp_method,
                                               drop_first_n_season=drop_first_n_season,
                                               split_date=None,
                                               neu_keys=train_data[2],
                                               filter_season_keys=train_filter_season_keys,
                                               factor_cover_rate=factor_cover_rate,
                                               filter_duplicate_rate=filter_duplicate_rate,
                                               is_cap_neu=False,
                                               is_ci1_neu=False,
                                               key_by_path=True,
                                               is_neu_y=False),
                             greater_is_better=True)

    """
    #####################TRAIN PERIOD######################
    """
    est_gp = SymbolicRegressor(
        population_size=population_size,
        generations=generations,
        tournament_size=tournament_size,
        stopping_criteria=3,
        const_range=None,
        init_depth=init_depth,
        init_method=init_method,
        function_set=my_function_set,
        metric=my_metric,
        parsimony_coefficient=parsimony_coefficient,
        p_crossover=p_crossover,
        p_subtree_mutation=p_subtree_mutation,
        p_hoist_mutation=p_hoist_mutation,
        p_point_mutation=p_point_mutation,
        p_point_replace=p_point_replace,  # node replace prob independently
        max_samples=1,
        feature_names=selected_X_list,
        warm_start=False,
        low_memory=True,
        n_jobs=n_jobs,
        # verbose=2,
        verbose=1,
        multiprocess_backend=multiprocess_backend,
        filter_population_func=filter_population_func,
        filter_population=filter_population,
        random_state=None
    )

    est_gp.fit(train_data[0], train_data[1])

    program_depth = est_gp._program.depth_ + 1  # tree depth
    est_gp_program = est_gp._program.program
    # map integer feature indices back to their column names
    est_gp_program = [selected_X_list[f] if isinstance(f, int) else f for f in est_gp_program]

    # printable form: function objects replaced by their names
    est_gp_program_print = [f.name if not isinstance(f, (int, str, float)) else f for f in est_gp_program]

    program = [est_gp_program, 12]  # 12 = rolling base used throughout this module

    print('program depth are', program_depth)
    gp_logger.logger.info('program depth are %s' % program_depth)

    # -- train-period IC --
    y_pred_train = model_predict(X=train_data[0],
                                 program=program,
                                 keys=train_data[3],
                                 neu_keys=train_data[2],
                                 by_name=True,
                                 key_by_path=True)

    # plain mean IC determines the factor's sign (direction) only
    direction_func = def_keyfit_metric(keys=train_data[3],
                                       method='ic_mean',
                                       drop_first_n_season=drop_first_n_season,
                                       neu_keys=None,
                                       filter_season_keys=train_filter_season_keys,
                                       is_cap_neu=False,
                                       is_ci1_neu=False,
                                       is_neu_y=False,
                                       key_by_path=True,
                                       verbose=True)

    train_ic = direction_func(y=train_data[1].values, y_pred=y_pred_train, w=None)

    train_ic_direction = 1.0 if train_ic >= 0 else -1.0

    train_IC = _weighted_rank_ic_score(train_data, train_filter_season_keys,
                                       y_pred_train, drop_first_n_season,
                                       train_ic_direction, factor_cover_rate,
                                       filter_duplicate_rate)

    # -- validation-period IC --
    y_pred_val = model_predict(X=val_data[0],
                               program=program,
                               neu_keys=val_data[2],
                               keys=val_data[3],
                               by_name=True,
                               key_by_path=True)

    val_IC = _weighted_rank_ic_score(val_data, val_filter_season_keys,
                                     y_pred_val, drop_first_n_season,
                                     train_ic_direction, factor_cover_rate,
                                     filter_duplicate_rate)

    # -- out-of-sample (test) IC --
    y_pred_test = model_predict(X=test_data[0],
                                program=program,
                                neu_keys=test_data[2],
                                keys=test_data[3],
                                by_name=True,
                                key_by_path=True)

    test_IC = _weighted_rank_ic_score(test_data, test_filter_season_keys,
                                      y_pred_test, drop_first_n_season,
                                      train_ic_direction, factor_cover_rate,
                                      filter_duplicate_rate)

    gp_logger.logger.info('\nprogram: %s\ntrain IC: %.4f\nvalidation weight IC: %.4f\ntest weight IC: %.4f\n' %
                          (est_gp_program_print, train_IC, val_IC, test_IC))

    # -- acceptance: IC on every split plus minimal program complexity --
    num_X = sum([1.0 if isinstance(f, str) else 0 for f in est_gp_program])
    num_func = sum([1.0 if not isinstance(f, (str, int, float)) else 0 for f in est_gp_program])

    ic_cond = [
        train_IC > ic_threshold,
        val_IC > ic_threshold,
    ]

    is_save_model = (sum(ic_cond) >= 2) and \
                    (test_IC > ic_threshold) and \
                    (num_X >= 2 or num_func >= 2) and \
                    (program_depth >= 2)

    # -- delete the temporary key-interaction files --
    # NOTE(review): the *_filter_season_keys files (data[4]) are not deleted
    # here, matching the original behaviour — confirm whether that is intended.
    to_del_filepath_list = [
        train_data[2],
        train_data[3],
        val_data[2],
        val_data[3],
        test_data[2],
        test_data[3],
    ]

    delete_file_list(to_del_filepath_list)

    # -- persist the model --
    if is_save_model:
        # render the program tree to a PDF record
        dot_data = est_gp._program.export_graphviz()
        graph = graphviz.Source(dot_data)

        # pick the first unused version suffix
        # (bugfix: loop variable renamed from `id`, which shadowed the builtin)
        version = 1
        while True:
            save_filename = label + '_v%s' % version
            if not os.path.exists(os.path.join(records_dir, save_filename + '.pdf')):
                break

            version += 1

        graph.render(filename=save_filename,
                     directory=records_dir,
                     cleanup=True)

        # persist the program itself
        model_save_path = os.path.join(save_model_dir, '%s.pkl' % (save_filename))

        save_gpmodel_v2(program=est_gp_program,
                        base=12,
                        path=model_save_path)

        # write a human-readable result record
        train_start_date = kwargs['train_start_date']
        train_end_date = kwargs['train_end_date']
        val_start_date = kwargs['val_start_date']
        val_end_date = kwargs['val_end_date']
        test_start_date = kwargs['test_start_date']
        test_end_date = kwargs['test_end_date']

        read_info = 'program(base:%s): %s\n' \
                    'train IC: %.4f (%s to %s)\n' \
                    'validation weight IC: %.4f (%s to %s)\n' \
                    'test weight IC: %.4f (%s to %s)\n' % \
                    (12, est_gp_program_print,
                     train_IC, train_start_date, train_end_date,
                     val_IC, val_start_date, val_end_date,
                     test_IC, test_start_date, test_end_date)

        model_result_save_path = os.path.join(records_dir, '%s_result.txt' % (save_filename))

        save_txt_file(model_result_save_path, read_info)

        print('program:{} saved!'.format(est_gp_program_print))
        print(save_filename, 'done!')
        gp_logger.logger.info('{} saved!'.format(est_gp_program_print))
    else:
        print('program:{} failed to save!'.format(est_gp_program_print))
        # bugfix: Logger.warn is deprecated; warning() is the supported spelling
        gp_logger.logger.warning('{} failed to save!'.format(est_gp_program_print))


def main(**kwargs):
    """Configure one financial-factor mining run and execute it.

    Any exception from the run itself is caught and logged so a failed round
    does not abort the surrounding multi-run loop. ``kwargs`` is currently
    unused but kept for interface compatibility.
    """
    # Re-seed from OS entropy; otherwise repeated runs would reproduce
    # the same random column / program selections
    np.random.seed(None)

    # Training period
    train_start_date = '2008-01-01'
    train_end_date = '2014-12-31'

    # Out-of-sample periods used for factor selection
    val_start_date = '2016-01-01'
    val_end_date = '2017-12-31'

    test_start_date = '2018-01-01'
    test_end_date = '2019-12-31'

    print('train set from %s to %s' % (train_start_date, train_end_date))

    # Collect GP formulas mined by previous runs so they can be filtered out
    # of the new population; best-effort — fall back to no filtering.
    try:
        get_existing_factor_list(factor_type=factors_label,
                                 model_dir=save_model_dir)

        filter_population_path = os.path.join(existing_factor_record_dir, factors_label + '_existing.pkl')
        filter_population = loadFile(filter_population_path)

        print('there are %s factors to filter...' % (len(filter_population)))
    except Exception:  # bugfix: was a bare except (also caught SystemExit/KeyboardInterrupt)
        filter_population = []
        print('there are no factors to filter...')

    # Run parameters
    params = {
        'n_jobs': 1,
        'population_size': 1000,
        # 'population_size': 500,
        # 'generations': 3,
        'generations': 2,
        'tournament_size': 40,
        'init_depth': (2, 4),
        # 'init_method': 'full',
        'init_method': 'half and half',
        'parsimony_coefficient': 0.0001,
        'p_crossover': 0.6,
        'p_subtree_mutation': 0.1,
        'p_hoist_mutation': 0.01,
        'p_point_mutation': 0.1,
        'p_point_replace': 0.4,
        'factor_cover_rate': 0.5,
        'filter_duplicate_rate': 0.1,
        'label': factors_label,
        'filter_population': filter_population,
        # date ranges
        'train_start_date': train_start_date,
        'train_end_date': train_end_date,
        'val_start_date': val_start_date,
        'val_end_date': val_end_date,
        'test_start_date': test_start_date,
        'test_end_date': test_end_date,
        # 'multiprocess_backend': 'multiprocessing',
    }

    # run_v1(**params)
    try:
        run_v1(**params)
    except Exception as e:
        print('financial factors mining exception, info is', e)
        gp_logger.logger.error('financial factors mining exception, info is {}'.format(e))

if __name__ == '__main__':
    # Number of independent mining rounds per invocation
    run_number = 2

    # bugfix: loop variable renamed from `id`, which shadowed the builtin
    for run_idx in range(run_number):
        main()
        print(run_idx, 'done!\n\n')

