#!/usr/bin/python
# -*-coding:utf-8-*-
import os
import pandas as pd
# import datetime
import gc
from time import sleep
from datetime import datetime
import _pickle as cPickle

from dir_info import cache_data_dir

from zg_factor_analysis_module.pipeline_template.zbc_factor_analysis_pipeline import FactorAnalysisPipeline
from zg_factor_analysis_module.pipeline_template.zbc_factor_analysis_pipeline_v2 import FactorAnalysisPipelineV2

from zg_factor_analysis_module.time_schedule import multiprocess_wrap_by_signal


def writeToFile(file, store_path, mode='wb'):
    """Pickle *file* (any picklable object) into *store_path*.

    mode defaults to 'wb' so the target file is created/overwritten in binary.
    """
    handle = open(store_path, mode)
    try:
        cPickle.dump(file, handle)
    finally:
        handle.close()

def factor_analysis_func_v1(**kwargs):
    """Run the factor-analysis pipeline over a list of factors with fixed parameters.

    Expected kwargs:
        factor_name_list : list of factor names to analyze (required).
        id               : optional batch identifier, embedded in result filenames.

    All analysis parameters (dates, universe, grouping, ...) are hard-coded
    here; use factor_analysis_func_v2/v3 to pass them in as kwargs instead.
    """
    factor_name_list = kwargs['factor_name_list']

    # Optional batch id; renamed from `id` to avoid shadowing the builtin.
    task_id = kwargs.get('id')

    #########################################
    # Analysis parameters (fixed for v1)
    start_date = '2010-01-01'
    end_date = '2019-05-31'

    group_method = 'cap'

    weight_method = 'cir_cap'

    # NOTE(review): 'startegy' is the (misspelled) token the pipeline expects
    # (see factor_test_config in __main__); keep the spelling as-is.
    rebalance_type = 'startegy'
    # rebalance_type = 'fixed'
    rebalance_periods = 21

    # universe_pool = 'all'
    universe_pool = '000905'

    benchmark_index_code = '000905'

    # group_quantile = 10
    group_quantile = 5

    industry_type = 'sw1'

    liquidity_filter = None
    # liquidity_filter = 'money'

    liquidity_filter_period = 21

    # Data-preprocessing switches forwarded to factor_list_analysis_main.
    # (Renamed from `kwargs`, which clobbered the function parameter.)
    data_process_params = dict(
        fillna=False,
        winsorize=False,
        normalization=False,
        drop_duplicates=True,
    )

    # Initialize the analysis API
    analysis_api = FactorAnalysisPipeline(start_date=start_date,
                                          end_date=end_date,
                                          rebalance_type=rebalance_type,
                                          rebalance_periods=rebalance_periods,
                                          universe_pool=universe_pool,
                                          liquidity_filter=liquidity_filter,
                                          liquidity_filter_period=liquidity_filter_period,
                                          benchmark_index_code=benchmark_index_code,
                                          group_quantile=group_quantile,
                                          group_method=group_method,
                                          weight_method=weight_method,
                                          industry_type=industry_type,
                                          # factor_database='public',
                                          factor_database='validation',
                                          is_plot_quantile_abs_return=False,
                                          is_save_quantile_detail=False,
                                          is_do_regress_analysis=False)

    # Timestamp suffix keeps repeated runs from overwriting each other.
    # Fixed: format was '%Y%m%d%H%S%M' (hour-SECOND-minute); now matches the
    # conventional order the schedulers in this file already use.
    dt = datetime.now().strftime('%Y%m%d%H%M%S')

    if task_id is None:
        summary_result_prefix = 'all_ths_factor_%s' % (dt)
    else:
        summary_result_prefix = 'all_ths_factor_id%s_%s' % (task_id, dt)

    # Human-readable directory name encoding every analysis parameter.
    analysis_name = 'ths_pool_%s_bench_%s_%s_%s_%dg_group_%s_weight_%s_%s_%dd_liquidity_%s_%dd' % (
        universe_pool,
        benchmark_index_code,
        pd.to_datetime(start_date).strftime('%Y%m%d'),
        pd.to_datetime(end_date).strftime('%Y%m%d'),
        group_quantile,
        group_method,
        weight_method,
        rebalance_type,
        rebalance_periods,
        liquidity_filter,
        liquidity_filter_period)

    summary_result_save_dir = os.path.join('/db/zg_data/zbc/factor_analysis/ths_jh_fund_project', analysis_name)
    single_factor_analysis_result_cache_dir = os.path.join(summary_result_save_dir, 'single_factor_result')
    single_factor_plot_save_dir = os.path.join(single_factor_analysis_result_cache_dir, 'plot')

    if not os.path.exists(summary_result_save_dir):
        print('analysis', analysis_name)

    # exist_ok=True avoids the check-then-create race when several worker
    # processes start concurrently on the same output directory.
    os.makedirs(summary_result_save_dir, exist_ok=True)
    os.makedirs(single_factor_analysis_result_cache_dir, exist_ok=True)
    os.makedirs(single_factor_plot_save_dir, exist_ok=True)

    # Run the per-factor analysis.
    analysis_api.factor_list_analysis_main(factor_name_list=factor_name_list,
                                           summary_result_filename_prefix=summary_result_prefix,
                                           summary_result_save_dir=summary_result_save_dir,
                                           single_factor_analysis_result_cache_dir=single_factor_analysis_result_cache_dir,
                                           single_factor_plot_save_dir=single_factor_plot_save_dir,
                                           **data_process_params)

    # Drop the pipeline and force a GC cycle so cached market data is
    # released before the worker process moves on.
    del analysis_api
    gc.collect()

    sleep(10)
    print(task_id, 'done!\n\n')

def factor_analysis_func_v2(**kwargs):
    """Run the factor-analysis pipeline; analysis parameters are passed via kwargs.

    Required kwargs:
        factor_name_list, start_date, end_date, group_method, weight_method,
        rebalance_type, rebalance_periods, universe_pool, benchmark_index_code,
        group_quantile.
    Optional kwargs (default):
        id (None), industry_type ('sw1'), liquidity_filter (None),
        liquidity_filter_period (21), factor_database ('public'),
        fillna (False), winsorize (False), normalization (False),
        drop_duplicates (True),
        summary_result_save_dir ('/db/zg_data/zbc/factor_analysis/public').
    """
    factor_name_list = kwargs['factor_name_list']

    # Optional batch id; renamed from `id` to avoid shadowing the builtin.
    task_id = kwargs.get('id')

    #########################################
    # Required analysis parameters
    start_date = kwargs['start_date']
    end_date = kwargs['end_date']
    group_method = kwargs['group_method']
    weight_method = kwargs['weight_method']
    rebalance_type = kwargs['rebalance_type']
    rebalance_periods = kwargs['rebalance_periods']

    # Either a named index code string (e.g. '000905', 'all') or a
    # caller-supplied custom pool (see analysis_name branch below).
    universe_pool = kwargs['universe_pool']

    benchmark_index_code = kwargs['benchmark_index_code']
    group_quantile = kwargs['group_quantile']

    # Optional parameters: kwargs.get replaces the original if/else ladders.
    industry_type = kwargs.get('industry_type', 'sw1')
    liquidity_filter = kwargs.get('liquidity_filter')  # e.g. 'money'
    liquidity_filter_period = kwargs.get('liquidity_filter_period', 21)
    factor_database = kwargs.get('factor_database', 'public')

    # Data-preprocessing switches forwarded to factor_list_analysis_main.
    data_process_params = dict(
        fillna=kwargs.get('fillna', False),
        winsorize=kwargs.get('winsorize', False),
        normalization=kwargs.get('normalization', False),
        drop_duplicates=kwargs.get('drop_duplicates', True),
    )

    # Initialize the analysis API
    analysis_api = FactorAnalysisPipeline(start_date=start_date,
                                          end_date=end_date,
                                          rebalance_type=rebalance_type,
                                          rebalance_periods=rebalance_periods,
                                          universe_pool=universe_pool,
                                          liquidity_filter=liquidity_filter,
                                          liquidity_filter_period=liquidity_filter_period,
                                          benchmark_index_code=benchmark_index_code,
                                          group_quantile=group_quantile,
                                          group_method=group_method,
                                          weight_method=weight_method,
                                          industry_type=industry_type,
                                          factor_database=factor_database,
                                          is_plot_quantile_abs_return=False,
                                          is_save_quantile_detail=False,
                                          is_do_regress_analysis=False)

    # Timestamp suffix keeps repeated runs from overwriting each other.
    # Fixed: format was '%Y%m%d%H%S%M' (hour-SECOND-minute); now matches the
    # conventional order the schedulers in this file already use.
    dt = datetime.now().strftime('%Y%m%d%H%M%S')

    if task_id is None:
        summary_result_prefix = 'all_ths_factor_%s' % (dt)
    else:
        summary_result_prefix = 'all_ths_factor_id%s_%s' % (task_id, dt)

    # Directory name encodes every analysis parameter. A string pool is a
    # named index; anything else is treated as a self-defined pool.
    if isinstance(universe_pool, str):
        analysis_name = 'ths_pool_%s_bench_%s_%s_%s_%dg_group_%s_weight_%s_%s_%dd_liquidity_%s_%dd' % (
            universe_pool,
            benchmark_index_code,
            pd.to_datetime(start_date).strftime('%Y%m%d'),
            pd.to_datetime(end_date).strftime('%Y%m%d'),
            group_quantile,
            group_method,
            weight_method,
            rebalance_type,
            rebalance_periods,
            liquidity_filter,
            liquidity_filter_period)
    else:
        analysis_name = 'ths_self_def_pool_bench_%s_%s_%s_%dg_group_%s_weight_%s_%s_%dd_liquidity_%s_%dd' % (
            benchmark_index_code,
            pd.to_datetime(start_date).strftime('%Y%m%d'),
            pd.to_datetime(end_date).strftime('%Y%m%d'),
            group_quantile,
            group_method,
            weight_method,
            rebalance_type,
            rebalance_periods,
            liquidity_filter,
            liquidity_filter_period)

    base_save_dir = kwargs.get('summary_result_save_dir', '/db/zg_data/zbc/factor_analysis/public')
    summary_result_save_dir = os.path.join(base_save_dir, analysis_name)
    single_factor_analysis_result_cache_dir = os.path.join(summary_result_save_dir, 'single_factor_result')
    single_factor_plot_save_dir = os.path.join(single_factor_analysis_result_cache_dir, 'plot')

    if not os.path.exists(summary_result_save_dir):
        print('analysis', analysis_name)

    # exist_ok=True avoids the check-then-create race when several worker
    # processes start concurrently on the same output directory.
    os.makedirs(summary_result_save_dir, exist_ok=True)
    os.makedirs(single_factor_analysis_result_cache_dir, exist_ok=True)
    os.makedirs(single_factor_plot_save_dir, exist_ok=True)

    # Run the per-factor analysis.
    analysis_api.factor_list_analysis_main(factor_name_list=factor_name_list,
                                           summary_result_filename_prefix=summary_result_prefix,
                                           summary_result_save_dir=summary_result_save_dir,
                                           single_factor_analysis_result_cache_dir=single_factor_analysis_result_cache_dir,
                                           single_factor_plot_save_dir=single_factor_plot_save_dir,
                                           **data_process_params)

    # Drop the pipeline and force a GC cycle so cached market data is
    # released before the worker process moves on.
    del analysis_api
    gc.collect()

    sleep(10)
    print(task_id, 'done!\n\n')

def factor_analysis_func_v3(**kwargs):
    """Run the V2 factor-analysis pipeline; analysis parameters come in via kwargs.

    Same contract as factor_analysis_func_v2, plus:
        is_save_position_data (False) : forwarded to factor_list_analysis_main.
    Uses FactorAnalysisPipelineV2 instead of FactorAnalysisPipeline.
    """
    factor_name_list = kwargs['factor_name_list']

    # Optional batch id; renamed from `id` to avoid shadowing the builtin.
    task_id = kwargs.get('id')

    #########################################
    # Required analysis parameters
    start_date = kwargs['start_date']
    end_date = kwargs['end_date']
    group_method = kwargs['group_method']
    weight_method = kwargs['weight_method']
    rebalance_type = kwargs['rebalance_type']
    rebalance_periods = kwargs['rebalance_periods']

    # Either a named index code string (e.g. '000905', 'all') or a
    # caller-supplied custom pool (see analysis_name branch below).
    universe_pool = kwargs['universe_pool']

    benchmark_index_code = kwargs['benchmark_index_code']
    group_quantile = kwargs['group_quantile']

    # Optional parameters: kwargs.get replaces the original if/else ladders.
    industry_type = kwargs.get('industry_type', 'sw1')
    liquidity_filter = kwargs.get('liquidity_filter')  # e.g. 'money'
    liquidity_filter_period = kwargs.get('liquidity_filter_period', 21)
    factor_database = kwargs.get('factor_database', 'public')

    # Data-preprocessing switches forwarded to factor_list_analysis_main.
    data_process_params = dict(
        fillna=kwargs.get('fillna', False),
        winsorize=kwargs.get('winsorize', False),
        normalization=kwargs.get('normalization', False),
        drop_duplicates=kwargs.get('drop_duplicates', True),
        is_save_position_data=kwargs.get('is_save_position_data', False),
    )

    # Initialize the analysis API (V2 pipeline)
    analysis_api = FactorAnalysisPipelineV2(start_date=start_date,
                                            end_date=end_date,
                                            rebalance_type=rebalance_type,
                                            rebalance_periods=rebalance_periods,
                                            universe_pool=universe_pool,
                                            liquidity_filter=liquidity_filter,
                                            liquidity_filter_period=liquidity_filter_period,
                                            benchmark_index_code=benchmark_index_code,
                                            group_quantile=group_quantile,
                                            group_method=group_method,
                                            weight_method=weight_method,
                                            industry_type=industry_type,
                                            factor_database=factor_database,
                                            is_plot_quantile_abs_return=False,
                                            is_save_quantile_detail=False,
                                            is_do_regress_analysis=False)

    # Timestamp suffix keeps repeated runs from overwriting each other.
    # Fixed: format was '%Y%m%d%H%S%M' (hour-SECOND-minute); now matches the
    # conventional order the schedulers in this file already use.
    dt = datetime.now().strftime('%Y%m%d%H%M%S')

    if task_id is None:
        summary_result_prefix = 'all_ths_factor_%s' % (dt)
    else:
        summary_result_prefix = 'all_ths_factor_id%s_%s' % (task_id, dt)

    # Directory name encodes every analysis parameter. A string pool is a
    # named index; anything else is treated as a self-defined pool.
    if isinstance(universe_pool, str):
        analysis_name = 'ths_pool_%s_bench_%s_%s_%s_%dg_group_%s_weight_%s_%s_%dd_liquidity_%s_%dd' % (
            universe_pool,
            benchmark_index_code,
            pd.to_datetime(start_date).strftime('%Y%m%d'),
            pd.to_datetime(end_date).strftime('%Y%m%d'),
            group_quantile,
            group_method,
            weight_method,
            rebalance_type,
            rebalance_periods,
            liquidity_filter,
            liquidity_filter_period)
    else:
        analysis_name = 'ths_self_def_pool_bench_%s_%s_%s_%dg_group_%s_weight_%s_%s_%dd_liquidity_%s_%dd' % (
            benchmark_index_code,
            pd.to_datetime(start_date).strftime('%Y%m%d'),
            pd.to_datetime(end_date).strftime('%Y%m%d'),
            group_quantile,
            group_method,
            weight_method,
            rebalance_type,
            rebalance_periods,
            liquidity_filter,
            liquidity_filter_period)

    base_save_dir = kwargs.get('summary_result_save_dir', '/db/zg_data/zbc/factor_analysis/public')
    summary_result_save_dir = os.path.join(base_save_dir, analysis_name)
    single_factor_analysis_result_cache_dir = os.path.join(summary_result_save_dir, 'single_factor_result')
    single_factor_plot_save_dir = os.path.join(single_factor_analysis_result_cache_dir, 'plot')

    if not os.path.exists(summary_result_save_dir):
        print('analysis', analysis_name)

    # exist_ok=True avoids the check-then-create race when several worker
    # processes start concurrently on the same output directory.
    os.makedirs(summary_result_save_dir, exist_ok=True)
    os.makedirs(single_factor_analysis_result_cache_dir, exist_ok=True)
    os.makedirs(single_factor_plot_save_dir, exist_ok=True)

    # Run the per-factor analysis.
    analysis_api.factor_list_analysis_main(factor_name_list=factor_name_list,
                                           summary_result_filename_prefix=summary_result_prefix,
                                           summary_result_save_dir=summary_result_save_dir,
                                           single_factor_analysis_result_cache_dir=single_factor_analysis_result_cache_dir,
                                           single_factor_plot_save_dir=single_factor_plot_save_dir,
                                           **data_process_params)

    # Drop the pipeline and force a GC cycle so cached market data is
    # released before the worker process moves on.
    del analysis_api
    gc.collect()

    sleep(10)
    print(task_id, 'done!\n\n')


# Latest version: signal-file-driven batch schedulers.
def multi_process_by_signal(factor_name_list, batch=20, start_id=0, **kwargs):
    """Split factor_name_list into fixed-size batches and run each batch
    through factor_analysis_func_v2 via the signal-file scheduler.

    Parameters
    ----------
    factor_name_list : list of factor names to distribute over tasks.
    batch : number of factors per task.
    start_id : id of the first task (lets several schedules coexist).
    **kwargs : copied verbatim into every task's config
               (see factor_analysis_func_v2 for recognized keys).
    """
    # Assign factors to tasks, `batch` names per task.
    total_factor_num = len(factor_name_list)
    n = total_factor_num // batch

    run_func_dict = {}
    config = {}
    signal_ids_list = []
    for i in range(n + 1):
        sid = i * batch
        # When the list length is an exact multiple of `batch`, the final
        # iteration would produce an empty slice — skip it.
        if sid >= total_factor_num:
            continue
        # Slice end may run past the list; Python slicing clamps it.
        # (The original's `if i == n` / `else` branches were identical — removed.)
        eid = sid + batch

        task_key = str(start_id + i)
        run_func_dict[task_key] = factor_analysis_func_v2
        task_config = {
            'factor_name_list': factor_name_list[sid:eid],
            'id': start_id + i,
        }
        # Shared analysis parameters are replicated into each task.
        task_config.update(kwargs)
        config[task_key] = task_config

        signal_ids_list.append(start_id + i)

    # Write the run schedule: only the first id starts enabled (True); the
    # scheduler flips later ids as work progresses. A few extra ids (+5)
    # are pre-created as slack.
    now_dt = datetime.now().strftime('%Y%m%d%H%M%S')
    signal_filename = 'zg02_all_pool_factor_analysis_schedule_' + now_dt

    run_schedule = {i: i <= start_id for i in range(start_id, start_id + n + 5)}

    run_schedule_file_path = os.path.join(cache_data_dir, signal_filename + '.pkl')
    writeToFile(run_schedule, run_schedule_file_path)

    # The schedule file doubles as the interaction (signal) file.
    multiprocess_wrap_by_signal(run_func_dict,
                                run_schedule_file_path,
                                signal_ids_list,
                                is_sleep=True,
                                verbose=False,
                                **config)

def multi_process_by_signal_v2(factor_name_list, batch=20, start_id=0, **kwargs):
    """Same batching scheduler as multi_process_by_signal, but each task runs
    factor_analysis_func_v3 (the FactorAnalysisPipelineV2 backend).

    Parameters
    ----------
    factor_name_list : list of factor names to distribute over tasks.
    batch : number of factors per task.
    start_id : id of the first task (lets several schedules coexist).
    **kwargs : copied verbatim into every task's config
               (see factor_analysis_func_v3 for recognized keys).
    """
    # Assign factors to tasks, `batch` names per task.
    total_factor_num = len(factor_name_list)
    n = total_factor_num // batch

    run_func_dict = {}
    config = {}
    signal_ids_list = []
    for i in range(n + 1):
        sid = i * batch
        # When the list length is an exact multiple of `batch`, the final
        # iteration would produce an empty slice — skip it.
        if sid >= total_factor_num:
            continue
        # Slice end may run past the list; Python slicing clamps it.
        # (The original's `if i == n` / `else` branches were identical — removed.)
        eid = sid + batch

        task_key = str(start_id + i)
        run_func_dict[task_key] = factor_analysis_func_v3
        task_config = {
            'factor_name_list': factor_name_list[sid:eid],
            'id': start_id + i,
        }
        # Shared analysis parameters are replicated into each task.
        task_config.update(kwargs)
        config[task_key] = task_config

        signal_ids_list.append(start_id + i)

    # Write the run schedule: only the first id starts enabled (True); the
    # scheduler flips later ids as work progresses. A few extra ids (+5)
    # are pre-created as slack.
    now_dt = datetime.now().strftime('%Y%m%d%H%M%S')
    signal_filename = 'zg02_all_pool_factor_analysis_schedule_' + now_dt

    run_schedule = {i: i <= start_id for i in range(start_id, start_id + n + 5)}

    run_schedule_file_path = os.path.join(cache_data_dir, signal_filename + '.pkl')
    writeToFile(run_schedule, run_schedule_file_path)

    # The schedule file doubles as the interaction (signal) file.
    multiprocess_wrap_by_signal(run_func_dict,
                                run_schedule_file_path,
                                signal_ids_list,
                                is_sleep=True,
                                verbose=False,
                                **config)

if __name__ == '__main__':
    from dir_info import external_data_dir
    from shared_factor_lib_management.config import ths_jh_shared_factor_lib_info_filename

    # Shared analysis parameters: forwarded into every worker task's config
    # by multi_process_by_signal and consumed by factor_analysis_func_v2.
    factor_test_config = {
        'start_date':  '2010-01-01',
        'end_date' : '2019-05-31',
        'group_method' : 'cap',
        'weight_method' : 'cir_cap',
        'rebalance_type' : 'startegy',
        'rebalance_periods' : 21,
        'universe_pool' : '000905',
        'benchmark_index_code' : '000905',
        'group_quantile' : 5,
        'industry_type' : 'sw1',
        'liquidity_filter' : None,
        'liquidity_filter_period' : 21,
        'summary_result_save_dir' : '/db/zg_data/zbc/factor_analysis/ths_jh_fund_project',
    }
    ###

    # NOTE(review): start_datetime and start_id are assigned but never read
    # below (the call at the bottom hard-codes start_id=0) — confirm before
    # cleaning up.
    start_datetime = '2019-07-08 16:35:00'
    start_id = 0

    # Factor names already present in the shared THS/JH factor library.
    ths_jh_shared_factor_lib_info = pd.read_excel(os.path.join(external_data_dir, ths_jh_shared_factor_lib_info_filename + '.xlsx'))
    ths_jh_shared_factor_lib_info = ths_jh_shared_factor_lib_info['factor_name'].tolist()

    # Candidate factor names, sorted for a deterministic starting order.
    ths_factor_name_list = pd.read_excel('/db/zg_data/zbc/factor_analysis/selected_ths_factor_name_df.xlsx')
    ths_factor_name_list = ths_factor_name_list['factor_name'].tolist()
    ths_factor_name_list = sorted(ths_factor_name_list)

    # Drop factors that are already in the shared library.
    # NOTE(review): set.difference discards the sort applied above, so the
    # final ordering of ths_factor_name_list is arbitrary — confirm intended.
    ths_factor_name_list = list(set(ths_factor_name_list).difference(ths_jh_shared_factor_lib_info))

    # Keep only factors whose name prefix (text before the first '_') is in
    # selected_type; here only high-frequency ('hf') factors.
    selected_type = ['hf']
    ths_factor_name_list = [fn for fn in ths_factor_name_list if fn.split('_')[0] in selected_type]

    # Cap the run at 20 factors (one scheduler batch).
    # total_num = len(ths_factor_name_list) // 2
    total_num = 20

    ths_factor_name_list = ths_factor_name_list[:total_num]

    multi_process_by_signal(factor_name_list=ths_factor_name_list,
                            batch=20,
                            start_id=0,
                            **factor_test_config)


