#!/usr/bin/python
# -*-coding:utf-8-*-
import os
import pandas as pd
import numpy as np
import gc
from time import time

from base.trade_date_data_generation import get_every_fixed_frequency_trade_date

from zg02_factor_lib.base.factors_library_base import NewFactorLib as DataReader


# Output directory for all correlation-analysis artifacts.
period_corr_data_save_dir = './zbc_gplearn_factor_mining/corr_analysis'

os.makedirs(period_corr_data_save_dir, exist_ok=True)

## TODO - settings
factor_type = 'gp_ub'

gp_factors_db = 'gp_factors/barra_stripping'

# Trade-date query starts earlier than the analysis window so the
# fixed-frequency rebalance schedule is anchored before start_date.
get_trade_start_date = '2016-01-01'
start_date = '2017-01-01'
end_date = '2020-03-31'

# Minimum fraction of overlapping observations required for a pairwise corr.
min_overlap_pct = 0.5

period_corr_data_save_filename = 'zg02_{}_factor_period_corr_data_test'.format(factor_type)
mean_corr_data_save_filename = 'zg02_{}_factor_mean_corr_data_test'.format(factor_type)


# TODO - sub-period parameters for the split-sample correlation averages
period_1_start_date = start_date
period_1_end_date = '2018-12-31'

period_2_start_date = '2019-01-01'
period_2_end_date = end_date


''''''
# Instantiate the factor-library reader and collect the factor tables to
# analyze: every .h5 file in the library, stripped of its extension.
ths_data_reader = DataReader(db=gp_factors_db)

_library_files = ths_data_reader.show_factor_library_db()

ths_to_analyze_factor_list = [
    fn.split('.h5')[0] for fn in _library_files if '.h5' in fn
]

# TODO - filtered: keep only factors from the v2 user-behavior mining run
# (NOTE: the 'hehavior' spelling matches the stored table names).
ths_to_analyze_factor_list = [
    name for name in ths_to_analyze_factor_list
    if 'zg02_ths_user_hehavior_factor_mining_v2' in name
]


#########################################################################################
def get_cross_sectional_corr(df, method='pearson', min_pct=0.5):
    """Pairwise correlation matrix of the columns of ``df``.

    A pair of columns is only correlated when at least ``min_pct`` of the
    rows overlap (non-NaN in both); entries below that threshold are NaN.
    """
    data = df.copy()
    required_obs = int(data.shape[0] * min_pct)
    return data.corr(method, min_periods=required_obs)

def get_factor_corr(factor_data, method='pearson', min_pct=0.5):
    """Per-date cross-sectional correlation matrices, stacked.

    Groups ``factor_data`` by its ``date`` column and computes one
    correlation matrix per date via :func:`get_cross_sectional_corr`.
    The result is indexed by ``(date, factor_name)``.
    """
    grouped = factor_data.copy().groupby('date')
    corr_df = grouped.apply(get_cross_sectional_corr,
                            method=method,
                            min_pct=min_pct)
    corr_df.index.names = ['date', 'factor_name']
    return corr_df

def get_mean_corr_data(corr_data, by='factor_name'):
    """Average the stacked correlation data over ``by`` (the factor level
    by default), yielding one mean correlation matrix."""
    return corr_data.groupby(by).mean()

# Read each factor table, clean it, and align all factors column-wise.
def read_concat_data(factor_name_list,
                     start_date=None,
                     end_date=None,
                     filter_date=None,
                     filter_csi500_pool=True):
    """Read the given factor tables and concatenate them column-wise.

    Parameters
    ----------
    factor_name_list : list of str
        Factor table names readable through the module-level
        ``ths_data_reader``.
    start_date, end_date : str, optional
        Inclusive bounds applied to each factor's ``date`` column.
    filter_date : list, optional
        If given, keep only rows whose date is in this collection.
    filter_csi500_pool : bool
        If True, restrict the result to CSI500 constituents via the
        ``is_csi500`` flag of the basic-data table.

    Returns
    -------
    pandas.DataFrame or None
        One column per factor, indexed by ``(date, stock_code)``;
        ``None`` when every factor table failed to load.
    """
    filter_list = None

    frames = []

    print(start_date, 'to', end_date, 'read and concat data starts...')
    for factor_name in factor_name_list:
        try:
            factor_data = ths_data_reader.read_factor_table(factor_name,
                                                            filter_list=filter_list)

            # float32 halves memory versus the default float64.
            factor_data[factor_name] = factor_data[factor_name].astype(np.float32)

            factor_data = factor_data[(factor_data['date'] >= start_date) &
                                      (factor_data['date'] <= end_date)]

            # De-duplicate (date, stock_code): sorting by the factor value
            # pushes NaNs last, so keep='first' prefers a non-NaN value.
            factor_data = factor_data.sort_values(['stock_code', 'date', factor_name],
                                                  ascending=True)
            factor_data = factor_data.drop_duplicates(['date', 'stock_code'], keep='first')

            if filter_date is not None:
                factor_data = factor_data[factor_data['date'].isin(filter_date)]

            factor_data = factor_data.set_index(['date', 'stock_code'])
        except Exception as e:
            # Best-effort: report and skip a broken factor table.
            print(factor_name, 'exception, info is', e)
            continue

        frames.append(factor_data)

        print(factor_name, 'done!')

    # Concatenate once instead of growing the frame inside the loop
    # (repeated pd.concat is quadratic in the number of factors).
    concat_factor_data = pd.concat(frames, axis=1) if frames else None

    if filter_csi500_pool:
        # Membership flags come from the basic-data table; a missing
        # record (NaN) is treated as "not a constituent".
        pool_data = \
            ths_data_reader.read_basic_data_table('processed_new_stock_label_ci1_data')

        pool_data = pool_data.drop_duplicates(['date', 'stock_code'])

        pool_data = pool_data.set_index(['date', 'stock_code'])

        pool_data = pool_data[['is_csi500']]

        concat_factor_data['is_csi500'] = \
            pool_data.loc[concat_factor_data.index, 'is_csi500'].values

        concat_factor_data['is_csi500'] = concat_factor_data['is_csi500'].fillna(0.0)

        # Keep CSI500 constituents only, then drop the helper column.
        concat_factor_data = concat_factor_data[concat_factor_data['is_csi500'] == 1.0]

        concat_factor_data.drop('is_csi500', axis=1, inplace=True)

    print(start_date, 'to', end_date, 'read and concat data done!')

    return concat_factor_data
#########################################################################################


# TODO - analysis
'''参数'''

# Periods over which the per-date correlations are computed.
date_period = [
    [start_date, end_date],
]

# Fixed 5-day rebalance schedule; the calendar query is anchored at
# get_trade_start_date so the first rebalance precedes start_date.
_freq_trade_dates = get_every_fixed_frequency_trade_date(start_date=get_trade_start_date,
                                                         end_date=end_date,
                                                         freq=5)

_rebalance_dates = _freq_trade_dates['rebalance_start']

_in_range = (_rebalance_dates >= start_date) & (_rebalance_dates <= end_date)

analysis_date_list = _rebalance_dates[_in_range].tolist()

# Compute per-date correlation matrices for each analysis period and
# stack them into one frame indexed by (date, factor_name).
period_corr_data = None
for i, (start, end) in enumerate(date_period):
    tst = time()

    # TODO - read and concat data
    st = time()
    concat_factor_data = read_concat_data(factor_name_list=ths_to_analyze_factor_list,
                                          start_date=start,
                                          end_date=end,
                                          filter_date=analysis_date_list,
                                          filter_csi500_pool=False)
    et = time()
    print('read and concat data spent time is %.5f sec.' % (et-st))

    # BUG FIX: later periods previously fell back to the default min_pct
    # instead of min_overlap_pct; pass it on every iteration.
    period_corr = get_factor_corr(concat_factor_data, min_pct=min_overlap_pct)
    if period_corr_data is None:
        period_corr_data = period_corr
    else:
        period_corr_data = pd.concat([period_corr_data, period_corr], axis=0)

    # Release the (large) concatenated factor frame before the next period.
    del concat_factor_data
    gc.collect()

    tet = time()
    print(i, 'done, time spent is %.5f sec.\n' % (tet-tst))

period_corr_data.to_hdf(os.path.join(period_corr_data_save_dir, period_corr_data_save_filename + '.h5'),
                        key=period_corr_data_save_filename)

# Reload so downstream code operates on the round-tripped HDF5 copy.
period_corr_data = pd.read_hdf(os.path.join(period_corr_data_save_dir, period_corr_data_save_filename + '.h5'))


# TODO - mean correlations over the whole sample and both sub-periods
def _slice_period(corr_data, period_start, period_end):
    """Rows of ``corr_data`` whose 'date' level lies in
    [period_start, period_end], with the original index restored."""
    index_names = corr_data.index.names
    sliced = corr_data.reset_index()
    sliced = sliced[(sliced['date'] >= period_start) &
                    (sliced['date'] <= period_end)]
    return sliced.set_index(index_names)


period_1_corr_data = _slice_period(period_corr_data, period_1_start_date, period_1_end_date)
period_2_corr_data = _slice_period(period_corr_data, period_2_start_date, period_2_end_date)

mean_period_corr_data = get_mean_corr_data(period_corr_data)

mean_period_1_corr_data = get_mean_corr_data(period_1_corr_data)
mean_period_2_corr_data = get_mean_corr_data(period_2_corr_data)

# Reorder rows and columns to the canonical factor order.
mean_period_1_corr_data = mean_period_1_corr_data.loc[ths_to_analyze_factor_list, ths_to_analyze_factor_list]
mean_period_2_corr_data = mean_period_2_corr_data.loc[ths_to_analyze_factor_list, ths_to_analyze_factor_list]
mean_period_corr_data = mean_period_corr_data.loc[ths_to_analyze_factor_list, ths_to_analyze_factor_list]

mean_period_corr_data.to_hdf(os.path.join(period_corr_data_save_dir, mean_corr_data_save_filename + '_whole_period.h5'),
                             key=mean_corr_data_save_filename+'_whole_period')

mean_period_corr_data.to_excel(os.path.join(period_corr_data_save_dir, mean_corr_data_save_filename + '.xlsx'))
mean_period_1_corr_data.to_excel(os.path.join(period_corr_data_save_dir, mean_corr_data_save_filename + '_period_1.xlsx'))
mean_period_2_corr_data.to_excel(os.path.join(period_corr_data_save_dir, mean_corr_data_save_filename + '_period_2.xlsx'))

print(mean_period_corr_data)




