#!/usr/bin/python
# -*-coding:utf-8-*-
import os
import pandas as pd
import numpy as np

from zg_data_process.zg_data_process import DataProcess

# Shared project helper that implements all cross-sectional factor
# transforms used below (fill / winsorize / normalize / neutralize).
data_processor = DataProcess()

# Directory holding the per-chunk concatenated style-factor HDF5 caches
# read by the update branch below.
cache_factor_dir = '/db/zg_data/zbc/strategy_experiment/mutual_fund_strategies/linear_dynamic_style_alloc/cache'


def process_and_cache_factor_data_v2(concat_factor_data,
                                     factor_name_list):
    """Run the standard cross-sectional preprocessing pipeline on style factors.

    Pipeline (order matters — each step feeds the next):
      1. drop rows with missing ``delist_date``
      2. restrict to CSI 500 constituents
      3. fill missing factor values with the SW level-1 industry median (q=0.5)
      4. MAD winsorization per cross-section
      5. z-score normalization per cross-section
      6. neutralize against market cap and SW level-1 industry
      7. merge duplicates introduced by dropping the industry label (mean rule)

    :param concat_factor_data: DataFrame with at least ``stock_code``, ``date``,
        ``delist_date`` and the columns named in ``factor_name_list``.
    :param factor_name_list: list of factor column names to process.
    :return: DataFrame reduced to ``['stock_code', 'date'] + factor_name_list``.
    """
    df = concat_factor_data.copy()

    # Step 1: keep only rows that have a delist_date.
    df = df[~df['delist_date'].isnull()]
    print('concat factor data drop delist date nan done!')

    ## TODO - basic factor processing
    # Step 2: restrict to CSI 500 constituents.
    df = data_processor.get_csi500_component_data(df=df, drop_label=True)

    # Step 3: fill NaNs with the SW1-industry median (quantile 0.5).
    df = data_processor.filled_with_sw1_stats_value(df=df,
                                                    columns=factor_name_list,
                                                    q=0.5)
    print('concat factor data sw1 fill nan done!')

    # Step 4: cross-sectional MAD outlier clipping (clip, don't drop).
    df = data_processor.cs_mad_outlier_process(df=df,
                                               columns=factor_name_list,
                                               drop=False,
                                               copy=True,
                                               verbose=False)
    print('concat factor data mad outlier processed!')

    # Step 5: cross-sectional z-score standardization.
    df = data_processor.cs_z_score_normalization_process(df=df,
                                                         columns=factor_name_list,
                                                         copy=True,
                                                         verbose=False)
    print('concat factor data normalized!')

    # Step 6: neutralize against market cap and SW1 industry.
    df = data_processor.cs_cap_sw1_ind_neutral_process(df=df,
                                                       factor_columns=factor_name_list)

    # TODO - deduplicate
    # Step 7: merge duplicate (stock_code, date) rows — mainly duplicates
    # created by dropping the SW1 industry label — averaging factor values.
    df = data_processor.duplicates_merge(df=df,
                                         columns=factor_name_list,
                                         rule='mean')

    # Keep only the identifier columns plus the processed factors.
    return df[['stock_code', 'date'] + factor_name_list]


# When True, re-sample and re-cache the factor data from the raw chunk cache;
# when False, load the previously cached sample from the buffer path below.
update_data = False

# Non-factor ("functional") columns: identifiers, flags and metadata.
# Any DataFrame column NOT in this list is treated as a factor column
# (see the columns.difference(func_columns) calls below).
func_columns = [
    'stock_code',
    'date',
    'board',
    'down_limit',
    'down_one_line',
    'is_csi300',
    'is_csi500',
    'is_sh50',
    'paused',
    'sw1_code',
    'sw1_name',
    'up_limit',
    'up_one_line',
    'is_st',
    'stock_name',
    'list_date',
    'delist_date',
    'list_day',
    'is_new',
    'is_sub_new',
    'scale_total_market_size',
]

if update_data:
    # Rebuild the sampled dataset from the first cached factor chunk.
    # Renamed from `id` to avoid shadowing the builtin.
    chunk_id = 0

    try:
        concat_factor_data = pd.read_hdf(os.path.join(cache_factor_dir, 'concat_and_label_selected_style_factor_data_%d.h5' % (chunk_id)))
    except Exception as e:
        print('style factor process - id - %d exception, info:' % (chunk_id), e)
        # Re-raise the ORIGINAL exception (bare `raise` preserves type,
        # message and traceback; `raise Exception` discarded all of them).
        raise

    # Factor columns are everything not listed as a functional column.
    factor_name_list = list(concat_factor_data.columns.difference(func_columns))

    uni_date = pd.DatetimeIndex(concat_factor_data['date'].unique())

    # Randomly sample 50 distinct dates, kept in chronological order.
    # NOTE(review): np.random is unseeded, so each refresh caches a
    # different sample — seed explicitly if reproducibility is needed.
    selected_date = uni_date[np.sort(np.random.choice(uni_date.shape[0], 50, replace=False))]

    selected_concat_factor_data = concat_factor_data[concat_factor_data['date'].isin(selected_date)]

    # Persist the sample so later runs can skip the expensive rebuild.
    selected_concat_factor_data.to_hdf('/db/zg_data/zbc/buffer/selected_concat_factor_data.h5', key='selected_concat_factor_data')
else:
    selected_concat_factor_data = pd.read_hdf('/db/zg_data/zbc/buffer/selected_concat_factor_data.h5')

factor_name_list = list(selected_concat_factor_data.columns.difference(func_columns))

# Bind the result instead of discarding it; the pipeline's output was
# previously thrown away (only its progress prints were observable).
processed_factor_data = process_and_cache_factor_data_v2(selected_concat_factor_data,
                                                         factor_name_list)

print('style factor process done!')