#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Datetime: 2020/8/6 20:10
# @Author  : CHEN Wang
# @Site    :
# @File    : macro_index_construct.py
# @Software: PyCharm

"""
宏观指标指数合成
"""

import copy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from quant_researcher.quant.factors.factor_preprocess import preprocess
from quant_researcher.quant.datasource_fetch.macro_api.macro_value_related import \
    get_indicator_value_and_info, get_indicator_value
from quant_researcher.quant.factors.factor_preprocess.preprocess import LLT, de_trend


def const_group_dict(filtered_indi_df):
    """Turn the filtered indicator table into a nested grouping dict.

    :param filtered_indi_df: DataFrame with at least the columns
        'level_0' (major class), 'level_1' (sub-class) and 'indic_code'.
    :return: dict of the form {level_0: {level_1: [indic_code, ...]}}
    """
    grouped = {}
    # groupby on both keys walks the (level_0, level_1) pairs in sorted
    # order, matching the nested-groupby traversal.
    for (lvl0, lvl1), sub in filtered_indi_df.groupby(['level_0', 'level_1']):
        grouped.setdefault(lvl0, {})[lvl1] = list(sub['indic_code'])
    return grouped


def construct_index(factor_name, factor_data, target_data, kind, plot=False):
    """Build one macro index with the chosen method and optionally plot
    the standardized index against the target series.

    :param factor_name: nested dict {class: {sub_class: [indic_code, ...]}}
    :param factor_data: date-indexed DataFrame of indicator values
    :param target_data: series/frame to compare against when plotting
    :param kind: one of 'diffusion', 'synthesis', 'pca'
    :param plot: when True, show the z-scored index vs. the target
    :return: pd.Series of the constructed (un-standardized) index
    :raises ValueError: if `kind` is not a supported method
    """
    if kind == 'diffusion':
        indic = index_construction(factor_name, factor_data).diffusion_index()
        # keep only the trend component of the diffusion index
        cycle, trend = de_trend(indic)
        indic = trend
    elif kind == 'synthesis':
        indic = index_construction(factor_name, factor_data).synthesis_index()
    elif kind == 'pca':
        indic = index_construction(factor_name, factor_data).pca_index(method='dynamic')
        # When pca_index is run with method='static' the index varies in a
        # narrow band and needs low-pass filtering before comparison with
        # the target; static also leads the nominal-GDP target poorly.
        # indic = LLT(indic)
    else:
        # Previously an unknown `kind` fell through with `indic` unbound
        # and raised a confusing NameError below.
        raise ValueError('unknown index kind: %r' % (kind,))

    # z-score so the index and the target share a comparable scale
    stand_index = (indic - np.mean(indic)) / np.std(indic)

    if plot:
        fig, ax1 = plt.subplots()
        line1 = ax1.plot(stand_index, color='red', label=kind + ' index')

        # second y-axis: the target series has its own scale
        ax2 = ax1.twinx()
        line2 = ax2.plot(target_data, color='blue', label='target')

        # merge both lines into a single legend
        lines = line1 + line2
        labs = [l.get_label() for l in lines]
        ax1.legend(lines, labs, loc='best')

        plt.show()

    return indic


class index_construction:
    """Build composite macro indices from grouped indicator data.

    :param factor_name: nested dict {level_0: {level_1: [indic_code, ...]}}
        mapping major classes to sub-classes to indicator column names
    :param factor_data: date-indexed DataFrame, one column per indicator
    """

    def __init__(self, factor_name, factor_data):
        self.factor_name = factor_name  # grouping of indicator columns
        self.factor_data = factor_data  # raw indicator panel

    def diffusion_index(self):
        """Diffusion index: per sub-class share of indicators that rose
        versus the previous period, averaged equal-weight up to classes
        and then to a single index.

        :return: pd.Series with values in [0, 1]
        """

        def _positive_ratio(row):
            # fraction of indicators in the row with a positive change
            flags = [1 if v > 0 else 0 for v in row]
            return np.sum(flags) / len(flags)

        class_DI_df = pd.DataFrame()
        for factor_class, sub_classes in self.factor_name.items():
            sub_class_DI_df = pd.DataFrame()
            for sub_factor_class, factor_list in sub_classes.items():
                # period-over-period change of the sub-class indicators
                factor_diff = self.factor_data[factor_list].diff()
                factor_diff = factor_diff.dropna(how='any', axis=0)
                sub_class_DI_df[sub_factor_class] = factor_diff.apply(_positive_ratio, axis=1)

            # equal-weight average of sub-class ratios -> class-level index
            class_DI_df[factor_class] = sub_class_DI_df.mean(axis=1)

        # equal-weight average over classes -> final diffusion index
        return class_DI_df.mean(axis=1)

    def synthesis_index(self):
        """Composite index: per group, scale each indicator's change by
        its long-run mean absolute change, sum across indicators and
        compound into an index (base value 1); group indices are averaged.

        Each group in `factor_name` must keep its indicator list under
        the 'fake_level' sub-key.

        :return: pd.Series of the composite index (float)
        """

        def inner_syns(temp_factor_data):
            # first difference of every indicator
            factor_data_diff = temp_factor_data.diff()

            # scale: rolling (72-period window, >= 12 obs) mean absolute
            # change, so indicators of different magnitude are comparable
            def _scale(series):
                return np.abs(series).rolling(window=72, min_periods=12).mean()

            factor_data_diff_standard = factor_data_diff.apply(_scale, axis=0)
            # standardized changes, summed across indicators per date
            # (NaN entries are skipped by sum, contributing 0)
            V = (factor_data_diff / factor_data_diff_standard).sum(axis=1)
            # drop dates lost to the rolling warm-up
            V = V.dropna(how='any', axis=0)

            levels = np.zeros(len(V) + 1)
            levels[0] = 1  # base value of the index
            index_df = pd.DataFrame(columns=['syn_index'])
            for i in range(len(V)):
                # positional access: V carries a date index, so integer
                # label lookup (V[i]) is unsafe on modern pandas
                v = V.iloc[i]
                # symmetric growth-rate compounding
                levels[i + 1] = levels[i] * ((200 + v) / (200 - v))
                index_df.loc[V.index[i], 'syn_index'] = levels[i + 1]
            # cast: .loc-built columns come out object-dtyped
            return index_df['syn_index'].astype(float)

        temp_result = []
        for group in self.factor_name:
            group_indi_list = self.factor_name[group]['fake_level']
            temp_index = inner_syns(self.factor_data[group_indi_list]).rename(group)
            temp_result.append(temp_index)
        # equal-weight average across groups
        return pd.concat(temp_result, axis=1).mean(axis=1)

    def pca_index(self, start=50, method='static'):
        """PCA index from the standardized indicator panel.

        :param start: number of leading observations excluded from the
            output (and, for 'static', from the scaler/PCA fit window)
        :param method: 'static' fits scaler+PCA once and applies fixed
            weights to every date; 'dynamic' refits on an expanding
            window at every date and takes the latest first-component
            score
        :return: pd.Series named 'pca_index'
        """
        index_df = pd.DataFrame(columns=['pca_index'])
        factor_data_train = self.factor_data.iloc[start:, ]
        # fit the standardization model on the training window
        scaler = StandardScaler()
        scaler.fit(factor_data_train)
        factor_data = copy.deepcopy(self.factor_data)
        if method == 'static':
            # standardize the training window
            factor_data_train = pd.DataFrame(scaler.transform(factor_data_train),
                                             index=factor_data_train.index,
                                             columns=factor_data_train.columns)
            # standardize the full panel with the same fitted scaler
            stand_factor_data = pd.DataFrame(scaler.transform(factor_data),
                                             index=factor_data.index,
                                             columns=factor_data.columns)
            pca = PCA(n_components=None)
            # fit PCA on the standardized training data
            pca.fit(factor_data_train.dropna(how='any', axis=0))
            weights = []
            # loadings scaled by 1/sqrt(explained variance)
            k1 = pca.components_ / np.sqrt(pca.explained_variance_.reshape(-1, 1))
            for j in range(len(k1)):
                # NOTE(review): the inner loop overwrites
                # weights_coefficient each iteration, so only the LAST
                # component contributes to feature j's weight — this looks
                # like it was meant to accumulate over i; behavior kept
                # as-is pending confirmation.
                for i in range(len(pca.explained_variance_)):
                    weights_coefficient = np.sum(100 * (pca.explained_variance_ratio_[i]) * (k1[i][j])) \
                                          / np.sum(pca.explained_variance_ratio_)
                # np.float was removed in NumPy 1.24; builtin float is the
                # documented replacement
                weights.append(float(weights_coefficient))
            print('Weights', weights)
            # normalize the weights to sum to 1
            weights = np.array(weights) / np.sum(weights)
            print('stand_Weights', weights)
            # weighted sum of standardized indicators -> index value per date
            for i in range(start, stand_factor_data.shape[0]):
                index_df.loc[stand_factor_data.index[i], 'pca_index'] = \
                    np.sum(stand_factor_data.iloc[i] * weights)

        if method == 'dynamic':
            for m in range(start, self.factor_data.shape[0]):
                loop_factor_data = factor_data.iloc[:m, ]
                # re-standardize the expanding history at every step
                scaler = StandardScaler()
                scaler.fit(loop_factor_data)
                loop_factor_data = pd.DataFrame(scaler.transform(loop_factor_data),
                                                index=loop_factor_data.index,
                                                columns=loop_factor_data.columns)
                # drop columns with missing data, refit PCA, take the most
                # recent first-principal-component score
                cleaned = loop_factor_data.dropna(how='any', axis=1)
                pca1 = PCA(n_components=None).fit(cleaned).transform(cleaned)[:, 0]
                index_df.loc[self.factor_data.index[m], 'pca_index'] = pca1[-1]

        return index_df['pca_index']

    def construct_index(self, method):
        """Dispatch to the index builder named by `method`.

        :param method: 'diffusion', 'synthesis' or 'PCA'
        :return: the pd.Series produced by the chosen builder
            (the original version computed it but never returned it)
        :raises ValueError: for an unsupported method name
        """
        if method == 'diffusion':
            return self.diffusion_index()
        elif method == 'synthesis':
            return self.synthesis_index()
        elif method == 'PCA':
            return self.pca_index(method='dynamic')
        raise ValueError('unknown construction method: %r' % (method,))


if __name__ == '__main__':
    # Load the pre-filtered indicator table; its level_0/level_1 columns
    # define the class / sub-class grouping and indic_code the indicator id.
    filtered_indi_df = pd.read_csv('filtered_indi_df.csv')
    pd.set_option('display.max_columns', 20)
    print(filtered_indi_df)
    factor_name = const_group_dict(filtered_indi_df)

    target_indicator = 'GDP:不变价:当季同比'  # real GDP (constant prices, quarterly YoY)
    # target_indicator = 'GDP:现价:当季同比'    # nominal GDP
    # target_indicator = 'CPI:食品:当月同比'    # food CPI
    # Fetch the target series, de-duplicate and pivot to a date-indexed frame.
    target_data = get_indicator_value_and_info(indic_name=target_indicator, start_date='2000-01-01')
    print(target_data)
    target_data.drop_duplicates(subset=['end_date', 'indic_name'], keep='first', inplace=True)
    target_data = target_data.set_index(['end_date', 'indic_name'])['indic_value'].unstack()
    print(target_data)
    # target_data.to_csv('target.csv')
    # target_data = pd.read_csv('target.csv', index_col=0)
    target_data.index = pd.to_datetime(target_data.index, format="%Y-%m-%d")

    # Fetch raw values for every filtered indicator and pivot to wide form
    # (one column per indic_code), plus a parallel frame of update times.
    factor = list(filtered_indi_df['indicator_name'])
    data = get_indicator_value(indic_name=factor, start_date='2000-01-01',
                                      select=['indic_code', 'val_date as end_date',
                                              'data_value as indic_value', 'titime as update_time'])
    data.drop_duplicates(subset=['end_date', 'indic_code'], keep='first', inplace=True)
    factor_data = data.set_index(['end_date', 'indic_code'])['indic_value'].unstack()
    update_date_data = data.set_index(['end_date', 'indic_code'])['update_time'].unstack()
    indic_name_df = get_indicator_value_and_info(indic_code=factor_data.columns.tolist(), only_latest=True)
    # NOTE(review): the renames below align by POSITION — they assume
    # indic_name_df rows come back in the same order as factor_data.columns;
    # verify against the API's ordering guarantee.
    factor_data.columns = indic_name_df.indic_name.tolist()
    update_date_data.columns = indic_name_df.indic_name.tolist()
    print(factor_data)
    print(update_date_data)
    # factor_data.to_csv('factor.csv')
    # factor_data = pd.read_csv('factor.csv', index_col=0)
    factor_data.index = pd.to_datetime(factor_data.index, format="%Y-%m-%d")

    # Clean each indicator series and fill gaps; the unsmoothed variant is kept.
    for c in factor_data.columns:
        cleaned_smooth, cleaned_unsmooth = preprocess.factor_cleaning(factor_data[c], yoy=True).cleaning_data()
        factor_data[c] = cleaned_unsmooth

    # NOTE(review): gdp_index is constructed but never used below.
    gdp_index = index_construction(factor_name, factor_data)

    # Build all three indices on data up to 2020-01-31 (plotting each against
    # the target) and collect them side by side.
    indic_df = pd.DataFrame()
    for k in ['diffusion', 'synthesis', 'pca']:
        indic = construct_index(factor_name,
                                factor_data.loc[:pd.to_datetime('20200131'), ],
                                target_data.loc[:pd.to_datetime('20200131'), ],
                                k,
                                True)
        indic = pd.DataFrame(indic)
        indic.columns = [k]
        indic_df = pd.concat([indic_df, indic], axis=1)

        print(indic_df)
        # Sample output from a past run:
        '''
                     diffusion  synthesis      pca
        2000-01-31       NaN   1.000000       NaN
        2000-02-29       NaN   1.000000       NaN
        2000-03-31  0.506151   1.000000       NaN
        2000-04-30  0.498354   1.000000       NaN
        2000-05-31  0.490398   1.000000       NaN
        ...              ...        ...       ...
        2019-09-30  0.401571   0.372305  -2.18742
        2019-10-31  0.399265   0.362114  -2.14027
        2019-11-30  0.396914   0.369301  -2.13515
        2019-12-31  0.394334   0.373029  -2.13286
        2020-01-31  0.391509   0.144827  -1.95398
        '''
