#!/usr/bin/python
# -*-coding:utf-8-*-

'''
Fitness (evaluation metric) definitions for factor mining.
'''

import numpy as np
import pandas as pd
from copy import deepcopy

from joblib import wrap_non_picklable_objects
from numpy.linalg import lstsq
from scipy import stats
from sklearn.linear_model import LinearRegression
from sklearn.metrics import normalized_mutual_info_score as mutual_info
from statsmodels.api import OLS

from zg_data_process.zg_data_process import DataProcess

from base.utils import np_keys_to_pd
from base.utils import np_neu_keys_to_pd
from base.utils import np_filter_season_keys_to_pd

# from base.KeyTransform import stock_code_map
from base.KeyTransform import trade_date_map_int

data_process_api = DataProcess()

# TODO - weighted IC
def weighted_ic(data, direction=1):
    temp = data.copy()
    # temp = pd.DataFrame({'y':np.random.randn(100), 'y_pred':np.random.randn(100)})

    temp['y_pred'] *= direction

    temp['weight'] = temp['y_pred'].rank().apply(lambda x: 2 ** (2 * x / temp.shape[0] - 2))

    temp['weight_x'] = temp['y_pred'] * temp['weight']

    temp['weight_r'] = temp['y'] * temp['weight']

    # temp[['y_pred', 'y']].corr(method='spearman').values[1, 0]
    # temp[['weight_x', 'weight_r']].corr(method='spearman').values[1, 0]
    return temp[['weight_x', 'weight_r']].corr(method='spearman').values[1, 0]

# TODO - 目标市值分组中性化
def label_cap_group_neu(df, group_num=10, label_name='y', cap_name='cap', is_rank=False):
    # df = values.copy()
    df = df.copy()

    group = df.groupby('date',
                       as_index=False,
                       sort=False)[cap_name].apply(pd.qcut, group_num, labels=False)

    group.index = group.index.droplevel(level=0)

    df['group'] = group.loc[df.index]

    if not is_rank:
        neu_label = df.groupby(['date', 'group'],
                               as_index=False,
                               sort=False)[label_name].apply(lambda x: x - x.mean())
    else:
        neu_label = df.groupby(['date', 'group'],
                               as_index=False,
                               sort=False)[label_name].rank(pct=True)

    neu_label.index = neu_label.index.droplevel(level=0)

    df[label_name] = neu_label.loc[df.index]

    return df

# TODO - 目标行业分组中性化
def label_ci1_ind_group_neu(df, label_name='y', ci1_name='ci1_code', is_rank=False):
    # df = values.copy()
    df = df.copy()

    if not is_rank:
        neu_label = df.groupby(['date', ci1_name],
                               as_index=False,
                               sort=False)[label_name].apply(lambda x: x - x.mean())
    else:
        neu_label = df.groupby(['date', ci1_name],
                               as_index=False,
                               sort=False)[label_name].rank(pct=True)

    neu_label.index = neu_label.index.droplevel(level=0)

    df[label_name] = neu_label.loc[df.index]

    return df

@wrap_non_picklable_objects
def def_keyfit_metric(keys,
                      method='weighted_rank_ic',
                      drop_first_nday=None,
                      split_date=None,
                      train_direction=None,
                      neu_keys=None,
                      is_cap_neu=False,
                      is_ci1_neu=False,
                      is_neu_y=True,
                      factor_cover_rate=0.6,
                      filter_duplicate_rate=0.1,
                      key_by_path=True,
                      verbose=False):
    '''

    :param keys:
    :param method:
    :param drop_first_nday:
    :param split_date:
    :param train_direction:
    :param neu_keys:
    :param is_cap_neu:
    :param is_ci1_neu:
    :param is_neu_y: 是否中性化y, 前提是is_cap_neu或is_ci1_neu为True
    :param factor_cover_rate: 因子覆盖率, 默认:0.6
    :param filter_duplicate_rate: 重复因子值的比率, 如果超过该比率, 则返回最小值, 默认:0.1
    :param verbose:
    :return:
    '''
    def information_corr(y, y_pred, w=None):
        if key_by_path:
            # load key and neu key
            load_keys = np.load(keys, mmap_mode='r')
            load_keys = np_keys_to_pd(load_keys)

            if neu_keys is not None:
                load_neu_keys = np.load(neu_keys, mmap_mode='r')
                load_neu_keys = np_neu_keys_to_pd(load_neu_keys)
            else:
                load_neu_keys = neu_keys
        else:
            load_keys = keys.copy()

            if neu_keys is not None:
                load_neu_keys = neu_keys.copy()
            else:
                load_neu_keys = neu_keys

        if len(y)!=len(load_keys): # pass examination
            return 1.

        values = pd.DataFrame(y_pred, index=load_keys, columns=['y_pred'])
        values['y'] = y
        filter_keys = pd.DataFrame(index=load_keys).reset_index()

        # TODO - 去掉前N天
        if drop_first_nday is not None:
            # use_split_date = pd.DatetimeIndex(filter_keys['date'].unique())[drop_first_nday-1]
            use_split_date = filter_keys['date'].unique()[drop_first_nday-1]
        else:
            use_split_date = trade_date_map_int(split_date)

        values = values.loc[(filter_keys['date'] > use_split_date).values, :]
        # init_values_y = values['y'].copy()

        # TODO - Return an array of the modal (most common) value in the passed array.
        check_y_pred = values.loc[~values['y_pred'].isnull(), 'y_pred']
        exam_y_pred = stats.mode(check_y_pred)

        # 因子的覆盖率要高, 达到设定的factor_cover_rate
        if len(check_y_pred) > int(values.shape[0]*factor_cover_rate):
            # 因子的重复值要少
            if exam_y_pred.count[0] / len(check_y_pred) > filter_duplicate_rate:
                return 0.0
        else:
            # 存在太多nan, 则直接返回0.0
            return 0.0

        # TODO - keys除了date, stock_code, 也可以再加入一些label，例如market cap和行业标签(!!!)
        # TODO - neutralization operation
        if (is_cap_neu or is_ci1_neu) and (is_cap_neu is not None):
            # TODO - 去掉nan
            values = values[~values['y_pred'].isnull()]

            values = pd.concat([values, load_neu_keys], axis=1, join_axes=[values.index])

            values = values.dropna()

            if is_cap_neu:
                values = label_cap_group_neu(values, group_num=10, label_name='y_pred', is_rank=False)
                if is_neu_y:
                    values = label_cap_group_neu(values, group_num=10, label_name='y', is_rank=False)
            elif is_ci1_neu:
                values = label_ci1_ind_group_neu(values, label_name='y_pred', ci1_name='ci1_code', is_rank=False)
                if is_neu_y:
                    values = label_ci1_ind_group_neu(values, label_name='y', ci1_name='ci1_code', is_rank=False)

            values = values.drop(load_neu_keys.columns, axis=1)
        else:
            # values = values

            # TODO - 去掉nan
            values = values[~values['y_pred'].isnull()]

        del load_keys, load_neu_keys

        if str.lower(method) == 'simple_ic':
            simple_ic = values[['y_pred', 'y']].corr(method='spearman').iloc[0, 1]

            return simple_ic

        # TODO - 计算IC
        ic_data = values.groupby(as_index=True, level='date').apply(
            lambda x: x['y_pred'].corr(x['y'], method='spearman')
        )

        # TODO - 去掉ic为nan的
        ic_data = ic_data.dropna()

        # TODO - 查找最小值的重复次数
        exam = stats.mode(-1.0*ic_data.values)

        try:
            # exam.count[0] 是指ic_data.values中最小值重复的次数
            if exam.count[0] / len(ic_data) > 0.8:
                return 0.0
        except:
            return 0.0

        if train_direction is not None:
            direction = train_direction
        else:
            direction = None

        # TODO - 下面是计算指定的指标
        # TODO - 加权IC或ICIR
        if str.lower(method) == 'weighted_rank_ic' or str.lower(method) == 'weighted_rank_ir':
            ic_mean = np.nanmean(ic_data)

            if direction is None:
                ## IC direction
                direction = 1. if ic_mean > 0. else -1.

            weighted_ic_array = values.groupby(as_index=True, level='date').apply(
                weighted_ic, direction=direction
            )

            weighted_ic_mean = abs(np.nanmean(weighted_ic_array))

            if str.lower(method) == 'weighted_rank_ic':
                # weighted_ic_mean过大则返回ic_mean
                if weighted_ic_mean - abs(ic_mean) > 0.2 or weighted_ic_mean > 0.3:
                    if verbose: print('weight ic %.4f and ic %.4f, so return ic mean' % (weighted_ic_mean, abs(ic_mean)))
                    return abs(ic_mean)

                if np.isnan(weighted_ic_mean) or np.isinf(weighted_ic_mean):
                    return 0.0
                else:
                    return weighted_ic_mean
            else:
                try:
                    if weighted_ic_mean - abs(ic_mean) > 0.2 or weighted_ic_mean > 0.3:
                        if verbose: print('weight ic %.4f and ic %.4f, so return icir' % (weighted_ic_mean, abs(ic_mean)))
                        icir = abs(ic_mean) / np.nanstd(ic_data)
                        return icir

                    ic_std = np.nanstd(weighted_ic_array)

                    # ic_std should upper to 0.1 to protect too low std.
                    # weighted_ir = (weighted_ic_mean/ic_std) if ic_std > 0.083 else 0.0
                    weighted_ir = weighted_ic_mean/ic_std

                    if np.isnan(weighted_ir) or np.isinf(weighted_ir):
                        return 0.0
                    else:
                        return weighted_ir
                except:
                    # print('weighted_ir error:')
                    return 0.0
        # TODO - 互信息
        elif str.lower(method) == 'mutual_info':
            mutual_info_values =  np.nanmean(values.groupby(as_index=True, level=0).apply(
                lambda x: mutual_info(x['y_pred'], x['y']))
            )

            if np.isnan(mutual_info_values) or np.isinf(mutual_info_values):
                return 0.0
            else:
                return mutual_info_values
        # TODO - IC均值
        elif str.lower(method) == 'ic_mean':
            ic_mean = abs(np.nanmean(ic_data))

            if np.isnan(ic_mean) or np.isinf(ic_mean):
                return 0.0
            else:
                return ic_mean
        # TODO - IR
        elif str.lower(method) == 'ir':
            icir = abs(np.nanmean(ic_data))/np.nanstd(ic_data)

            if np.isnan(icir) or np.isinf(icir):
                return 0.0
            else:
                return icir

    return information_corr


def def_keyfit_cw_metric(keys,
                         method='weighted_rank_ic',
                         drop_first_n_season=None,
                         split_date=None,
                         train_direction=None,
                         neu_keys=None,
                         filter_season_keys=None,
                         is_cap_neu=False,
                         is_ci1_neu=False,
                         is_neu_y=True,
                         factor_cover_rate=0.5,
                         filter_duplicate_rate=0.1,
                         key_by_path=True,
                         verbose=False):
    '''

    :param keys:
    :param method:
    :param drop_first_n_season:
    :param split_date:
    :param train_direction:
    :param neu_keys:
    :param filter_season_keys:
    :param is_cap_neu:
    :param is_ci1_neu:
    :param is_neu_y: 是否中性化y, 前提是is_cap_neu或is_ci1_neu为True
    :param factor_cover_rate: 因子覆盖率, 默认:0.5
    :param filter_duplicate_rate: 重复因子值的比率, 如果超过该比率, 则返回最小值, 默认:0.1
    :param verbose:
    :return:
    '''
    def information_corr(y, y_pred, w=None):
        if key_by_path:
            # load key and neu key
            load_keys = np.load(keys, mmap_mode='r')
            load_keys = np_keys_to_pd(load_keys)

            if neu_keys is not None:
                load_neu_keys = np.load(neu_keys, mmap_mode='r')
                load_neu_keys = np_neu_keys_to_pd(load_neu_keys)
            else:
                load_neu_keys = neu_keys

            if filter_season_keys is not None:
                load_filter_season_keys = np.load(filter_season_keys, mmap_mode='r')
                load_filter_season_keys = np_filter_season_keys_to_pd(load_filter_season_keys)
            else:
                load_filter_season_keys = filter_season_keys
        else:
            load_keys = keys.copy()

            if neu_keys is not None:
                load_neu_keys = neu_keys.copy()
            else:
                load_neu_keys = neu_keys

            if filter_season_keys is not None:
                load_filter_season_keys = filter_season_keys.copy()
            else:
                load_filter_season_keys = filter_season_keys

        if len(y)!=len(load_keys): # pass examination
            return 0.0

        values = pd.DataFrame(y_pred, index=load_keys, columns=['y_pred'])
        values['y'] = y
        filter_keys = pd.DataFrame(index=load_keys).reset_index()

        # TODO - 去掉指定季度的数据
        if load_filter_season_keys is not None:
            values = values[load_filter_season_keys.loc[values.index, 'drop']==0]

        if values.shape[0] == 0.0:
            return 0.0

        # TODO - 去掉前N天
        if drop_first_n_season is not None:
            use_split_date = filter_keys['date'].unique()[drop_first_n_season-1]
        else:
            use_split_date = trade_date_map_int(split_date)

        values = values.loc[values.index.get_level_values('date') > use_split_date, :]
        # init_values_y = values['y'].copy()

        # TODO - Return an array of the modal (most common) value in the passed array.
        check_y_pred = values.loc[~values['y_pred'].isnull(), 'y_pred']
        exam_y_pred = stats.mode(check_y_pred)

        # 因子的覆盖率要高, 达到设定的factor_cover_rate
        if len(check_y_pred) > int(values.shape[0]*factor_cover_rate):
            # 因子的重复值要少
            if exam_y_pred.count[0] / len(check_y_pred) > filter_duplicate_rate:
                return 0.0
        else:
            # 存在太多nan, 则直接返回0.0
            return 0.0

        # TODO - keys除了date, stock_code, 也可以再加入一些label，例如market cap和行业标签(!!!)
        # TODO - neutralization operation
        if (is_cap_neu or is_ci1_neu) and (is_cap_neu is not None):
            # TODO - 去掉nan
            values = values[~values['y_pred'].isnull()]

            values = pd.concat([values, load_neu_keys], axis=1, join_axes=[values.index])

            values = values.dropna()

            if is_cap_neu:
                values = label_cap_group_neu(values, group_num=10, label_name='y_pred', is_rank=False)
                if is_neu_y:
                    values = label_cap_group_neu(values, group_num=10, label_name='y', is_rank=False)
            elif is_ci1_neu:
                values = label_ci1_ind_group_neu(values, label_name='y_pred', ci1_name='ci1_code', is_rank=False)
                if is_neu_y:
                    values = label_ci1_ind_group_neu(values, label_name='y', ci1_name='ci1_code', is_rank=False)

            values = values.drop(load_neu_keys.columns, axis=1)
        else:
            # values = values

            # TODO - 去掉nan
            values = values[~values['y_pred'].isnull()]

        del load_keys, load_neu_keys

        if str.lower(method) == 'simple_ic':
            simple_ic = values[['y_pred', 'y']].corr(method='spearman').iloc[0, 1]

            return simple_ic

        # TODO - 计算IC
        ic_data = values.groupby(as_index=True, level='date').apply(
            lambda x: x['y_pred'].corr(x['y'], method='spearman')
        )

        # TODO - 去掉ic为nan的
        ic_data = ic_data.dropna()

        # TODO - 查找最小值的重复次数
        exam = stats.mode(-1.0*ic_data.values)

        try:
            # exam.count[0] 是指ic_data.values中最小值重复的次数
            if exam.count[0] / len(ic_data) > 0.8:
                return 0.0
        except:
            return 0.0

        if train_direction is not None:
            direction = train_direction
        else:
            direction = None

        # TODO - 下面是计算指定的指标
        # TODO - 加权IC或ICIR
        if str.lower(method) == 'weighted_rank_ic' or str.lower(method) == 'weighted_rank_ir':
            ic_mean = np.nanmean(ic_data)

            if direction is None:
                ## IC direction
                direction = 1. if ic_mean > 0. else -1.

            weighted_ic_array = values.groupby(as_index=True, level='date').apply(
                weighted_ic, direction=direction
            )

            weighted_ic_mean = abs(np.nanmean(weighted_ic_array))

            if str.lower(method) == 'weighted_rank_ic':
                # weighted_ic_mean过大则返回ic_mean
                if weighted_ic_mean - abs(ic_mean) > 0.2 or weighted_ic_mean > 0.3:
                    if verbose: print('weight ic %.4f and ic %.4f, so return ic mean' % (weighted_ic_mean, abs(ic_mean)))
                    return abs(ic_mean)

                if np.isnan(weighted_ic_mean) or np.isinf(weighted_ic_mean):
                    return 0.0
                else:
                    return weighted_ic_mean
            else:
                try:
                    if weighted_ic_mean - abs(ic_mean) > 0.2 or weighted_ic_mean > 0.3:
                        if verbose: print('weight ic %.4f and ic %.4f, so return icir' % (weighted_ic_mean, abs(ic_mean)))
                        icir = abs(ic_mean) / np.nanstd(ic_data)
                        return icir

                    ic_std = np.nanstd(weighted_ic_array)

                    # ic_std should upper to 0.1 to protect too low std.
                    # weighted_ir = (weighted_ic_mean/ic_std) if ic_std > 0.083 else 0.0
                    weighted_ir = weighted_ic_mean/ic_std

                    if np.isnan(weighted_ir) or np.isinf(weighted_ir):
                        return 0.0
                    else:
                        return weighted_ir
                except:
                    # print('weighted_ir error:')
                    return 0.0
        # TODO - 互信息
        elif str.lower(method) == 'mutual_info':
            mutual_info_values =  np.nanmean(values.groupby(as_index=True, level=0).apply(
                lambda x: mutual_info(x['y_pred'], x['y']))
            )

            if np.isnan(mutual_info_values) or np.isinf(mutual_info_values):
                return 0.0
            else:
                return mutual_info_values
        # TODO - IC均值
        elif str.lower(method) == 'ic_mean':
            ic_mean = abs(np.nanmean(ic_data))

            if np.isnan(ic_mean) or np.isinf(ic_mean):
                return 0.0
            else:
                return ic_mean
        # TODO - IR
        elif str.lower(method) == 'ir':
            icir = abs(np.nanmean(ic_data))/np.nanstd(ic_data)

            if np.isnan(icir) or np.isinf(icir):
                return 0.0
            else:
                return icir

    return information_corr

