import os.path
from typing import Optional

import pandas as pd
from joblib import Parallel, delayed

from dc import *
from algo_features_processor import *
from algo_features_evaluation import *

class Model(DataCenter):
    """
    Compute in-sample factor exposures and evaluate single-factor performance.

    Calling the instance (``__call__``) runs the full pipeline:
    per-factor exposure data is cached under ``data/`` and single-factor
    test results are produced by ``FTEvaCore`` (written under ``charts/``).
    """
    # Cutoff date: observations on or before this date form the training set.
    train_test_split = datetime.date(2023, 12, 31)

    def __init__(self):
        super().__init__()
        # Factor exposures for the training window (populated in __call__).
        self.ft_train: Optional[pd.DataFrame] = None
        # Excess-return targets for the training window (populated by calc_y).
        self.y_train: Optional[pd.DataFrame] = None

    def calc_return(self):
        """Attach adjusted vwap/close percentage returns to ``self.uni``.

        Prices are adjusted with ``adj_factor``; per-code returns are computed
        over ``lag`` trading days with +/-inf replaced by NaN, then merged back
        into the universe. The leading ``lag`` dates (whose returns are NaN by
        construction) are dropped.
        """
        lag = 1
        # vwap-based return
        self.uni['adj_vwap'] = self.uni['vwap'] * self.uni['adj_factor']
        ret = self.uni.pivot_table('adj_vwap', index='date', columns='code').pct_change(lag).dropna(how='all')
        ret = ret.stack().to_frame(name='pct_chg_vwap').reset_index().replace({np.inf: np.nan, -np.inf: np.nan})
        self.uni = self.uni.merge(ret, how='left', on=['date', 'code'])
        # close-based return
        self.uni['adj_close'] = self.uni['close'] * self.uni['adj_factor']
        ret = self.uni.pivot_table('adj_close', index='date', columns='code').pct_change(lag).dropna(how='all')
        ret = ret.stack().to_frame(name='pct_chg').reset_index().replace({np.inf: np.nan, -np.inf: np.nan})
        self.uni = self.uni.merge(ret, how='left', on=['date', 'code'])
        # Drop the leading dates that have no return.
        self.uni = self.uni[self.uni['date'].isin(self.dates[lag:])]

    def __call__(self, *args, **kwargs):
        """Run the pipeline: neutralized exposures -> targets -> evaluation."""
        # Industry/size neutralization + per-factor exposure calculation,
        # cached on disk so repeated runs skip the expensive regression step.
        if os.path.exists('data/factor_exposure_train.parquet'):
            self.ft_train = read_parquet('data/factor_exposure_train.parquet')
        else:
            uni_train = self.uni[self.uni['date'] <= self.train_test_split].copy(deep=False)
            self.ft_train = self.calc_ft(uni_train, self.fin_field)
            self.ft_train.to_parquet('data/factor_exposure_train.parquet', engine='pyarrow', index=False)
        # Compute the target variable (excess returns).
        self.calc_y()
        # Attach targets to exposures.
        self.ft_train = self.ft_train.merge(self.y_train[['date', 'code', 'r_e']], how='left', on=['date', 'code'])
        # Drop the industry dummy columns: their names are (Chinese) sector
        # names, so any column containing a CJK character is a dummy.
        # BUGFIX: the original pattern u'[u4e00-u9fff]+' lacked backslashes
        # (it matched the literal chars u/4/e/0-9/f) and the mask was not
        # negated, so the wrong columns were selected.
        self.ft_train = self.ft_train.loc[:, ~self.ft_train.columns.str.contains(r'[\u4e00-\u9fff]')]
        # Single-factor evaluation (writes results under charts/).
        FTEvaCore(self.ft_train)

    def calc_y(self, lag: int = 1):
        """Compute ``lag``-day excess returns vs. the 000906.SH benchmark.

        Stores the result in ``self.y_train``, with each return relabelled
        back by ``lag`` trading days so it is aligned to the date on which
        the prediction would be made.
        """
        self.calc_return()
        # Restrict to the training window.
        y = self.uni[self.uni['date'] <= self.train_test_split][['date', 'code', 'pct_chg']].copy(deep=False)
        self.read_idx_eod()  # load benchmark index returns into self.idx
        y = y.merge(self.idx, on=['date'], how='left').rename(columns={'000906.SH': 'r_b'})
        # Excess return = stock return - benchmark return (NaNs treated as 0).
        y.fillna(0, inplace=True)
        y['r_e'] = y['pct_chg'] - y['r_b']
        # Drop the first date (no prior date to relabel it onto).
        y = y[y['date'] != y['date'].iloc[0]]
        # Relabel dates: shift each remaining date back by `lag` entries of the
        # trading calendar. Dates are unique, so enumerate gives the position
        # directly (the original list.index scan was O(n^2)).
        date_map = {v: self.dates[i + 1 - lag] for i, v in enumerate(self.dates[1:])}
        y.replace({'date': date_map}, inplace=True)
        self.y_train = y.copy(deep=False)

    def calc_ft(self, uni: pd.DataFrame, fields: list):
        """Build the exposure table: size, industry dummies, neutralized factors."""
        ft = self.init_size(uni)                             # standardized log market cap
        ft = self.init_indus(uni, ft)                        # industry dummy variables
        ft = self.calc_neutralized_fin_ft(uni, ft, fields)   # neutralized factor values
        return ft

    def init_size(self, uni: pd.DataFrame):
        """Return a (date, code, size) frame with log-cap standardized per date."""
        df = uni[['date', 'code', 'tot_cap']].copy(deep=False)
        df['size'] = np.log(df['tot_cap'])
        df.drop(columns=['tot_cap'], inplace=True)

        def _standardize(x: pd.DataFrame):
            # Cross-sectional winsorize + standardize within one date.
            x['size'] = FTProcessor.standardize_cap(x['size'].values)
            return x

        return df.groupby('date', as_index=False).apply(_standardize).reset_index(drop=True)

    def init_indus(self, uni: pd.DataFrame, ft: pd.DataFrame):
        """Merge per-date industry dummy variables (0/1 ints) into ``ft``."""
        df = uni[['date', 'code', 'sector']].copy(deep=False)

        def _dummies(x: pd.DataFrame):
            # One column per sector name, encoded as 0/1 instead of bool.
            x = pd.get_dummies(x.set_index(['date', 'code']), prefix_sep='', prefix='').replace({True: 1, False: 0}).reset_index()
            return x

        df = df.groupby('date', as_index=False).apply(_dummies).reset_index(drop=True)
        return ft.merge(df, on=['date', 'code'], how='left')

    def calc_neutralized_fin_ft(self, uni: pd.DataFrame, ft: pd.DataFrame, fields: list) -> pd.DataFrame:
        """Neutralize each field in ``fields`` against size + industry per date.

        For every date, each raw factor is regressed (OLS with intercept) on
        the exposure columns of ``ft`` (size + industry dummies); the residual
        is winsorized/standardized and written back into ``ft``.
        """
        def _neutralize(x: pd.DataFrame):
            # Regressors: every exposure column except date/code; exposure
            # columns that are entirely NaN on this date are dropped.
            reg_x: np.ndarray = x.iloc[:, 2:].dropna(how='all', axis=1).values
            reg_y_collection = uni.merge(x, how='right', on=['date', 'code']).copy()[fields]
            # Fill remaining NaNs with the cross-sectional median.
            if reg_y_collection.isna().sum().sum() != 0:
                reg_y_collection.fillna(reg_y_collection.median(), inplace=True)
            reg_y_collection: np.ndarray = reg_y_collection.values
            for i in range(reg_y_collection.shape[1]):
                reg_y: np.ndarray = reg_y_collection[:, i]
                # Winsorize/standardize the OLS residual (element [1] of the
                # regression result).
                reg_y_collection[:, i] = FTProcessor.sort_cap(Regression.ols(reg_x, reg_y, add_constant=True)[1])
            x.loc[:, fields] = reg_y_collection
            return x

        return ft.groupby('date', as_index=False).apply(_neutralize).reset_index(drop=True)


if __name__ == '__main__':
    # Instantiate the model and immediately run the full factor pipeline.
    Model()()
    print('.')

