import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
from statsmodels.api import OLS, add_constant


def get_month_first_day(date_str):
    """Return the first day of the month for an ISO-style date string.

    E.g. '2021-07-23' -> '2021-07-01'.  Assumes the string starts with
    'YYYY-MM-' (the first 8 characters are kept and '01' is appended).

    Renamed the parameter from `str` to avoid shadowing the builtin.
    """
    return date_str[:8] + '01'

def get_split_weight(count=0, n=1):
    """Split `count` items into `n` equal-sized buckets with fractional edges.

    When `count` is not divisible by `n`, the boundary item is shared between
    two adjacent buckets via fractional weights (e.g. 10 items in 4 buckets
    gives weights [1, 1, 0.5], [0.5, 1, 1], ...).

    Parameters
    ----------
    count : total number of items to split.
    n : number of buckets.

    Returns
    -------
    (weight_arrays, periods_int)
        weight_arrays : list of np.ndarray, one weight vector per bucket.
        periods_int   : list of (start, end) inclusive integer index ranges;
                        adjacent ranges overlap on a shared boundary item.
    """
    per = count / n
    # Fractional [start, end) boundaries of each bucket.
    periods = [(i * per, (i + 1) * per) for i in range(int(n))]
    # Inclusive integer index ranges: an integral end is exclusive (end - 1),
    # a fractional end keeps its floor as the (shared) last index.
    periods_int = [(int(p[0]), int(p[1]) - 1) if p[1].is_integer() else (int(p[0]), int(p[1]))
                   for p in periods]
    weight_arrays = []  # renamed from `list` to avoid shadowing the builtin
    for (start, end) in periods:
        tmp = []
        if start.is_integer() and end.is_integer():
            # Bucket aligned on both sides: all items fully owned.
            for _ in range(int(end - start)):
                tmp.append(1)
        elif start.is_integer() and not end.is_integer():
            # Fractional right edge: last item only partially owned.
            for _ in range(int(math.floor(end) - start)):
                tmp.append(1)
            tmp.append(math.modf(end)[0])
        elif not start.is_integer() and end.is_integer():
            # Fractional left edge: first item carries the remaining fraction.
            tmp.append(1 - math.modf(start)[0])
            for _ in range(int(end - math.ceil(start))):
                tmp.append(1)
        else:
            # Fractional on both sides.
            tmp.append(1 - math.modf(start)[0])
            for _ in range(math.floor(end) - math.ceil(start)):
                tmp.append(1)
            tmp.append(math.modf(end)[0])

        a = np.array(tmp)
        # Do NOT normalize to a mean: averaging would shrink the return.  We
        # only need one asset's weights across buckets to sum to its original
        # contribution, so that returns are split, not double counted.
        weight_arrays.append(a)
    return weight_arrays, periods_int

class Eval:
    """Evaluate quantile portfolios built from a factor ('prob') against an
    industry-weighted base return and the HS300 benchmark.

    Parameters
    ----------
    data : DataFrame with columns ['date', 'code', 'name', 'industry',
        'forward_return', 'prob'] — one row per stock per date.
    hs300_return : DataFrame indexed by date holding the benchmark forward
        return (expected to contain a '300_f_rtn' column — used by callers).
    industry_weight : DataFrame with columns ['industry', 'weight'].
    quantiles : number of quantile portfolios per cross-section.
    """

    def __init__(self,
                 data: pd.DataFrame,
                 hs300_return: pd.DataFrame,
                 industry_weight: pd.DataFrame,
                 quantiles: int):
        self.data = data
        self.hs300 = hs300_return
        self.industry_weight = industry_weight
        self.quantiles = quantiles
        # Industry-weighted benchmark return per date, computed once up front.
        self.base_return = self.get_base_return()

    def get_base_return(self):
        """Return the industry-weighted mean forward return per date.

        Each industry's weight is split evenly among that industry's stocks
        on each date, then the weighted returns are summed per date.
        """
        df_tmp = pd.merge(self.data, self.industry_weight,
                          left_on='industry', right_on='industry', how='left')
        # Number of stocks per (date, industry): the industry weight is
        # divided evenly among them.
        df_tmp['ind_count'] = df_tmp.groupby(['date', 'industry'])['weight'].transform('count')
        df_tmp['base_factor'] = df_tmp['forward_return'] * df_tmp['weight'] / df_tmp['ind_count']
        df_tmp = df_tmp[['date', 'base_factor']]
        return df_tmp.groupby('date').sum()

    def quantize_data(self):
        """Split each date's cross-section into quantile portfolios by 'prob'.

        Populates:
          self.q1          : top-quantile constituents with their split weights
                             (also written to 'q1_codes.csv')
          self.returns     : per-date return of each quantile + base + HS300
          self.com_returns : 1 + returns, prefixed with a unit row so a
                             cumulative product starts from 1.
        """
        data = self.data.set_index('date')
        factor_time_range = list(data.index.drop_duplicates())
        df_hs300_section = self.hs300.loc[factor_time_range]
        all_parts = []    # one frame per (date, quantile)
        code_parts = []   # top-quantile (q1) slices only
        for date in factor_time_range:
            # Rank the cross-section by predicted probability, best first.
            df_section = data.loc[date].sort_values(by='prob', ascending=False)
            weights, periods = get_split_weight(df_section.shape[0], n=self.quantiles)
            for i, (start, end) in enumerate(periods):
                df_tmp = df_section.iloc[start: end + 1].reset_index()
                assert df_tmp.shape[0] == len(weights[i]), '分位数据长度需要与权重一致'
                # Boundary stocks are shared between adjacent quantiles via
                # fractional weights, so per-stock return is not double counted.
                df_tmp['split_weighted'] = df_tmp['forward_return'] * weights[i]
                df_tmp['quantile'] = 'q' + str(i + 1)
                df_tmp = pd.merge(df_tmp, self.industry_weight,
                                  left_on='industry', right_on='industry', how='left')
                df_tmp['ind_count'] = df_tmp.groupby(['date', 'industry'])['weight'].transform('count')
                df_tmp['ind_weight'] = df_tmp['weight'] / df_tmp['ind_count']
                df_tmp['factor'] = df_tmp['split_weighted'] * df_tmp['ind_weight']
                # IC of this quantile slice (rank correlation would be Spearman;
                # this is plain Pearson between prob and realized return).
                df_tmp['ic'] = df_tmp[['prob', 'forward_return']].corr().iloc[1, 0]
                all_parts.append(df_tmp)
                if i == 0:
                    code_parts.append(pd.concat(
                        [df_tmp, pd.DataFrame(weights[i], columns=['q_weight'])], axis=1))
        # DataFrame.append was removed in pandas 2.0; accumulate and concat once.
        df_codes = pd.concat(code_parts) if code_parts else pd.DataFrame()
        df_all = pd.concat(all_parts) if all_parts else pd.DataFrame()
        df_codes.to_csv('q1_codes.csv', index=False, encoding='utf_8_sig')
        self.q1 = df_codes
        # Per-date, per-quantile portfolio return: sum of industry- and
        # split-weighted stock returns, pivoted to one column per quantile.
        df_all = df_all[['date', 'quantile', 'factor']]
        tmp = df_all.groupby(['date', 'quantile']).sum().unstack(-1)
        tmp = tmp['factor']
        tmp['base'] = self.base_return['base_factor']  # aligned on the date index
        tmp = pd.concat([tmp, df_hs300_section], axis=1)
        self.returns = tmp
        print(tmp)
        # Prepend a row of ones (dated at the first month's 1st) so the
        # cumulative product of (1 + r) starts compounding from 1.
        df_new = pd.DataFrame(1, index=[get_month_first_day(factor_time_range[0])],
                              columns=tmp.columns)
        df_new = pd.concat([df_new, tmp + 1], axis=0)
        self.com_returns = df_new
        df_new = df_new.cumprod()
        print(df_new)

    def calc_alpha_beta(self, rtn_name='q1', base_name='base'):
        """OLS of compounded `base_name` on compounded `rtn_name`.

        Returns the fitted params array: [intercept (alpha), slope (beta)].
        NOTE(review): this regresses the benchmark on the portfolio; the usual
        CAPM convention is the reverse (portfolio on benchmark) — confirm
        which direction is intended before relying on beta.
        """
        x = self.com_returns[rtn_name].values
        y = self.com_returns[base_name].values
        x = add_constant(x)
        reg_fit = OLS(y, x).fit()
        return reg_fit.params

    def calc_sharpe(self):
        """Print the annualized Sharpe ratio (mean/std * sqrt(12); assumes
        monthly returns and a zero risk-free rate)."""
        sharpe = self.returns.apply(lambda x: x.mean() / x.std()) * np.sqrt(12)
        print(sharpe.T)

    def calc_annualized_returns(self):
        """Print (1 + r)^12 for each period's return (monthly -> annualized)."""
        ann_return = np.power((self.returns + 1), 12.0)
        print(ann_return)

    def calc_volatility(self):
        """Print annualized volatility: std of monthly returns * sqrt(12)."""
        vol = self.returns.apply('std') * np.sqrt(12)
        print(vol.T)

    def calc_information_coefficient(self):
        """Correlation matrix of the return series.

        TODO: the IC should be computed per cross-section date (prob vs
        forward_return within each date), not on the time series of returns.
        """
        df_corr = self.returns.corr()
        # print(df_corr)

    def calc_information_ratio(self):
        """Print IR = mean(IC) / std(IC) per column of self.ic.

        NOTE(review): `self.ic` is never assigned anywhere in this class, so
        calling this raises AttributeError — quantize_data computes an 'ic'
        column but does not store it on the instance. Needs wiring up.
        """
        ir = self.ic.apply(lambda x: x.mean() / x.std())
        print(ir)

if __name__ == '__main__':
    # Hard-coded local data directory; the CSVs must exist for this to run.
    path = 'D:\\PythonPro\\QUANTAXIS\\AI\\'
    df = pd.read_csv(path + 'cate_test.csv')
    df = df[['date', 'code', 'name', 'industry', 'forward_return', 'prob']]
    df_hs300_return = pd.read_csv(path + 'hs300_return.csv')
    df_hs300_return.set_index(['date'], inplace=True)
    df_industry = pd.read_csv(path + 'hs300_industry_weight.csv')
    # Normalize industry weights so they sum to 1 over all industries.
    # (The previous groupby().apply(lambda x: x.sum() / total) summed every
    # column, including the non-numeric 'industry' column, which raises a
    # TypeError in modern pandas; only 'industry' and 'weight' are used
    # downstream, so sum the weight column explicitly.)
    total_weight = np.sum(df_industry['weight'])
    df_industry = df_industry.groupby('industry', as_index=False)['weight'].sum()
    df_industry['weight'] = df_industry['weight'] / total_weight
    # Renamed from `eval` to avoid shadowing the builtin.
    evaluator = Eval(df, df_hs300_return, df_industry, quantiles=7)
    evaluator.quantize_data()
    alpha, beta = evaluator.calc_alpha_beta(rtn_name='q1', base_name='300_f_rtn')
    print('alpha is %.2f, beta is %.2f' % (alpha, beta))
    print('calculate sharpe ratio:')
    evaluator.calc_sharpe()
    print('calculate annualized returns:')
    evaluator.calc_annualized_returns()
    print('calculate volatility:')
    evaluator.calc_volatility()
    print('calculate information coefficient:')
    evaluator.calc_information_coefficient()
    print('over')