# -*- coding: utf-8 -*-
import json
import multiprocessing
import traceback
from datetime import datetime
import numpy as np
import pandas as pd
import statsmodels.api as sm
from pandas.tseries.offsets import DateOffset
from scipy.optimize import fmin_slsqp
import applications.pycube.models.alpha_dao as alpha_dao
import applications.pycube.models.alpha_constant as alpha_constant

def build_model(alpha_db, policy_id, start_date, end_date, df_obj=None, step=None):
    """
    Build the factor risk model via daily cross-sectional WLS regressions.

    :param alpha_db: database handle passed through to the DAO layer
    :param policy_id: policy id (selects the factor set)
    :param start_date: start date of the sample
    :param end_date: end date of the sample
    :param df_obj: optional pre-fetched base data; loaded from the DB when None
    :param step: when 'test_valid', industry dummies are not restricted to the
        policy and residuals are not collected
    :return: dict with 'f' (daily factor returns), 't' (t-values) and, unless
        step == 'test_valid', 'resid_df' (regression residuals, i.e. the
        idiosyncratic factor returns, one column per tradecode)
    """
    if df_obj is None:
        df_obj = alpha_dao.get_base_data(alpha_db, start_date, end_date)
    trade_code_list = df_obj['tradecode'].drop_duplicates()
    date_list = df_obj['tradedate'].drop_duplicates().sort_values()

    policy_dict = alpha_dao.get_policy(alpha_db, policy_id=policy_id)
    if step == 'test_valid':
        industry = get_industry_dummies(alpha_db, trade_code_list)    # industry dummy DataFrame
    else:
        industry = get_industry_dummies(alpha_db, trade_code_list, policy_id)  # industry dummy DataFrame

    industry.reset_index(inplace=True)
    df_obj = pd.merge(df_obj, industry, on='tradecode')  # attach industry dummies

    st_tradecode = pd.DataFrame(data=alpha_dao.get_st_tradecode(alpha_db), columns=["tradedate", "tradecode"])     # ST stocks
    trade_create_data = pd.DataFrame(data=alpha_dao.get_trade_create_data(alpha_db), columns=["tradecode", "ipo_date"])   # listing (IPO) dates
    # Loop-invariant: parse the IPO dates once instead of on every iteration.
    trade_create_data['ipo_date'] = pd.to_datetime(trade_create_data['ipo_date'])

    t = []
    f = []
    resid_frames = []    # per-day residual frames (idiosyncratic factor)
    for date in date_list:
        sub_df_obj = df_obj[df_obj.tradedate == date]
        # Exclude rows with volume == 0 (no trading that day).
        sub_df_obj = sub_df_obj[sub_df_obj['volume'] != 0]
        temp_code = pd.DataFrame(sub_df_obj['tradecode'])

        # Exclude ST stocks.
        st_tradecode_day = pd.DataFrame(st_tradecode[st_tradecode.tradedate == date]['tradecode'])
        st_tradecode_tmp = pd.merge(temp_code, st_tradecode_day, how='inner', on='tradecode')
        if st_tradecode_tmp.shape[0] > 0:
            st_tradecode_tmp.rename(columns={'tradecode': 'st_tradecode'}, inplace=True)
            # Right-join back to temp_code; rows whose 'st_tradecode' is null
            # were NOT flagged as ST and are kept.
            st_tradecode_obj = pd.merge(st_tradecode_tmp, temp_code, how='right', left_on='st_tradecode', right_on='tradecode')
            temp_code = pd.DataFrame(st_tradecode_obj[st_tradecode_obj['st_tradecode'].isnull()]['tradecode'])

        # Exclude stocks listed for less than one year.
        check_date = pd.to_datetime(date) - DateOffset(years=1)
        list_trade_obj = pd.DataFrame(trade_create_data[trade_create_data['ipo_date'] >= check_date]['tradecode'])
        list_trade_obj = pd.merge(temp_code, list_trade_obj, how='inner', on='tradecode')
        if list_trade_obj.shape[0] > 0:
            list_trade_obj.rename(columns={'tradecode': 'st_tradecode'}, inplace=True)
            list_tradecode_obj = pd.merge(list_trade_obj, temp_code, how='right', left_on='st_tradecode', right_on='tradecode')
            temp_code = pd.DataFrame(list_tradecode_obj[list_tradecode_obj['st_tradecode'].isnull()]['tradecode'])

        sub_df_obj = sub_df_obj[sub_df_obj['tradecode'].isin(temp_code['tradecode'])]
        temp_Y = sub_df_obj['Y_rets'].values
        temp_X = sub_df_obj[policy_dict['all_factors']]
        # Regression weights: square root of market cap, normalized to sum 1.
        weights_df = (sub_df_obj['mkt_cap_ard_org']) ** 0.5
        weights_df = weights_df / weights_df.sum()
        res_wls = sm.WLS(temp_Y, smit_orth(temp_X), weights=weights_df).fit()

        f.append(res_wls.params)  # regression coefficients (factor returns)
        t.append(res_wls.tvalues)

        if step != 'test_valid':
            resid_frames.append(pd.DataFrame([res_wls.resid.tolist()], columns=temp_code['tradecode']))

    model_dict = {}
    if step != 'test_valid':
        # BUG FIX: DataFrame.append was deprecated and removed in pandas 2.x;
        # concatenate all per-day residual frames in a single pass instead of
        # growing the DataFrame inside the loop (which was also O(n^2)).
        resid_df = pd.concat([pd.DataFrame(columns=trade_code_list)] + resid_frames)
        model_dict['resid_df'] = resid_df.dropna(axis=1, how='all')
    model_dict['f'] = f
    model_dict['t'] = t
    return model_dict

def smit_orth(A):
    '''
    Gram-Schmidt orthogonalization of the columns of *A* (no normalization).

    :param A: DataFrame whose columns are the vectors to orthogonalize.
    :return: DataFrame of the same shape whose columns are mutually orthogonal.
    '''
    raw = A.values
    ortho = np.zeros_like(raw)
    for col_idx, vec in enumerate(raw.T):
        residual = np.copy(vec)
        # Subtract the projection of vec onto every previously built column.
        for prev in range(col_idx):
            basis = ortho[:, prev]
            residual -= basis * (np.dot(basis, vec) / np.dot(basis, basis))
        ortho[:, col_idx] = residual
    return pd.DataFrame(ortho, columns=A.columns)

def get_industry_dummies(alpha_db, trade_code_list, policy_id=None):
    """
    Build the industry-factor dummy-variable DataFrame.

    :param alpha_db: database handle passed through to the DAO layer
    :param trade_code_list: stock codes to select (row selection by index)
    :param policy_id: optional policy id; when given, only that policy's
        industry factors are kept
    :return: one-hot (dummy) DataFrame indexed by tradecode
    """
    industry = pd.DataFrame(data=alpha_dao.get_stock_industry(alpha_db), columns=['industry', 'tradecode'])
    industry.set_index('tradecode', inplace=True)
    # BUG FIX: identity comparison with None ('is not None') instead of '!='.
    if policy_id is not None:  # restrict to the policy's valid industry factors
        policy_dict = alpha_dao.get_policy(alpha_db, policy_id=policy_id)
        industry_factors = policy_dict['industry_factors']
        # isin() on the DataFrame masks non-matching cells to NaN (rows are
        # kept); get_dummies then ignores the NaN entries.
        industry = industry[industry.isin(industry_factors)]
    industry = industry.loc[trade_code_list]
    return pd.get_dummies(industry['industry'])  # dummy variables

def get_return_estimator(f, var_half_life=90, cov_half_life=480, err_half_life=42):
    """
    Risk forecast for the common factors.

    :param f: factor-return history (output of the risk model)
    :param var_half_life: half-life used for the variance estimate
    :param cov_half_life: half-life used for the covariance estimate
    :param err_half_life: half-life used for the volatility-bias weights
    :return: monthly (x21) common-factor risk forecast matrix
    """
    base_cov = F_d(f, var_half_life, cov_half_life)
    factor_rets = np.array(f)
    vol_bias = get_btf(factor_rets)
    # Exponentially weighted bias statistic used to rescale the covariance.
    scale = np.average(vol_bias, weights=weight_btf(len(vol_bias), err_half_life)) ** 0.5
    return 21 * (scale ** 2 * base_cov)
    # return calc_newey_west(pd.DataFrame(f)) * Fd1

def get_risk_estimator(resid_df):
    """
    Risk forecast for the idiosyncratic (specific) factors.

    :param resid_df: regression residuals, one column per tradecode
    :return: monthly (x21) diagonal DataFrame of specific variances; codes
        with at most one valid residual observation are dropped
    """
    kept_codes = []
    variances = []
    for i in range(resid_df.shape[1]):
        # BUG FIX: the original mutated a column view with dropna(inplace=True)
        # (SettingWithCopy hazard) and removed entries from a list it was
        # conceptually iterating; build the kept lists instead.
        data = resid_df.iloc[:, i].dropna()
        if data.shape[0] <= 1:  # too few observations to estimate a variance
            continue
        weight = weight_var(data.shape[0])
        weight_normalization = weight / weight.sum()  # normalize the weights
        data_dev = (data - data.mean()) ** 2
        variances.append((data_dev * weight_normalization).sum())
        kept_codes.append(resid_df.columns[i])
    Ud = pd.DataFrame(np.diagflat(variances), columns=kept_codes, index=kept_codes)
    return 21 * Ud

def F_d(f, var, cov):
    """Blend the factor-return variance and covariance estimates.

    The diagonal comes from the variance-half-life estimate, the off-diagonal
    entries from the covariance-half-life estimate.
    """
    diag_src = np.cov(f, rowvar=False, aweights=weight_var(len(f), var))     # factor-return variances
    offdiag_src = np.cov(f, rowvar=False, aweights=weight_cov(len(f), cov))  # factor-return covariances
    return offdiag_src - np.diagflat(np.diag(offdiag_src)) + np.diagflat(np.diag(diag_src))

def get_btf(f, lag=63, delay=1):
    """Bias statistic of the factor-return volatility forecasts.

    The volatility forecast made at day t-delay is compared with the realised
    factor return and averaged across factors.
    """
    shifted = pd.DataFrame(f).shift(delay)  # forecast made at t-delay for day t
    vol_forecast = shifted.rolling(window=lag).apply(pre_oneday_volatility)
    bias = (((shifted / vol_forecast) ** 2).mean(axis=1)) ** 0.5
    return bias.dropna()

def weight_var(size, var=90.0):
    """Exponential-decay weight vector (half-life ``var``) for variances.

    The newest observation gets weight 1, the oldest the smallest weight.
    BUG FIX: ``1 / var`` truncated to 0 under Python 2 when callers pass an
    int half-life (as pre_oneday_volatility does), making every weight 1.
    """
    return np.power(1 / 2.0 ** (1.0 / var), list(range(size - 1, -1, -1)))

def weight_cov(size, cov=480.0):
    """Exponential-decay weight vector (half-life ``cov``) for covariances.

    BUG FIX: ``1 / cov`` was Python 2 integer division for int arguments,
    which degenerated every weight to 1; use 1.0 for true division.
    """
    return np.power(1 / 2.0 ** (1.0 / cov), list(range(size - 1, -1, -1)))

def pre_oneday_volatility(df, var=90):
    """One-day-ahead volatility: exponentially weighted RMS over the window."""
    w = weight_var(df.shape[0], var)  # exponential weights for the window
    w = w / w.sum()                   # normalize so the weights sum to 1
    return ((w * df ** 2).sum()) ** 0.5

def weight_btf(size, cov=42.0):
    """Exponential-decay weights (half-life ``cov``) for the bias statistic.

    BUG FIX: ``1 / cov`` truncated under Python 2 for int arguments; 1.0
    forces true division.
    """
    return np.power(1 / 2.0 ** (1.0 / cov), list(range(size - 1, -1, -1)))

def last_day_all_data(alpha_db,policy_id,df_obj,resid_df_columns):
    """
    Collect everything needed for the most recent trading day.

    :param alpha_db: database handle passed through to the DAO layer
    :param policy_id: policy id (selects the style-factor set)
    :param df_obj: base data source
    :param resid_df_columns: trade codes that have backtest residuals
    :return: tuple of
        - last_day_trade_code_list: trade codes present on the last day
        - df_last_day_data: style + industry factor exposures for those codes
        - select_hs300_weight: HS300 weights as fractions (percent / 100)
        - last_day_hs300: style + industry exposures of the HS300 members
        - industry: industry dummy DataFrame
    """
    date_list = df_obj['tradedate'].drop_duplicates().sort_values()

    # Restrict to the latest trading day, then to codes that have residuals.
    new_df = df_obj[df_obj.tradedate == date_list[-1:].values[0]]
    last_day_trade_code_list = new_df['tradecode'].drop_duplicates()
    last_day_trade_code_list = last_day_trade_code_list[last_day_trade_code_list.isin(resid_df_columns)]
    new_df = new_df[new_df['tradecode'].isin(last_day_trade_code_list)]

    industry = get_industry_dummies(alpha_db,last_day_trade_code_list,policy_id)  # industry dummy DataFrame

    last_day_hs300_weight = get_hs300_weight(alpha_db,date_list[-1:].values[0])
    last_day_hs300_weight.reset_index(inplace=True)

    # Keep only HS300 members that also trade on the last day.
    last_day_hs300_weight = last_day_hs300_weight[last_day_hs300_weight['tradecode'].isin(last_day_trade_code_list)]
    last_day_hs300_weight.set_index('tradecode', inplace=True)

    last_day_hs300 = new_df[new_df['tradecode'].isin(last_day_hs300_weight.index)]
    last_day_hs300.set_index('tradecode', inplace=True)
    new_df.set_index('tradecode', inplace=True)

    policy_dict = alpha_dao.get_policy(alpha_db, policy_id=policy_id)
    style_factors = policy_dict['style_factors']

    # Join the style-factor exposures with the industry dummies by tradecode.
    df_last_day_data = pd.merge(new_df[style_factors], industry, how='left', left_index=True,right_index=True)
    last_day_hs300 = pd.merge(last_day_hs300[style_factors], industry, how='left', left_index=True,right_index=True)
    select_hs300_weight = last_day_hs300_weight['trade_weight'] / 100.0  # percent -> fraction

    return last_day_trade_code_list,df_last_day_data,select_hs300_weight,last_day_hs300,industry

def get_hs300_weight(alpha_db, trade_date):
    """
    HS300 index constituent weights for one trading day.

    :param alpha_db: database handle passed through to the DAO layer
    :param trade_date: trading day to query
    :return: DataFrame indexed by tradecode with a 'trade_weight' column
    """
    rows = alpha_dao.get_alpha_hs300_weight(alpha_db, trade_date)
    weights = pd.DataFrame(data=rows, columns=['tradecode', 'trade_weight'])
    return weights.set_index('tradecode')

def calc_min_pure_factor_comb(X_selected_day, F1, U1, hs300_weight, hs300_date, ite_count):
    """
    Build one minimum-risk pure-factor portfolio per style factor.

    :param X_selected_day: last-day factor exposures
    :param F1: common-factor risk forecast
    :param U1: specific-factor risk forecast
    :param hs300_weight: benchmark (HS300) weights
    :param hs300_date: benchmark factor exposures
    :param ite_count: number of style factors to iterate (industry excluded)
    :return: (list of weight vectors, list of objective values)
    """
    w_parameter = []
    f_parameter = []
    # NOTE: the iterations are independent; a process pool could parallelize.
    for count_num in range(ite_count):  # loop over the style factors
        # BUG FIX: Python-2-only print statement -> print() function.
        print('运行次数' + str(count_num))
        coua = 0.4  # initial lower bound on the pure-factor exposure
        X_parameter, ff_min = calc_w_pure_factor(X_selected_day, F1, U1, hs300_weight, hs300_date, count_num, coua=coua)
        w_parameter.append(X_parameter.tolist())
        f_parameter.append(ff_min)
    return w_parameter, f_parameter

def calc_w_pure_factor(x, f, u, weight, hs300_date, k, coua=0.5, bounds=(-0.05, 0.05), iteration_step=0.05):
    '''
    Solve for a minimum-risk pure-factor portfolio via SLSQP.

    :param x: factor exposures (assets x factors ndarray)
    :param f: common-factor risk matrix forecast
    :param u: specific-factor risk matrix
    :param weight: benchmark portfolio weights
    :param hs300_date: benchmark factor exposures (assets x factors)
    :param k: column index of the selected pure factor
    :param coua: lower threshold for the pure-factor exposure; relaxed by
        ``iteration_step`` each time the optimizer fails to converge
    :param bounds: per-asset weight bounds
    :param iteration_step: amount by which ``coua`` is relaxed per retry
    :return: (weights, objective value at the optimum)
    '''
    # Portfolio variance w' (X F X' + U) w; sigma does not depend on w, so
    # compute it once instead of on every objective evaluation.
    sigma = np.dot(np.dot(x, f), np.transpose(x)) + u

    def _min_vol_func(w, *args):
        return np.dot(np.dot(np.transpose(w), sigma), w)

    init_guess = np.array([1.0 / x.shape[0]] * x.shape[0])
    x_alpha = x[:, k]
    x_beta = np.delete(x, k, axis=1)
    hs300_alpha = hs300_date[:, k]
    hs300_beta = np.delete(hs300_date, k, axis=1)
    # fmin_slsqp drives eqcons to 0 and requires ieqcons >= 0 at the optimum.
    # The lambdas read ``coua`` from the enclosing scope, so the retry loop
    # below relaxes the constraint without rebuilding the list.
    eqcons_list = [lambda w, *args: np.abs(np.sum(w) - 1)]
    ieqcons_list = [lambda w, *args: np.abs(np.dot(w.T, x_alpha) - np.dot(weight, hs300_alpha)) - coua,
                    lambda w, *args: 0.01 - np.abs(np.dot(w.T, x_beta) - np.dot(weight, hs300_beta))
                    ]
    bounds_list = [bounds] * u.shape[0]
    result = fmin_slsqp(_min_vol_func, init_guess, bounds=bounds_list, eqcons=eqcons_list, ieqcons=ieqcons_list,
                        iprint=2, full_output=True, iter=30, acc=1.0E-4)
    # result[3] is the SLSQP exit mode (0 == converged).  On failure, relax
    # the pure-factor exposure threshold and retry.
    # BUG FIX: Python-2-only '<>' operator and print statement modernized.
    while (result[3] != 0) and (coua > 0):
        print(result[3])
        coua -= iteration_step
        result = fmin_slsqp(_min_vol_func, init_guess, bounds=bounds_list, eqcons=eqcons_list,
                            ieqcons=ieqcons_list, iprint=2, full_output=True, iter=20, acc=1.0E-4)
    return result[0], result[1]

def calc_opt_algorithm(alpah_db, policy_id, opt_type, df_last_day_data, F1, U1, hs300_weight, df_last_day_data_alpha, df_last_day_data_bata, code_list, hs300_alpha, hs300_beta):
    """
    Portfolio optimization dispatcher with a no-solution retry mechanism.

    :param alpah_db: database handle (name kept for caller compatibility)
    :param policy_id: policy id used to load limits and return forecasts
    :param opt_type: 1 = maximize information ratio; 2 = minimize portfolio
        risk; 3 = maximize risk-adjusted return
    :param df_last_day_data: last-day factor exposures
    :param F1: common-factor risk estimate
    :param U1: specific-factor risk estimate
    :param hs300_weight: HS300 benchmark weights
    :param df_last_day_data_alpha: last-day alpha-factor exposures
    :param df_last_day_data_bata: last-day beta (risk) factor exposures
    :param code_list: stock codes for the result index
    :param hs300_alpha: HS300 alpha-factor exposures
    :param hs300_beta: HS300 beta-factor exposures
    :return: pd.Series of portfolio weights indexed by code_list
    """
    # NOTE(review): SQL is built via string concatenation; policy_id should be
    # bound as a query parameter to avoid SQL injection.
    sql = "select json from alpha_policy_json where type = " + alpha_constant.alpha_limit_list_type + " and policy_id=" + policy_id
    alpha_limit_list = alpah_db.executesql(sql)
    df_limit = pd.DataFrame.from_dict(json.loads(alpha_limit_list[0][0]))
    lower_limit = df_limit.iloc[:, 1].astype('float64')
    upper_limit = df_limit.iloc[:, 2].astype('float64')

    sql = "select json from alpha_policy_json where type = " + alpha_constant.f_pre_type + " and policy_id=" + policy_id
    f_pre = alpah_db.executesql(sql)
    falpha = np.array(json.loads(f_pre[0][0]))  # factor return estimates

    def calc_portfolio(upper_limit, lower_limit):  # optimization core
        """Run one optimization pass with the given alpha-exposure limits."""
        # BUG FIX: the original re-read the limits from the DB here, silently
        # discarding the relaxed lower_limit from the retry loop below (the
        # loop could therefore never make progress).
        if opt_type == 1:
            # NOTE(review): this call does not match calc_w_ir_max's signature
            # (industry/coua arguments missing) -- confirm intended arguments.
            w_ir_max, result_sucsuess = calc_w_ir_max(df_last_day_data, F1, U1, hs300_weight, df_last_day_data_alpha.values, df_last_day_data_bata.values, falpha)
            ser_obj = pd.Series(w_ir_max, index=code_list)
        elif opt_type == 2:
            ser_obj, result_sucsuess = recurs_calc_w_min(X_selected_day=df_last_day_data.values, F1=F1, U1=U1, hs300_weight=hs300_weight, X_selected_alpha_day=df_last_day_data_alpha.values, X_selected_risk_day=df_last_day_data_bata.values, upper_limit=upper_limit, lower_limit=lower_limit, hs300_alpha=hs300_alpha, hs300_beta=hs300_beta, code_list=code_list)
        elif opt_type == 3:
            w_risk_max, result_sucsuess = calc_w_risk_max(falpha=falpha, x=df_last_day_data.values, f=F1, u=U1, weight=hs300_weight, x_alpha=df_last_day_data_alpha.values, x_beta=df_last_day_data_bata.values, upper_limit=upper_limit, lower_limit=lower_limit, hs300_alpha=hs300_alpha, hs300_beta=hs300_beta)
            ser_obj = pd.Series(w_risk_max, index=code_list)
        return ser_obj, result_sucsuess

    def no_solution_clac(lower_limit, order=None, min_lower_limit=0.15, iteration_step=0.05):
        """No-solution handler: relax the alpha-exposure lower bounds one at a
        time, in the given order, never below min_lower_limit.

        Returns (lower_limit, changed); changed is False when every bound is
        already at/below the floor, so the caller can stop retrying.
        """
        if order is None:
            order = range(lower_limit.shape[0])
        for i in range(len(order)):
            if lower_limit[order[i]] > min_lower_limit:
                lower_limit[order[i]] -= iteration_step
                return lower_limit, True
        return lower_limit, False

    ser_obj, result_sucsuess = calc_portfolio(upper_limit, lower_limit)
    # BUG FIX: Python-2-only '<>' operator replaced with '!='.
    while result_sucsuess != 0:
        # BUG FIX: the original passed df_limit (the raw DataFrame) instead of
        # the lower_limit Series to the relaxation helper.
        lower_limit, changed = no_solution_clac(lower_limit)
        if not changed:  # nothing left to relax -- avoid an infinite loop
            break
        ser_obj, result_sucsuess = calc_portfolio(upper_limit, lower_limit)
    return ser_obj

def calc_w_min(x, f, u, weight, x_alpha, x_beta, upper_limit=None, lower_limit=None, bounds=[-0.05, 0.05], hs300_alpha=None, hs300_beta=None):
    '''
    Minimize portfolio risk w' (X F X' + U) w subject to exposure limits.

    :param x: factor exposures (assets x factors)
    :param f: common-factor risk matrix forecast
    :param u: specific-factor risk matrix
    :param weight: benchmark portfolio weights
    :param x_alpha: alpha-factor exposures
    :param x_beta: style/risk-factor exposures
    :param upper_limit: upper threshold on the alpha exposure
    :param lower_limit: lower threshold on the alpha exposure
    :param bounds: per-asset weight bounds
    :param hs300_alpha: benchmark alpha-factor exposures
    :param hs300_beta: benchmark beta-factor exposures
    :return: (weights, SLSQP exit mode -- 0 means converged)
    '''
    # sigma is independent of w; compute once instead of per evaluation.
    sigma = np.dot(np.dot(x, f), np.transpose(x)) + u

    def _min_vol_func(w, *args):
        return np.dot(np.dot(np.transpose(w), sigma), w)

    # BUG FIX: 1 / u.shape[0] was Python 2 integer division, yielding an
    # all-zero initial guess; 1.0 matches the other optimizers in this file.
    init_guess = np.array([1.0 / u.shape[0]] * x.shape[0])
    eqcons_list = [lambda w, *args: np.abs(np.sum(w) - 1)]
    # NOTE(review): the first constraint subtracts the benchmark exposure
    # instead of bounding the active exposure (compare the lower-limit line);
    # confirm whether upper_limit - (w'x_alpha - weight'hs300_alpha) was meant.
    ieqcons_list = [lambda w, *args: upper_limit - np.dot(w.T, x_alpha) - np.dot(weight, hs300_alpha),
                    lambda w, *args: np.abs(np.dot(w.T, x_alpha) - np.dot(weight, hs300_alpha)) - lower_limit,
                    lambda w, *args: 0.01 - np.abs(np.dot(w.T, x_beta) - np.dot(weight, hs300_beta))
                    ]
    bounds_list = [bounds] * u.shape[0]
    result = fmin_slsqp(_min_vol_func, init_guess, bounds=bounds_list, eqcons=eqcons_list, ieqcons=ieqcons_list, iprint=2, full_output=True, iter=50, acc=1.0E-6)

    print('#' * 100)
    print('Lagrange result: {}'.format(result[0]))
    print('Sum of w is: {}'.format(sum(result[0])))
    print('#' * 100)

    return result[0], result[3]

def calc_w_ir_max(x, f, u, weight, industry, x_alpha, x_beta, coua, falpha, bounds=[-0.05, 0.05], tc=0):
    '''
    Maximize the portfolio information ratio  Max (Rp - TC(w)) / sigma_p.

    :param x: factor exposures (assets x factors)
    :param f: common-factor risk matrix forecast
    :param u: specific-factor risk matrix
    :param weight: benchmark portfolio weights
    :param industry: industry dummy exposures (DataFrame)
    :param x_alpha: alpha-factor exposures
    :param x_beta: style/risk-factor exposures
    :param coua: bound on the absolute alpha exposure
    :param falpha: forecast factor returns
    :param bounds: per-asset weight bounds
    :param tc: transaction cost
    :return: (weights, SLSQP exit mode -- 0 means converged)
    '''
    benchmark_alpha = 0
    # NOTE(review): the numerator below does not depend on w, so only the
    # denominator varies during optimization -- confirm this objective really
    # expresses "maximize IR" (minimizing a positive constant / sigma_p
    # actually maximizes sigma_p).
    expected_ret = sum(np.dot(x, falpha)) - tc              # invariant in w
    sigma = np.dot(np.dot(x, f), np.transpose(x)) + u       # invariant in w

    def _min_vol_func(w, *args):
        return np.divide(expected_ret, np.dot(np.dot(np.transpose(w), sigma), w) ** 0.5)

    # BUG FIX: 1 / u.shape[0] truncated to 0 under Python 2 integer division.
    init_guess = np.array([1.0 / u.shape[0]] * x.shape[0])
    eqcons_list = [lambda w, *args: abs(np.sum(w) - 1) - 0.001]
    # NOTE(review): the beta/industry constraints require the absolute
    # exposure to EXCEED 0.01 (opposite sign to calc_w_min), and the
    # sum-of-weights condition duplicates the equality constraint -- confirm.
    ieqcons_list = [lambda w, *args: coua - abs(np.dot(w.T, x_alpha) - np.dot(weight, benchmark_alpha)),
                    lambda w, *args: abs(np.dot(np.transpose(w - weight), x_beta)) - 0.01,
                    lambda w, *args: abs(np.dot(np.transpose(w), industry.values)) - 0.01,
                    lambda w, *args: abs(np.sum(w) - 1) - 0.001
                    ]
    bounds_list = [bounds] * u.shape[0]
    result = fmin_slsqp(_min_vol_func, init_guess, bounds=bounds_list, eqcons=eqcons_list, ieqcons=ieqcons_list,
                        iprint=2, full_output=True, iter=50, acc=1.0E-6)

    print('#' * 100)
    print('Lagrange result: {}'.format(result[0]))
    print('Sum of w is: {}'.format(sum(result[0])))
    print('#' * 100)
    return result[0], result[3]

def calc_w_risk_max(falpha, x, f, u, te, weight, x_alpha, x_beta, upper_limit=None, lower_limit=None, bounds=[-0.05, 0.05], hs300_alpha=None, hs300_beta=None, tc=0, risk_adverse=0.5):
    """
    Maximize risk-adjusted expected return  Max {Rp - lambda*sigma_p^2 - TC(w)}.

    :param falpha: forecast returns of the alpha factors
    :param x: factor exposures (assets x factors)
    :param f: common-factor risk matrix forecast
    :param u: specific-factor risk matrix forecast
    :param te: tracking-error threshold
    :param weight: benchmark portfolio weights
    :param x_alpha: alpha-factor exposures
    :param x_beta: risk-factor exposures
    :param upper_limit: upper bound on the alpha exposure
    :param lower_limit: lower bound on the alpha exposure
    :param bounds: per-asset weight bounds
    :param hs300_alpha: benchmark alpha-factor exposures
    :param hs300_beta: benchmark risk-factor exposures
    :param tc: transaction cost
    :param risk_adverse: risk-aversion coefficient (lambda)
    :return: (weights, SLSQP exit mode -- 0 means converged)

    NOTE(review): the opt_type==3 caller in calc_opt_algorithm does not pass
    ``te`` -- confirm the intended call site.
    """
    sigma = np.dot(np.dot(x, f), np.transpose(x)) + u  # invariant in w
    alpha_ret = np.dot(x, falpha)                      # per-asset forecast returns

    def _min_vol_func(w, *args):
        risk_var = np.dot(np.dot(np.transpose(w), sigma), w)
        risk_adjust = risk_adverse * risk_var
        # BUG FIX: the original returned -(np.dot(x, falpha) - ...), a VECTOR,
        # which SLSQP cannot minimize; the portfolio return is w' (X falpha).
        return -(np.dot(w, alpha_ret) - risk_adjust - tc)

    init_guess = np.array([1.0 / u.shape[0]] * x.shape[0])
    eqcons_list = [lambda w, *args: np.abs(np.sum(w) - 1)]
    # BUG FIX: the tracking-error constraint called an ndarray as a function
    # (a missing np.dot/comma); rewritten as the active-risk quadratic form.
    # NOTE(review): as written it requires active risk >= te*sqrt(12); confirm
    # whether te was meant as an UPPER bound on tracking error instead.
    ieqcons_list = [lambda w, *args: upper_limit - np.dot(w.T, x_alpha) - np.dot(weight, hs300_alpha),
                    lambda w, *args: np.abs(np.dot(w.T, x_alpha) - np.dot(weight, hs300_alpha)) - lower_limit,
                    lambda w, *args: 0.01 - np.abs(np.dot(w.T, x_beta) - np.dot(weight, hs300_beta)),
                    lambda w, *args: np.dot(np.dot(np.transpose(w - weight), sigma), (w - weight)) - te * (12 ** 0.5)
                    ]
    bounds_list = [bounds] * u.shape[0]
    result = fmin_slsqp(_min_vol_func, init_guess, bounds=bounds_list, eqcons=eqcons_list, ieqcons=ieqcons_list, iprint=2, full_output=True, iter=50, acc=1.0E-6)
    print('#' * 100)
    print('Lagrange result: {}'.format(result[0]))
    print('Sum of w is: {}'.format(sum(result[0])))
    print('#' * 100)
    return result[0], result[3]

def recurs_calc_w_min(X_selected_day, F1, U1, hs300_weight, X_selected_alpha_day, X_selected_risk_day, upper_limit, lower_limit, hs300_alpha=None, hs300_beta=None, code_list=None, resid_df=None, r_var=0.0011):
    """
    Minimum-risk portfolio wrapper around calc_w_min.

    :param X_selected_day: last-day factor exposures
    :param F1: common-factor risk estimate
    :param U1: specific-factor risk estimate
    :param hs300_weight: benchmark (HS300) weights
    :param X_selected_alpha_day: alpha-factor exposures
    :param X_selected_risk_day: risk-factor exposures
    :param upper_limit: upper alpha-exposure threshold
    :param lower_limit: lower alpha-exposure threshold
    :param hs300_alpha: benchmark alpha-factor exposures
    :param hs300_beta: benchmark risk-factor exposures
    :param code_list: stock codes used as the result index
    :param resid_df: residuals (currently unused; kept for compatibility)
    :param r_var: weight threshold (currently unused; kept for compatibility)
    :return: (pd.Series of weights indexed by code_list, SLSQP exit mode)
    """
    weights, exit_mode = calc_w_min(
        X_selected_day, F1, U1, hs300_weight,
        X_selected_alpha_day, X_selected_risk_day,
        upper_limit=upper_limit, lower_limit=lower_limit,
        hs300_alpha=hs300_alpha, hs300_beta=hs300_beta)
    # The recursive pruning of tiny-weight stocks is currently disabled; the
    # first solution is returned as-is.
    return pd.Series(weights, index=code_list), exit_mode
