# -*- coding: utf-8 -*-
import json
import multiprocessing
import traceback
from datetime import datetime
import random
import numpy as np
import pandas as pd
import statsmodels.api as sm
from pandas.tseries.offsets import DateOffset
from scipy.optimize import fmin_slsqp
from sqlalchemy import create_engine
import traceback
import applications.pycube.models.alpha_constant as alpha_constant
import applications.pycube.models.alpha_dao as alpha_dao
import applications.pycube.models.alpha_common as alpha_common
import applications.pycube.models.alpha_constant as alpha_constant
import time

def multi_pure_factor_portfolios(args):
    """Adapter for multiprocessing.Pool.map, which passes a single argument:
    unpack the tuple ``args`` into the positional parameters of
    ``pure_factor_portfolios`` and return its result (an INSERT SQL string)."""
    return pure_factor_portfolios(*args)

def multi_portfolio_construction(args):
    """Adapter for multiprocessing.Pool.map, which passes a single argument:
    unpack the tuple ``args`` into the positional parameters of
    ``portfolio_construction`` and return its result (an INSERT SQL string)."""
    return portfolio_construction(*args)

def pure_factor_portfolios(alpah_db, policy_id, policy_name, date_range, var_half_life, cov_half_life, err_half_life, ite_count, i):
    '''
    Pure-factor portfolio optimization for one sub-period.

    :param alpah_db: database handle passed through to the DAO layer
                     (parameter name kept as-is for backward compatibility)
    :param policy_id: strategy identifier
    :param policy_name: strategy name, embedded in the generated SQL
    :param date_range: 4-element sequence:
                       (sub_start, sub_end, retest_start, retest_end)
    :param var_half_life: half-life for the variance estimator
    :param cov_half_life: half-life for the covariance estimator
    :param err_half_life: half-life for the error estimator
    :param ite_count: iteration count for the optimizer
    :param i: sequence number of this sub-period
    :return: an INSERT statement (str) for alpha_construct_model
    '''
    # Use the public strftime API rather than the private pandas attribute
    # ``_date_repr``, which is not a stable interface across pandas versions.
    sub_start_date = pd.to_datetime(date_range[0]).strftime('%Y-%m-%d')
    sub_end_date = pd.to_datetime(date_range[1]).strftime('%Y-%m-%d')
    retest_start_date = pd.to_datetime(date_range[2]).strftime('%Y-%m-%d')
    retest_end_date = pd.to_datetime(date_range[3]).strftime('%Y-%m-%d')

    df_obj = alpha_dao.get_base_data(alpah_db, sub_start_date, sub_end_date)  # base strategy data
    model_dict = alpha_common.build_model(alpah_db, policy_id, sub_start_date, sub_end_date, df_obj)  # model construction

    f = model_dict['f']
    F1 = alpha_common.get_return_estimator(f, var_half_life, cov_half_life, err_half_life)  # return estimate
    U1 = alpha_common.get_risk_estimator(model_dict['resid_df'])  # risk estimate

    # Data for the most recent trading day
    last_day_trade_code_list, df_last_day_data, select_hs300_weight, last_day_hs300, industry = alpha_common.last_day_all_data(alpah_db, policy_id, df_obj, U1.columns.values)
    # Restrict the risk matrix to the last day's trade codes (rows and columns).
    last_day_U1 = U1[last_day_trade_code_list].T[last_day_trade_code_list].T
    w_parameter, f_parameter = alpha_common.calc_min_pure_factor_comb(df_last_day_data.values, F1, last_day_U1, select_hs300_weight, last_day_hs300.values, ite_count)

    # NOTE(review): SQL is assembled by string interpolation because this
    # function's contract is to return a ready-to-run statement. This is
    # injection-prone if policy_name is ever attacker-controlled; prefer
    # parameterized execution (PEP 249 style) at the call site.
    sql = ("INSERT INTO alpha_construct_model(policy_name,seq,start_date,end_date,"
           "retest_start_date,retest_end_date,model_weights,w_tradecode,model_fx)"
           "  VALUES('{0}','{1}','{2}','{3}','{4}','{5}','{6}','{7}','{8}')").format(
        policy_name, i, sub_start_date, sub_end_date, retest_start_date,
        retest_end_date, json.dumps(w_parameter),
        json.dumps(df_last_day_data.index.tolist()), json.dumps(f_parameter))
    return sql

def portfolio_construction(alpah_db, policy_id, policy_name, f_alpha, f_beta, opt_type, date_range, half_life_dict, i):
    '''
    Build the optimized portfolio for one sub-period and return the INSERT
    statement persisting its weights.

    :param alpah_db: database handle passed through to the DAO layer
                     (parameter name kept as-is for backward compatibility)
    :param policy_id: strategy identifier
    :param policy_name: strategy name, embedded in the generated SQL
    :param f_alpha: alpha-factor column label(s)
    :param f_beta: beta-factor column label(s)
    :param opt_type: optimization algorithm selector for calc_opt_algorithm
    :param date_range: 4-element sequence:
                       (sub_start, sub_end, retest_start, retest_end)
    :param half_life_dict: dict with keys 'var_half_life', 'cov_half_life',
                           'err_half_life'
    :param i: sequence number of this sub-period
    :return: an INSERT statement (str) for alpha_construct_comb
    '''
    # Use the public strftime API rather than the private pandas attribute
    # ``_date_repr``, which is not a stable interface across pandas versions.
    sub_start_date = pd.to_datetime(date_range[0]).strftime('%Y-%m-%d')
    sub_end_date = pd.to_datetime(date_range[1]).strftime('%Y-%m-%d')
    retest_start_date = pd.to_datetime(date_range[2]).strftime('%Y-%m-%d')
    retest_end_date = pd.to_datetime(date_range[3]).strftime('%Y-%m-%d')

    var_half_life = half_life_dict['var_half_life']
    cov_half_life = half_life_dict['cov_half_life']
    err_half_life = half_life_dict['err_half_life']

    df = alpha_dao.get_base_data(alpah_db, sub_start_date, sub_end_date)
    model_dict = alpha_common.build_model(alpah_db, policy_id, sub_start_date, sub_end_date, df_obj=df)  # model construction
    F1 = alpha_common.get_return_estimator(model_dict['f'], var_half_life, cov_half_life, err_half_life)  # return estimate
    U1 = alpha_common.get_risk_estimator(model_dict['resid_df'])  # risk estimate

    # Data for the most recent trading day
    last_day_trade_code_list, df_last_day_data, select_hs300_weight, last_day_hs300, industry = alpha_common.last_day_all_data(alpah_db, policy_id, df, U1.columns.values)

    # Restrict the risk matrix to the last day's trade codes (rows and columns).
    last_day_U1 = U1[last_day_trade_code_list].T[last_day_trade_code_list].T

    hs300_alpha = last_day_hs300[f_alpha]
    hs300_beta = last_day_hs300[f_beta]

    ser_obj = alpha_common.calc_opt_algorithm(alpah_db, policy_id, opt_type, df_last_day_data, F1, last_day_U1, select_hs300_weight, df_last_day_data[f_alpha], df_last_day_data[f_beta], last_day_trade_code_list, hs300_alpha, hs300_beta)
    trade_code_w = pd.DataFrame({'tradecode': ser_obj.index, 'weight_w': ser_obj.values})

    # NOTE(review): see pure_factor_portfolios — SQL-by-concatenation is kept
    # only because the function's contract is to return a SQL string; prefer
    # parameterized execution (PEP 249 style) at the call site.
    sql = ("INSERT INTO alpha_construct_comb(policy_name,seq,start_date,end_date,"
           "retest_start_date,retest_end_date,comb_weights)"
           "  VALUES('{0}','{1}','{2}','{3}','{4}','{5}','{6}')").format(
        policy_name, i, sub_start_date, sub_end_date, retest_start_date,
        retest_end_date, json.dumps(trade_code_w.values.tolist()))
    return sql

def func_01(alpah_db, sub_start_date=None, sub_end_date=None):
    """Debug helper: load base data and build the model for policy '16',
    printing both results. Returns None.

    :param alpah_db: database handle passed through to the DAO layer
    :param sub_start_date: optional start date forwarded to the DAO
    :param sub_end_date: optional end date forwarded to the DAO
    """
    df_obj = alpha_dao.get_base_data(alpah_db, sub_start_date, sub_end_date)  # base strategy data
    model_dict = alpha_common.build_model(alpah_db, '16', sub_start_date, sub_end_date, df_obj)  # model construction
    # Single-argument print(x) behaves identically under Python 2 and 3,
    # so this keeps the file importable by both.
    print(df_obj)
    print(model_dict)

def mulit_func_02(args):
    """Adapter for multiprocessing.Pool.map: unpack the argument tuple into
    ``func_02``'s positional parameters.

    NOTE: the misspelled name ("mulit") is kept because ``__main__``
    references this function by that exact name.
    """
    return func_02(*args)

def func_02(alpha_db, t, i):
    """Demo worker: query test_table, then build and return one INSERT
    statement for it.

    :param alpha_db: object exposing ``executesql(sql)``
    :param t: value for the ``type`` column (int or str)
    :param i: sequence number for the ``seq`` column
    :return: an INSERT statement (str) for test_table
    """
    # Side effect: read back current rows and print them for inspection.
    search = alpha_db.executesql("select seq,name,type from test_table")
    # Single-argument print(x) is identical under Python 2 and 3.
    print(search)
    # Bug fix: the old code concatenated ``t`` directly ("+t+"), which raises
    # TypeError when ``t`` is an int — and __main__ passes t=5. Coerce with
    # str() so both int and str inputs work.
    sql = "insert into test_table(seq, name, type) VALUES ('" + str(i) + "','test_p'," + str(t) + ")"
    return sql

if __name__ == '__main__':
    try:
        # Fan func_02 out over a worker pool; each worker returns one
        # INSERT statement.
        process_pool = multiprocessing.Pool(processes=alpha_constant.POOL_SIZE)
        # Renamed from ``list`` to avoid shadowing the builtin.
        # NOTE(review): this demo passes a None db handle and an int t=5;
        # with the current func_02 the workers raise, and the exception is
        # surfaced by the handler below.
        task_args = []
        for seq in range(5):
            task_args.append((None, 5, seq))
        results = process_pool.map(mulit_func_02, task_args)
        process_pool.close()
        process_pool.join()
    except Exception:
        # Top-level boundary: report the full traceback instead of dying
        # silently. Single-argument print(x) works under Python 2 and 3.
        print(traceback.format_exc())