import pandas as pd
import numpy as np
import os
import shutil
import sys
import traceback
import itertools
import datetime
import multiprocessing
import time
import random
import threading
import pymysql
import re
import requests
import getpass
import uuid
import json

from qinganx.factor.StockFactorNew import StockFactor
from factor_test_IR_fun import *
from qinganx.datalib import Datalib
import qinganx.funclib as flib
# from qinganx.alpha_expression import * 
import qinganx.alpha_expression as alphalib
from qinganx.stk_testlib import * 
# Shared data-access client used throughout this module.
dlib = Datalib()



# MySQL connection settings for the factor-research database.
# NOTE(review): credentials are hard-coded in source — consider moving them to
# environment variables or a config file outside version control.
LOCAL_MYSQL_DB = 'qingan'
LOCAL_MYSQL_USER = 'yxk'
LOCAL_MYSQL_PASS = 'qingantouzi'
LOCAL_MYSQL_HOST_M = '192.168.1.28'  # master host
LOCAL_MYSQL_HOST_S = '192.168.1.28'  # slave host (same address here)
LOCAL_MYSQL_PORT = 3306
# Module-level connection and cursor, shared by every function below via
# `global conn,cursor`; access is not synchronized (mysqllock is never acquired
# in this chunk).
conn = pymysql.connect(host=LOCAL_MYSQL_HOST_M, user=LOCAL_MYSQL_USER, passwd=LOCAL_MYSQL_PASS, db=LOCAL_MYSQL_DB, port=LOCAL_MYSQL_PORT, charset='utf8')
cursor = conn.cursor()
# Universe field mask and trading-status masks from the data library.
field0=dlib.get_stock_field()
mysqllock=threading.Lock()  # NOTE(review): defined but not acquired anywhere visible here
status=dlib.get_stock_status()
STstatus=dlib.get_stock_status('st_delisting_status')
# field_normal=(st.replace(1,np.nan))+(status.replace(0,np.nan)).loc[start_date:end_date,:'688000']


# Local/NAS storage layout for factor data, grouped results, PnL series and models.
basic_path=  '/home/yxk/workspace/jupyter/data/hdf_second_factor/'
hdf_path=    '/home/yxk/workspace/jupyter/data/hdf_factor/'
group_path=  '/home/qastorage/public/yxk/Factor_Group/'
pnl_path=    '/home/yxk/workspace/jupyter/data/pnl_return/'
storage_pnl_path='/home/qastorage/public/yxk/factor/signal_return/'
local_py_path='/home/yxk/workspace/jupyter/factor_test/factorpy/'
model_path='/home/yxk/workspace/jupyter/data/model/'
# Basic_path=  '/home/qastorage/public/yxk/Basic_Factor/'    
# hdf_path=    '/home/sunqy/workspace/jupyter/test/data/hdf_data/'
# group_path=  '/home/qastorage/public/yxk/Factor_Group/'
# pnl_path=    '/home/sunqy/workspace/jupyter/test/data/pnl_data/'
# storage_pnl_path='/home/qastorage/public/yxk/factor/signal_return/'
# local_py_path='/home/yxk/workspace/jupyter/factor_test/factorpy/'
# model_path='/home/sunqy/workspace/jupyter/test/data/model/'
          

# Factor names used for residualization and pre-built grouping keys.
# NOTE(review): many entries spell "group" as "gourp" — presumably these match
# the stored factor names; verify before renaming.
resid_list=['zyyxscore','tcap','con_pe_0','tclose']
group_list=['stomgroup2','sigmagroup2','returngourp2','lncapgourp1','EGpegourp100'
            ,'EGpegourp40','EGpegourp20','NGshenwan3','NGshenwan2','NGshenwan1'
            ,'EGturnover40gourp100','EGturnover20gourp20','EGturnover40gourp40','EGstomgourp100'
            ,'EGstomgourp40','EGhsigmagourp100','EGhsigmagourp40','EGhsigmagourp20','EGlmcapgourp100'
            ,'EGlmcapgourp40','EGlmcapgourp20','EGreturn40gourp100','EGreturn40gourp40','EGreturn40gourp20'
            ,'EGreturn10gourp20','EG1scoregourp100','EG1scoregourp40','EG1scoregourp20'] 

# Evaluation window applied to all daily data slices below.
start_date='2016-01-01'
end_date='2021-09-30'

# total_task=[]



# Adjusted close prices over the evaluation window; used by plot_stock below.
tclose = dlib.get_stock_daily_data(name = 'tclose_adj').loc[start_date:end_date,:]
# topen = dlib.get_stock_daily_data(name = 'topen_adj').loc[start_date:end_date,:]
# thigh = dlib.get_stock_daily_data(name = 'high_adj').loc[start_date:end_date,:]
# tlow = dlib.get_stock_daily_data(name = 'low_adj').loc[start_date:end_date,:]
# Raw close/open/high/low price-volume dataset (not sliced to the window).
cohl=dlib.get_pv_data('cohl_raw')

# field_amt_2000=dlib.get_stock_field('field_amt_2000')
# field_liq_2000=dlib.get_stock_field('field_liq_2000')  
# tommorow_ret = dlib.get_split_min_data(1030, 'stk_ret').shift(-1)[start_date:]
# tommorow_ret.index = pd.DatetimeIndex(tommorow_ret.index.date)
# tommorow_ret5 =tommorow_ret.rolling(5, center=False,min_periods=1).sum().shift(-4)
# index_500ret=dlib.get_zyyx_data('qt_idx_daily','change_rate').loc[start_date:,'000905']/100


# returns=dlib.get_zyyx_data(name='qt_stk_daily',factor_name='change_rate').loc[start_date:end_date,:]
# volume = dlib.get_zyyx_data(name='qt_stk_daily',factor_name='volume').loc[start_date:end_date,:]
# amount=dlib.get_zyyx_data(name='qt_stk_daily',factor_name='amount').loc[start_date:end_date,:]
# mcap = dlib.get_zyyx_data(name='qt_stk_daily',factor_name='mcap').loc[start_date:end_date,:]
# score = dlib.get_zyyx_data(name='certainty_score_stk',factor_name='score').loc[start_date:end_date,:]
# con_pe_0 = dlib.get_zyyx_data(name='con_forecast_stk',factor_name='con_pe_0').loc[start_date:end_date,:]

# tommorow_ret_yz = dlib.get_split_min_data(935, 'stk_ret').shift(-1)['2017':]
# tommorow_ret_yz.index = pd.DatetimeIndex(tommorow_ret_yz.index.date)

# tommorow_ret3 =tommorow_ret.rolling(3, center=False,min_periods=1).sum().shift(-2)

# tommorow_ret10 =tommorow_ret.rolling(5, center=False,min_periods=1).sum().shift(-9)


# day5_zf=thigh.rolling(5, center=False,min_periods=1).max().shift(-4)/tclose -1
# day5_df=tlow.rolling(5, center=False,min_periods=1).min().shift(-4)/tclose -1
# day10_zf=thigh.rolling(10, center=False,min_periods=1).max().shift(-9)/tclose -1
# day10_df=tlow.rolling(10, center=False,min_periods=1).min().shift(-9)/tclose -1
# day15_zf=thigh.rolling(15, center=False,min_periods=1).max().shift(-14)/tclose -1
# day15_df=tlow.rolling(15, center=False,min_periods=1).min().shift(-14)/tclose -1

# day5_zf_gr=thigh.rolling(5, center=False,min_periods=1).max().shift(-5)/topen.shift(-1) -1
# day5_df_gr=tlow.rolling(5, center=False,min_periods=1).min().shift(-5)/topen.shift(-1) -1
# day5_good=day5_zf_gr[(day5_zf_gr>0.12)&(day5_df_gr>-0.03)]*(status.replace(0,np.nan))*100
# day5_good=day5_good.loc[start_date:end_date,:].replace(np.nan,0)    
    
    



  
def get_corr_year(factor,year):
    """Score a factor DataFrame of shape (dates x stocks) against forward returns.

    `year` is an overloaded selector:
      * -1 / 1 / 5 / 10        -> annualized IC IR vs. same-day / 1 / 5 / 10-day
                                  forward returns (factor shifted one day except -1)
      * 'ret_1_N' / 'ret_5_N'  -> (mean alpha %, win rate, sharpe) of the top-N
                                  ranked stocks vs. the 000905 index return
      * 'field_amt_2000' / 'field_liq_2000' -> IR restricted to a universe mask
      * '2015'..'2022'         -> IR within that calendar year only

    Returns the rounded IR (or the 3-tuple for 'ret_*'), 99 for an unknown
    selector, and 0 on any exception.

    NOTE(review): tommorow_ret / tommorow_ret5 / tommorow_ret10 / index_500ret /
    field_amt_2000 / field_liq_2000 are NOT defined in this module (their loads
    are commented out near the top), so most branches raise NameError, which the
    broad except converts into a return of 0. Confirm these globals are injected
    externally (e.g. in a notebook session) before trusting the results.
    """
    global conn,cursor
        
    try:
        if year == -1:
            # Same-day correlation (no shift) — look-ahead by construction.
            IC = factor.corrwith(tommorow_ret, axis=1).dropna()
            IR = IC.mean() / IC.std() * np.sqrt(243)  # 243 ~ trading days/year
        elif year == 1:
            IC = factor.shift(1).corrwith(tommorow_ret, axis=1).dropna()
            IR = IC.mean() / IC.std() * np.sqrt(243)
#         elif year == 3:
#             IC = factor.shift(1).corrwith(tommorow_ret3, axis=1).dropna()
#             IR = IC.mean() / IC.std() * np.sqrt(243)    
        elif year == 5:
            IC = factor.shift(1).corrwith(tommorow_ret5, axis=1).dropna()
            IR = IC.mean() / IC.std() * np.sqrt(243)
        elif year == 10:
            IC = factor.shift(1).corrwith(tommorow_ret10, axis=1).dropna()
            IR = IC.mean() / IC.std() * np.sqrt(243) 
        elif str(year).startswith('ret_1'):
            # Top-N portfolio, 1-day horizon: rank descending, keep N best.
            factor.dropna(how='all',inplace=True)
            stocknum=int(str(year).split('_')[2])
            actor_df = factor.shift(1).rank(axis=1,method='first',ascending=False)
            df=tommorow_ret[actor_df<=stocknum].mean(1)
            alpha=df-index_500ret
            meanret=round(alpha.mean() *100,3)  
            sharp=round((alpha.mean()* 243)/(alpha.std()* np.sqrt(243)),3)
            pro=round((alpha>0).sum()/alpha.count(),3)
            return meanret,pro,sharp
     
        elif str(year).startswith('ret_5'):
            # Same as above but on the 5-day cumulative forward return.
            factor.dropna(how='all',inplace=True)
            stocknum=int(str(year).split('_')[2])
            actor_df = factor.shift(1).rank(axis=1,method='first',ascending=False)
            df=tommorow_ret5[actor_df<=stocknum].mean(1)
            alpha=df-index_500ret
            meanret=round(alpha.mean() *100,3)  
            sharp=round((alpha.mean()* 243)/(alpha.std()* np.sqrt(243)),3)
            pro=round((alpha>0).sum()/alpha.count(),3)
            return meanret,pro,sharp   

        elif year == 'field_amt_2000':
            factor=factor*field_amt_2000
            IC = factor.shift(1).corrwith(tommorow_ret, axis=1).dropna()
            IR = IC.mean() / IC.std() * np.sqrt(243)      
            
        elif year == 'field_liq_2000':
            factor=factor*field_liq_2000
            IC = factor.shift(1).corrwith(tommorow_ret, axis=1).dropna()
            IR = IC.mean() / IC.std() * np.sqrt(243)     
        elif year in ['2015','2016','2017','2018','2019','2020','2021','2022']:
            IC = factor.shift(1).corrwith(tommorow_ret, axis=1).dropna()
            IR = IC[year].mean() / IC[year].std() * np.sqrt(243)
        else:
            return 99
        return round(IR,3)
    except Exception as e:
        print('error  ',e)
        traceback.print_exc()
        return 0
def plot_stock(data,starttime='2019-01-01',endtime='2024-01-01'):
    """Plot one randomly chosen stock's series from `data` against its close.

    A column is picked at random from `data`; its values over
    [starttime, endtime] are drawn together with the adjusted close price
    (module-level `tclose`) on a secondary y-axis of the same figure.
    """
    global conn,cursor
    chosen = random.choice(list(data.columns))
    factor_series = data[chosen].loc[starttime:endtime]
    close_series = tclose[chosen].loc[starttime:endtime]
    factor_series.plot(figsize=(24,5), label=str(chosen), legend=True)
    close_series.plot(figsize=(24,5), secondary_y=True, label='close', legend=True)
    
def print_IR(pl_factor):
    """Analyse a factor DataFrame and print its IR, sharp and alpha metrics.

    Prints the date range first, then coerces the factor's index to a
    DatetimeIndex in place and runs `factor_analyse` on it.
    """
    global conn,cursor

    print('范围:',pl_factor.index[0],pl_factor.index[-1])
    pl_factor.index = pd.DatetimeIndex(pl_factor.index)
    analysed = factor_analyse(pl_factor)
    # Same three metrics, same labels, same order as before.
    for label, value in (('IR_amt_2000', analysed.IR),
                         ('sharp_amt_2000', analysed.sharp),
                         ('alpha_amt_2000', analysed.alpha)):
        print(label, value)
    
    
    
def test_factor(final_factor,factor_name,IR_TYPE,train_mode='',to_hdf=True,express='',IR_limit=0.1,model_name='',factor_type='alpha'):
    """Analyse a factor, optionally persist it to HDF, and record metrics in MySQL.

    Args:
        final_factor: (dates x stocks) factor DataFrame; its index is coerced
            to a DatetimeIndex in place.
        factor_name: name used for the HDF files and the DB primary key.
        IR_TYPE: value stored in the `type` column (e.g. 'single').
        train_mode, express, model_name: provenance metadata stored as-is.
        to_hdf: write the factor and its signal-return series to disk; forced
            to False when the factor fails the IR screen.
        IR_limit: minimum |IR| to accept; 0 disables the screen entirely.
        factor_type: selects the target directory ('alpha' -> hdf_path,
            others -> basic_path).
    """
    global conn,cursor
    path_choice={'vol':basic_path,'price':basic_path,'ratio':basic_path,'alpha':hdf_path}
    status='ready'
    final_factor.index=pd.DatetimeIndex(final_factor.index)
    factor= factor_analyse(final_factor)
    if IR_limit!=0:
        # Reject weak factors: below the caller's limit OR below the global
        # absolute-IR floor of 2 used elsewhere in this pipeline.
        if round(np.abs(factor.IR),2)<=round(np.abs(IR_limit),2) or round(np.abs(factor.IR),2)<2:
            print(round(np.abs(factor.IR),2),'lower than IR_limit',round(np.abs(IR_limit),2))
            to_hdf=False
            status='useless'
    if to_hdf:
        final_factor.to_hdf(path_choice[factor_type]+factor_name+'.hdf','df')
        factor.signal_return.to_hdf(pnl_path+factor_name+'.hdf', 'df')

    IR_amt_2000=factor.IR
    sharp_amt_2000=factor.sharp
    alpha_amt_2000=factor.alpha
    # 7 robustness checks; count how many did NOT pass.
    robust_fail_num=7-(f_robust_analyse(factor)['pass'].astype(int)).sum()
    print('IR:%-15s sharp:%-15s alpha %-15s'%(IR_amt_2000,sharp_amt_2000,alpha_amt_2000))
    # Average number of stocks with a value per day.
    count_final = final_factor.count(1).mean()

    # Parameterized query: `express` routinely contains quotes, which made the
    # previous str.format-built SQL fragile and injectable.
    upsql=("REPLACE INTO `data_test_batch_IR` "
           "(`factor_name`, `IR`,`sharp`,`alpha`,`count` ,`model_name`,`factor_type`,"
           "`time`,`type`,`train_mode`,`express`,`robust_fail_num`,`status`) "
           "VALUES (%s, %s, %s, %s, %s, %s, %s, CURRENT_TIMESTAMP, %s, %s, %s, %s, %s)")
    cursor.execute(upsql, (factor_name,
                           float(round(IR_amt_2000,2)),
                           float(round(sharp_amt_2000,2)),
                           float(round(alpha_amt_2000,2)),
                           float(count_final),
                           model_name, factor_type,
                           IR_TYPE, train_mode, express,
                           int(robust_fail_num), status))
    # Every other writer in this module commits after execute; without this the
    # REPLACE was left pending on the shared connection.
    conn.commit()

    return
        
            
  
#   得到相关系数
#     IR_1 = get_corr_year(final_factor,1)
#     IR_5 = get_corr_year(final_factor,5)
#     IR_2021 = get_corr_year(final_factor,'2021')
#     final_factor =  alphalib.calibrate(final_factor,field_type = 'field0').loc[start_date:end_date,:] 
#     ret_1_10,pro_1_10,sharp_1_10= get_corr_year(final_factor,'ret_1_10')
#     ret_1_100,pro_1_100,sharp_1_100= get_corr_year(final_factor,'ret_1_100')
#     ret_1_500,pro_1_500,sharp_1_500= get_corr_year(final_factor,'ret_1_500')
#     ret_1_1000,pro_1_1000,sharp_1_1000= get_corr_year(final_factor,'ret_1_1000')
#     ret_5_10,pro_5_10,sharp_5_10= get_corr_year(final_factor,'ret_5_10')
#     ret_5_100,pro_5_100,sharp_5_100= get_corr_year(final_factor,'ret_5_100') 
#     ret_5_500,pro_5_500,sharp_5_500= get_corr_year(final_factor,'ret_5_500')
#     ret_5_1000,pro_5_1000,sharp_5_1000= get_corr_year(final_factor,'ret_5_1000')
#     corr_m1 = get_corr_year(final_factor,-1)
#     corr_max5 = get_corr_year(final_factor,100005)
#     corr_min5 = get_corr_year(final_factor,200005)
#     good5_50num_big=get_corr_year(final_factor,'good5_50num_big')
#     good5_50num_small=get_corr_year(final_factor,'good5_50num_small')
#     corr_good5=get_corr_year(final_factor,'good5')
#     corr_2017 = get_corr_year(final_factor,'2017')
#     corr_2018 = get_corr_year(final_factor,'2018')
#     corr_2019 = get_corr_year(final_factor,'2019')
#     corr_2020 = get_corr_year(final_factor,'2020')
#     corr_2021 = get_corr_year(final_factor,'2021')
#     corr_yz = get_corr_year(final_factor,'yz')
#     corr_liq_2000 = get_corr_year(final_factor,'field_liq_2000')
#     corr_amt_2000 = get_corr_year(final_factor,'field_amt_2000')
#     print('corr_2021:',corr_2021)
   

    
#     result_list=[]
#     result_i = [factor_name,corr_1,corr_5,corr_10,corr_m1,corr_max5,corr_max10,corr_max15,corr_min5,corr_min10,corr_min15,count_final,nowtime,corr_2021,corr_2020,corr_2019,corr_2018,corr_2017]
#     result_list.append(result_i)
#     result = pd.DataFrame(result_list)
#     result.columns = ['factor_name','IR_1','IR_5','IR_10','IR_m1','IR_max5','IR_max10','IR_max15','IR_min5',
#                       'IR_min10','IR_min15','IR_final','time','IR_2021','IR_2020','IR_2019','IR_2018','IR_2017']
# #     print(result)
#     file_path=target_path+'IR_result.csv'
# #     print(file_path)
#     if not os.path.exists(target_path):
#         print('目标路径不存在原文件夹---创建')
#         os.makedirs(target_path)
#     if os.path.exists(file_path) == True:  
#         old_result=pd.read_csv(file_path)
#         result=pd.concat([old_result,result], axis=0, join='outer')
#         result.sort_values(by='time', axis=0, ascending=False, inplace=True)
#         result.drop_duplicates(subset='factor_name',keep='first',inplace=True)
#         result.sort_values(by='IR_1', axis=0, ascending=False, inplace=True)
#         result.to_csv(file_path,index=0)
#     else:
#         result.sort_values(by='IR_1', axis=0, ascending=False, inplace=True)
#         result.to_csv(file_path,index=0)


    

   


    
    
def check_hdf(mode='test_not_in_sql',delete_locksql_flag='false'):
    """Reconcile factor HDF files under pnl_path with rows in data_test_batch_IR.

    Modes:
        'test_not_in_sql'   : run test_factor on every .hdf file whose name is
                              not yet in the table.
        'remove_not_in_sql' : delete such orphaned files from pnl_path and
                              hdf_path.
        'retest_all'        : re-run test_factor on every file found.

    Args:
        delete_locksql_flag: string flag ('true'/'false', kept for caller
            compatibility); when 'true', first purge weak/failed rows from
            the table.
    """
    global conn,cursor
    print('mode:',mode,'delete_locksql_flag:',delete_locksql_flag)
    index_num=0

    if delete_locksql_flag=='true':
        upsql="delete  from `data_test_batch_IR` where  status!='True' and status!='uploaded' and (abs(IR)<2 or  count<1000) "
        cursor.execute(upsql)
        conn.commit()
        upsql="delete  from `data_test_batch_IR` where  type  not in ('single') and status!='True' and status!='uploaded' and (robust_flag='False'  and  robust_fail_num>3) "
        cursor.execute(upsql)
        conn.commit()

    sql="select * from `data_test_batch_IR` "
    old_result= pd.read_sql(sql, con=conn)
    # Hoist the membership structure out of the walk: the original rebuilt the
    # full factor-name list (O(n)) for every single file, making the scan
    # accidentally quadratic. A set gives O(1) lookups.
    known_names=set(old_result['factor_name'])

    print('一共',len(old_result))
    for root, dirs, files in os.walk(pnl_path):
        for f in files:
            file_path=os.path.join(root, f)
            index_num+=1
            base,ext=os.path.splitext(f)
            try:
                if mode=='test_not_in_sql' and ext=='.hdf' and base not in known_names:
                    print('test---',root,'---------',index_num,'-----',base)
                    pl_factor=pd.read_hdf(file_path,'df')
                    test_factor(pl_factor,base,'single',to_hdf=False)
                elif mode=='remove_not_in_sql' and ext=='.hdf' and base not in known_names:
                    file_path1=os.path.join(pnl_path, f)
                    file_path2=os.path.join(hdf_path, f)
                    print('remove---',file_path1,)
                    print('remove---',file_path2)
                    if os.path.exists(file_path1):
                        os.remove(file_path1)
                    if os.path.exists(file_path2):
                        os.remove(file_path2)
                elif mode=='retest_all':
                    pl_factor=pd.read_hdf(file_path,'df')
                    test_factor(pl_factor,base,'single',to_hdf=False)

            except Exception as e:
                traceback.print_exc()
                print(e)

    
   
             

  
def cta_test(rum_num=10,thread_num=0):
    """Sample factors from data_test_batch_IR and record event-conditional
    statistics into data_test_batch_cta.

    For each randomly chosen unfinished factor, the z-scored factor values are
    averaged over several "market mostly down + next-week rally" event masks
    and compared with the unconditional average; a conditional forward return
    is also stored, then the factor is flagged `cta_flag='finish'`.

    NOTE(review): `thigh`, `topen` and `returns` are not defined in this module
    (their loads are commented out near the top), so this function raises
    NameError on its first data line unless those globals are injected
    externally (e.g. in a notebook session) — confirm before use.

    NOTE(review): parameter name `rum_num` is presumably a typo for `run_num`;
    kept as-is for caller compatibility.
    """
    global conn,cursor
    
    print('cta')
#     data_index_dpzf80=(((returns>0).sum(1)/returns.count(1))>0.8).replace(False,np.nan)
#     day5_rzf8_wlzf5=(day5_zf>0.05) & (returns>8)
#     day5_rzf8_wlzf4=(day5_zf>0.04) & (returns>8)
#     day5_rzf8_wlzf0=(returns>8)
#     day5_rdf7_wlzf5=(day5_zf>0.05) & (returns<-7)
#     day5_rdf7_wlzf4=(day5_zf>0.04) & (returns<-7)
#     day5_rdf7_wlzf0=(returns<-7)
#     day5_ljzf16_wlzf5=(day5_zf>0.05) & (returns.rolling(5).sum()>16)
#     day5_ljzf16_wlzf4=(day5_zf>0.04) & (returns.rolling(5).sum()>16)
#     day5_ljzf16_wlzf0=(returns.rolling(5).sum()>16)
#     day5_ljdf13_wlzf5=(day5_zf>0.05) & (returns.rolling(5).sum()<-13)
#     day5_ljdf13_wlzf4=(day5_zf>0.04) & (returns.rolling(5).sum()<-13)
#     day5_ljdf13_wlzf0=(returns.rolling(5).sum()<-13)
#     day5_dpzf80_wlzf5=(day5_zf>0.05).mul(data_index_dpzf80,axis=0).replace(1,True).replace(0,False)
#     day5_dpzf80_wlzf4=(day5_zf>0.04).mul(data_index_dpzf80,axis=0).replace(1,True).replace(0,False)
#     day5_dpzf80_wlzf0=(returns<100).mul(data_index_dpzf80,axis=0).replace(1,True).replace(0,False)
    # Next-5-day max gain relative to tomorrow's open (forward-looking label).
    day5_zf_gr=thigh.rolling(5, center=False,min_periods=1).max().shift(-5)/topen.shift(-1) -1
#     data_index_dpdf80=(((returns<0).sum(1)/returns.count(1))>0.8).replace(False,np.nan)
#     day5_dpdf80_wlzf6=(day5_zf_gr>0.06).mul(data_index_dpdf80,axis=0).replace(1,True).replace(0,False)
#     day5_dpdf80_wlzf5=(day5_zf_gr>0.05).mul(data_index_dpdf80,axis=0).replace(1,True).replace(0,False)
#     day5_dpdf80_wlzf4=(day5_zf_gr>0.04).mul(data_index_dpdf80,axis=0).replace(1,True).replace(0,False)
#     day5_dpdf80_wlzf0=(returns<100).mul(data_index_dpdf80,axis=0).replace(1,True).replace(0,False)
    # Days where >70% of stocks fell, crossed with different rally thresholds.
    data_index_dpdf70=(((returns<0).sum(1)/returns.count(1))>0.7).replace(False,np.nan)
    day5_dpdf70_wlzf6=(day5_zf_gr>0.06).mul(data_index_dpdf70,axis=0).replace(1,True).replace(0,False)
    day5_dpdf70_wlzf5=(day5_zf_gr>0.05).mul(data_index_dpdf70,axis=0).replace(1,True).replace(0,False)
    day5_dpdf70_wlzf4=(day5_zf_gr>0.04).mul(data_index_dpdf70,axis=0).replace(1,True).replace(0,False)
    day5_dpdf70_wlzf0=(returns<100).mul(data_index_dpdf70,axis=0).replace(1,True).replace(0,False)
    # Prefixes resolved via eval(prefix + '0'/'4'/'5'/'6') below; only this
    # hard-coded list is ever evaluated.
    robust_list=['day5_dpdf70_wlzf']
    

    sql1="select * from `data_test_batch_IR` where cta_flag !='finish'  "
    IR_data1= pd.read_sql(sql1, con=conn) 
    
    signle_list=IR_data1['factor_name'].tolist()
    print('signle_list:',len(signle_list))
    for j in range(rum_num):
        # Sampling with replacement: the same factor may be drawn repeatedly.
        choice_factor=random.choice(signle_list)
        print('thread_num',thread_num,'!!!!!!!!!!','第',j,'个/'+str(len(IR_data1))+'个 choice_factor:',choice_factor)
        if  not  os.path.exists(hdf_path+choice_factor+'.hdf'):
            print(choice_factor,'文件不存在',os.path.exists(hdf_path+choice_factor+'.hdf'))
        else:
            try:
                rets=0
                file_path=hdf_path+choice_factor+'.hdf'
                pl_factor=pd.read_hdf(file_path,'df' )
                pl_factor.index=pd.DatetimeIndex(pl_factor.index)
                pl_factor=z_score(pl_factor)
                
    
                
                for robust_item in robust_list:
                    
                    # Resolve the event-mask DataFrames defined above by name.
                    limit_0per=eval(robust_item+"0")
                    limit_4per=eval(robust_item+"4")
                    limit_5per=eval(robust_item+"5")
                    limit_6per=eval(robust_item+"6")
                
                    # Mean z-scored factor value inside each mask vs. baseline.
                    better_value_4=pl_factor[limit_4per].mean(1).mean()
                    better_value_5=pl_factor[limit_5per].mean(1).mean()
                    better_value_6=pl_factor[limit_6per].mean(1).mean()
                    better_value_0=pl_factor[limit_0per].mean(1).mean()
                    better4_diff=better_value_4-better_value_0
                    better5_diff=better_value_5-better_value_0
                    better6_diff=better_value_6-better_value_0

                    avg_count=limit_0per.sum(1).sum()
                    # Forward return conditional on the factor being on the
                    # "better" side of the 5%-threshold event average.
                    if better_value_5>better_value_0:
                        rets=day5_zf_gr[(pl_factor>better_value_5) & limit_0per].mean(1).mean()
                    else:
                        rets=day5_zf_gr[(pl_factor<better_value_5) & limit_0per].mean(1).mean()
                    upsql="REPLACE INTO `data_test_batch_cta` (`factor_name`, `cta_mode`, `avg_value`, `better4_value`, `better5_value`, `better6_value`,`better4_diff`, `better5_diff`,  `better6_diff`, `avg_count`, `rets`) \
                    VALUES ('{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}')".format(choice_factor,robust_item,better_value_0,better_value_4,better_value_5,better_value_6,better4_diff,better5_diff,better6_diff,avg_count,rets)
                    cursor.execute(upsql)
                    conn.commit() 
                    upsql="UPDATE `data_test_batch_IR` SET `cta_flag`='finish' WHERE (`factor_name`='{}') LIMIT 1".format(choice_factor)
                    cursor.execute(upsql)
                    conn.commit()


            except Exception as e:
                print(e)
                 
                
def add_JB_best(rum_num=10,thread_num=0):
    """Combine randomly sampled high-IR factors with the 'JB best' base factor
    and backtest the combination, storing results in data_test_batch.

    Each sampled factor (sign-aligned by its IR_1) is restricted to the top-90
    stocks of the JB_best ranking, pushed through a StockFactor backtest, and
    the run's statistics are inserted; the factor is then flagged
    `addjb_flag='finish'`.

    NOTE(review): `Basic_path` is not defined in this module (only a
    commented-out assignment exists near the top), so the first line raises
    NameError unless it is injected externally — confirm before use.

    NOTE(review): parameter name `rum_num` is presumably a typo for `run_num`;
    kept as-is for caller compatibility.
    """
    global conn,cursor
    
    JB_best=pd.read_hdf(Basic_path+'udcnpgexcess230.hdf')
    # Thin StockFactor subclass whose signal is an externally supplied
    # DataFrame (defaults to JB_best when set_changenum was never called).
    class MyFactor(StockFactor):
        def __init__(self):
            StockFactor.__init__(self)
        def set_changenum(self,return_data):
            self.return_data = return_data     
        def gen_factor_signal(self):
            try:
                return_data= self.return_data
            except:
                 return_data= JB_best
            return return_data   
    
    
    index_num=0;
    factor= MyFactor() 
    # Weekly-rebalanced long-only backtest vs. the 000905 benchmark.
    factor.set_property( trade_time = 1030, pos_limit=60, trade_period = 'week' ,bench_index = '000905',ud_limit=1,\
                        fee_rate = 0.003,  trade_nth=0,start_date='2018-01-01',end_date='2024-09-30', shift = 1, trade_mode = 'long',wgt=0)
    
    
    
    

  
    sql1="select * from `data_test_batch_IR` where addjb_flag !='finish' and abs(IR_1) >5 "
    IR_data1= pd.read_sql(sql1, con=conn) 
    
   
    signle_list=IR_data1['factor_name'].tolist()
    IR_data1=IR_data1.reset_index()
    IR_data1.drop_duplicates(subset=['factor_name'], keep='first', inplace=True)
    IR_data1.set_index('factor_name',inplace=True)
    
    print('signle_list:',len(signle_list))
    for j in range(rum_num):
        # Sampling with replacement: the same factor may be drawn repeatedly.
        choice_factor=random.choice(signle_list)
        print('thread_num',thread_num,'!!!!!!!!!!','第',j,'个/'+str(len(IR_data1))+'个 choice_factor:',choice_factor)
        if  not  os.path.exists(hdf_path+choice_factor+'.hdf'):
            print(choice_factor,'文件不存在',os.path.exists(hdf_path+choice_factor+'.hdf'))
        else:
            try:
                file_path=hdf_path+choice_factor+'.hdf'
                pl_factor=pd.read_hdf(file_path,'df' )
                # Flip the factor so a higher value is always "better".
                pl_factor=pl_factor* np.sign(IR_data1.loc[choice_factor,'IR_1'])
        
                # Keep only the 90 best-ranked JB_best stocks each day.
                factor_rank = JB_best.rank(axis=1,method='min',ascending=False)
                final_result=pl_factor[factor_rank<=90]
                factor.set_changenum(final_result)
                factor.regen_factor_signal()
                upsql="INSERT INTO `data_test_batch` (`test_name`, `change_name`, `parm_dict`, `trade_period`, `sharp`,\
                `updatetime`, `pos_limit`, `trade_time`, `avg_turnover`, `annual_alpha`, `last_year_alpha`, \
                `last_year_sharp`, `trade_fee`, `count_list`, `trade_day`, `trade_nth`, `wgt`, `longest_drawdown`,\
                `max_drawdown`, `one_week_alpha`, `two_week_alpha`, `three_week_alpha`, `half_year_alpha`, `thread_num`) \
                VALUES ('{}', '{}', '{}', '{}', '{}', CURRENT_TIMESTAMP, '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}')".format(
                choice_factor,'add_jb_best','add_mode_1',factor.trade_period,factor.sharp,
                factor.pos_limit,factor.trade_time,factor.avg_turnover,factor.annual_alpha,factor.last_year_alpha,
                factor.last_year_sharp,factor.trade_fee,factor.stock_count,len(factor.signal_return), factor.trade_nth,factor.wgt,factor.longest_drawdown, 
                factor.max_drawdown,factor.one_week_alpha,factor.two_week_alpha,factor.three_week_alpha,factor.half_year_alpha,thread_num)
                print(factor.sharp)
                cursor.execute(upsql)
                conn.commit()
                upsql="UPDATE `data_test_batch_IR` SET `addjb_flag`='finish' WHERE (`factor_name`='{}') LIMIT 1".format(choice_factor)
                cursor.execute(upsql)
                conn.commit()


            except Exception as e:
                print(e)
  

def make_factor_bymodel(run_num_2,thread_num,model_name,total_task,factor_type=''):
    """Generate and test factors from a model's expression list.

    On first call for a model, the full expression list is built, cached as a
    .npy file under model_path, and registered in data_test_batch_model; on
    later calls the remaining task list is reloaded from that file. Each
    iteration pops a random expression, exec()s it to build a factor, and
    passes the result to test_factor.

    Args:
        run_num_2: number of expressions to process in this call.
        thread_num: worker id; also used (mod 30) as a startup stagger sleep.
        model_name: base model identifier.
        total_task: shared in/out task list; mutated in place.
        factor_type: optional suffix/category ('' -> plain 'alpha').

    NOTE(review): exec() of generated expression strings is inherently unsafe
    if any expression comes from an untrusted source — confirm all models are
    produced locally by make_totalexpress_by_model_name.

    NOTE(review): each executed expression is expected to bind a global
    `returndata` (consumed by test_factor below) — confirm against the
    expression templates.

    NOTE(review): the shared .npy task file is rewritten every iteration with
    no locking (mysqllock is commented out), so concurrent workers can lose or
    duplicate tasks.
    """
    # if factor_type=='all':
    #     make_factor_bymodel(run_num_2,thread_num,model_name,total_task,'price')
    #     make_factor_bymodel(run_num_2,thread_num,model_name,total_task,'vol')
    #     make_factor_bymodel(run_num_2,thread_num,model_name,total_task,'ratio')
    #     return
    global dlib,conn,cursor
    # mysqllock.acquire()  

    # Stagger worker start-up to avoid all threads hitting the DB at once.
    thread_num1=int(thread_num)%30
    time.sleep(thread_num1)
    total_model_name=model_name+'_'+factor_type.upper() if factor_type !='' else model_name
    if( not os.path.exists(model_path+total_model_name+'.npy') and len(total_task)==0 ):
        # First run for this model: build and persist the expression list.
        print('make_factor_bymodel---建立模型',total_model_name)
        model_list=make_totalexpress_by_model_name(model_name,factor_type)
        # return model_list
        total_task.extend(model_list)
        a=np.array(model_list)
        np.save(model_path+total_model_name+'.npy',a)   # persist the remaining task list as .npy
        sql1="REPLACE INTO `data_test_batch_model` (`model_name`, `total_task_num`, `finish_num`, `remain_num`, `error_num`) VALUES ('{}', '{}', '0', '{}',0)\
        ".format(total_model_name,len(total_task),len(total_task))
        print(sql1)
        cursor.execute(sql1)
        conn.commit()
    elif os.path.exists(model_path+total_model_name+'.npy') and len(total_task)==0:
        # Resume: reload the remaining tasks from disk.
        a=np.load(model_path+total_model_name+'.npy',allow_pickle=True)
        if len(a.tolist())==0:
            print('全部结束')
            sql1="update  `data_test_batch_model`   set `remain_num`=0  where `model_name`='{}'".format(total_model_name)
            cursor.execute(sql1)
            conn.commit()
            update_model_num(total_model_name)
            return
        total_task.extend(a.tolist())
    # mysqllock.release()

    # strat_num=round(len(total_task)/total_thread_num *thread_num)
    # print('task_num:',len(total_task),'strat_num',strat_num)

    for j in range(run_num_2):
        if j%4==1:
            # Periodically refresh the model's progress counters.
            update_model_num(total_model_name)
        total_task_item=random.choice(total_task)
        # Tasks are either {'express','factor_name'} dicts or bare expression strings.
        if isinstance(total_task_item,dict):
            expressstr=total_task_item['express']
            new_factor_name=total_task_item['factor_name']
        elif isinstance(total_task_item,str):
            expressstr=total_task_item
            new_factor_name=''
        else:
            raise TypeError('类型异常')
        total_task.remove(total_task_item)
        a=np.array(total_task)
        np.save(model_path+total_model_name+'.npy',a)   # persist the remaining task list as .npy
#         expressstr=replace_source_name(expressstr)
        print('make_factor_bymodel--'+total_model_name+'-thread_num',thread_num,'!!!!!!!!!!','第',j,'个/',run_num_2,'个------剩余:', len(total_task),'个', )
        if new_factor_name =='':
            # Timestamp-derived name for anonymous expressions.
            new_factor_name='alpha_'+str(round(time.time()*10000))
        # Single quotes would break the format-built SQL below; normalize them.
        expressstr=expressstr.replace('\'','\"')
        
        # Skip expressions already tested under any factor name.
        sql2="select factor_name from `data_test_batch_IR` where  express='{}'  ".format(expressstr)
        IR_name = pd.read_sql(sql2, con=conn) 
        if len(IR_name)!=0:
            print('make_factor_bymodel----already have this factor')
            continue

        try:
            IR_limit=0.1
            if 'BASIC_' in total_model_name:
                # BASIC models are stored unconditionally (no IR screen).
                IR_limit=0
            # print(expressstr)     
#             glc={}
            
#             loc={'dlib': dlib,'alphalib':alphalib,'np':np,'pd':pd,'changeDayData2':changeDayData2,'datetime':datetime}
#             exec(expressstr,glc,loc)
            exec(expressstr,globals())
            factor_type = 'alpha' if factor_type=='' else factor_type
            test_factor(returndata,new_factor_name,'single',express=expressstr,model_name=total_model_name,IR_limit=IR_limit,factor_type=factor_type)
            sql1="update  `data_test_batch_model` \
            set `remain_num`={}  where `model_name`='{}'".format(len(total_task),total_model_name)

            cursor.execute(sql1)
            conn.commit()
        except:
            # Count the failure and keep going with the next expression.
            sql1="update  `data_test_batch_model` \
            set `remain_num`={},error_num= error_num+1 where `model_name`='{}'".format(len(total_task),total_model_name)

            cursor.execute(sql1)
            conn.commit()
            error_type, error_value, error_trace = sys.exc_info() 
            print(error_type) 
            print(error_value) 
            for info in traceback.extract_tb(error_trace):  
                print(info)

   
    
    
def expand_factor(run_num_2,thread_num,table='first'):
    """Expand pending single factors with the transforms from make_final_add_dict.

    Picks up to ``run_num_2`` random factors whose ``train_flag`` is not
    'finish', applies every transform expression to each, submits the result
    to ``test_factor`` and flags the factor 'finish' in data_test_batch_IR.

    Parameters:
        run_num_2 (int): number of factors to process in this call.
        thread_num: worker identifier, used only in log output.
        table (str): key forwarded to make_final_add_dict.
    """
    global conn,cursor
    # NOTE(review): the generated expression reads HDF_DATA by name and the
    # exec() below runs against globals(); as a plain function local,
    # HDF_DATA would be invisible to the exec'd code (NameError, silently
    # swallowed by the except).  returndata is likewise produced at module
    # scope by the exec'd expression.
    global HDF_DATA,returndata

    add_dict=make_final_add_dict(table)
    sql1="select * from `data_test_batch_IR` where train_flag !='finish' and status!='useless'  limit 300 "
    IR_data1= pd.read_sql(sql1, con=conn)
    if len(IR_data1)==0:
        print('expand_factor---train_IR 没有可处理的')
        cursor.close()
        conn.close()
        return
    signle_list=IR_data1['factor_name'].tolist()
    IR_data1.set_index('factor_name',inplace=True)
    print('expand_factor---剩余因子数:',len(signle_list))
    for j in range(run_num_2):
        # Each pick is removed from the pool; stop early once the pool is
        # exhausted instead of letting random.choice raise on an empty list.
        if not signle_list:
            break
        choice_factor=random.choice(signle_list)
        signle_list.remove(choice_factor)
        if not os.path.exists(hdf_path+choice_factor+'.hdf'):
            print(choice_factor,'文件不存在',os.path.exists(hdf_path+choice_factor+'.hdf'))
        else:
            HDF_DATA=pd.read_hdf(hdf_path+choice_factor+'.hdf','df' )
            for add_dict_item in add_dict:
                try:
                    print('expand_factor---- ',table,' thread_num',thread_num,'!!!!!!!!!!','第',j,'个/'+str(len(IR_data1))+'个 choice_factor:',choice_factor,add_dict_item)
                    new_factor_name='alpha_'+str(round(time.time()*10000))
                    # Orient the base factor so its IR is positive before
                    # applying the transform expression.
                    signstr1='1' if np.sign(IR_data1.loc[choice_factor,'IR'])>0 else '-1'
                    expressstr=choice_factor+"=HDF_DATA;\n"
                    expressstr+="data1="+signstr1+"*"+choice_factor+";\n"
                    expressstr+=change_factor_signal(add_dict_item)
                    # Skip expressions that were already evaluated earlier.
                    sql2="select factor_name from `data_test_batch_IR` where  express='{}'  ".format(expressstr)
                    IR_name = pd.read_sql(sql2, con=conn)
                    if len(IR_name)!=0:
                        print('already have this factor')
                        continue
                    exec(expressstr,globals())
                    max_IR=IR_data1.loc[choice_factor,'IR']
                    TrainMode=add_dict_item['mode']
                    ModelName=IR_data1.loc[choice_factor,'model_name']
                    if table == 'final':
                        max_IR=0
                    # returndata was set at module scope by the exec above.
                    test_factor(returndata,new_factor_name,'train',train_mode=TrainMode,model_name= ModelName,IR_limit=max_IR,express=expressstr)
                    sql1="update  `data_test_batch_model` \
                    set `remain_num`={}  where `model_name`='{}'".format(len(signle_list),ModelName) if False else sql1
                except Exception:
                    # Best-effort batch job: flag the factor finished so it is
                    # not retried, log the traceback and move on.
                    upsql="UPDATE `data_test_batch_IR` SET `train_flag`='finish' WHERE (`factor_name`='{}') LIMIT 1".format(choice_factor)
                    cursor.execute(upsql)
                    conn.commit()
                    error_type, error_value, error_trace = sys.exc_info()
                    print(error_type)
                    print(error_value)
                    for info in traceback.extract_tb(error_trace):
                        print(info)

                # Mark the factor finished.  This runs once per transform,
                # which is redundant but harmless (the flag is idempotent).
                upsql="UPDATE `data_test_batch_IR` SET `train_flag`='finish' WHERE (`factor_name`='{}') LIMIT 1".format(choice_factor)
                cursor.execute(upsql)
                conn.commit()

    

def relevance_factor(run_num_2,thread_num,ir_threshold):
    """Combine random pairs of high-IR factors via the 'mutual' transforms.

    For up to ``run_num_2`` distinct pairs drawn from factors with
    ``abs(IR) > ir_threshold``, builds a two-input expression for every
    transform in the 'mutual' add_dict and submits it to ``test_factor``
    with an IR limit equal to the better parent's abs(IR).

    Parameters:
        run_num_2 (int): number of pairs to process in this call.
        thread_num: worker identifier, used only in log output.
        ir_threshold (float): minimum abs(IR) for a factor to qualify.
    """
    global conn,cursor
    # NOTE(review): the generated expression reads HDF_DATA1/HDF_DATA2 by
    # name and exec() below runs against globals(); as plain locals these
    # would be invisible to the exec'd code.  returndata is likewise set at
    # module scope by the expression.
    global HDF_DATA1,HDF_DATA2,returndata

    add_dict=make_final_add_dict(table='mutual')
    sql1="select * from `data_test_batch_IR` where  abs(IR)>{}   and status!='useless'  limit 300".format(ir_threshold)
    IR_data1= pd.read_sql(sql1, con=conn)
    if len(IR_data1)==0:
        print('train_IR 已经结束')
        cursor.close()
        conn.close()
        return
    signle_list=IR_data1['factor_name'].tolist()
    IR_data1.set_index('factor_name',inplace=True)
    print('relevance_factor----剩余因子数:',len(signle_list))

    for j in range(run_num_2):
        # random.sample guarantees two DISTINCT names.  The previous pair of
        # independent random.choice calls could pick the same factor twice and
        # crash on the second remove(); also stop once fewer than two remain.
        if len(signle_list)<2:
            break
        choice_factor1,choice_factor2=random.sample(signle_list,2)
        signle_list.remove(choice_factor1)
        signle_list.remove(choice_factor2)

        if not os.path.exists(hdf_path+choice_factor1+'.hdf') or not os.path.exists(hdf_path+choice_factor2+'.hdf'):
            print(choice_factor1,'文件不存在',os.path.exists(hdf_path+choice_factor1+'.hdf'))
            print(choice_factor2,'文件不存在',os.path.exists(hdf_path+choice_factor2+'.hdf'))
        else:
            # NOTE(review): the data are sign-adjusted here AND signstr1/2
            # below prefix '-1*' again for negative-IR factors, which looks
            # like a double flip back to the original orientation — confirm
            # intent before changing (behavior preserved as-is).
            HDF_DATA1=pd.read_hdf(hdf_path+choice_factor1+'.hdf','df' )*np.sign(IR_data1.loc[choice_factor1,'IR'])
            HDF_DATA2=pd.read_hdf(hdf_path+choice_factor2+'.hdf','df' )*np.sign(IR_data1.loc[choice_factor2,'IR'])

            # The combined factor has to beat the better of its two parents.
            max_IR=0
            if np.abs(IR_data1.loc[choice_factor1,'IR'])>max_IR:
                max_IR=np.abs(IR_data1.loc[choice_factor1,'IR'])
            if np.abs(IR_data1.loc[choice_factor2,'IR'])>max_IR:
                max_IR=np.abs(IR_data1.loc[choice_factor2,'IR'])
            signstr1='' if np.sign(IR_data1.loc[choice_factor1,'IR'])>0 else '-1*'
            signstr2='' if np.sign(IR_data1.loc[choice_factor2,'IR'])>0 else '-1*'

            for add_dict_item in add_dict:
                try:
                    new_factor_name='alpha_'+str(round(time.time()*10000))
                    print('relevance_factor----thread_num',thread_num,'!!!!!!!!!!','第',j,'个/'+str(len(IR_data1))+'个 :',choice_factor1+'|',choice_factor2,add_dict_item)
                    expressstr=choice_factor1+"=HDF_DATA1;\n"
                    expressstr+="data1="+signstr1+choice_factor1+";\n"
                    expressstr+=choice_factor2+"=HDF_DATA2;\n"
                    expressstr+="data2="+signstr2+choice_factor2+";\n"
                    expressstr+=change_factor_signal(add_dict_item)

                    # Skip expressions that were already evaluated earlier.
                    sql2="select factor_name from `data_test_batch_IR` where  express='{}'  ".format(expressstr)
                    IR_name = pd.read_sql(sql2, con=conn)
                    if len(IR_name)!=0:
                        print('relevance_factor----already have this factor',choice_factor1+'|',choice_factor2)
                        continue
                    exec(expressstr,globals())
                    TrainMode=add_dict_item['mode']
                    ModelName=str(IR_data1.loc[choice_factor1,'model_name'])+','+str(IR_data1.loc[choice_factor2,'model_name'])
                    # returndata was set at module scope by the exec above.
                    test_factor(returndata,new_factor_name,'relevance',train_mode=TrainMode,model_name= ModelName,IR_limit=max_IR,express=expressstr)

                except Exception:
                    error_type, error_value, error_trace = sys.exc_info()
                    print(error_type)
                    print(error_value)
                    for info in traceback.extract_tb(error_trace):
                        print(info)

            
def combine_factor(run_num_1,thread_num,ir_threshold):
    """Build composite factors by z-scoring and summing 2-4 random high-IR factors.

    Each composite is the sum of MAD-outlier z-scores of the sampled factors,
    each oriented to positive IR; the result is submitted to ``test_factor``
    with an IR limit equal to the best parent's abs(IR).

    Parameters:
        run_num_1 (int): number of composites to attempt in this call.
        thread_num: worker identifier, used only in log output.
        ir_threshold (float): minimum abs(IR) for a factor to qualify.
    """
    global conn,cursor
    # NOTE(review): the generated expression reads HDF_DATA (and the
    # module-level tclose frame) by name and exec() below runs against
    # globals(); as a plain local, HDF_DATA would be invisible to the exec'd
    # code.  returndata is likewise set at module scope by the expression.
    global HDF_DATA,returndata

    print('combine_factor----thread_num',thread_num,'!!!!!!!!!!','第:',ir_threshold,'轮')
    sql1="select * from `data_test_batch_IR` where type in ('single','train','relevance') and ABS(IR)> {} and count>2000  and status!='useless'  limit 300" .format(ir_threshold)
    IR_data1= pd.read_sql(sql1, con=conn)
    signle_list=IR_data1['factor_name'].tolist()
    IR_data1=IR_data1.reset_index()
    IR_data1.drop_duplicates(subset=['factor_name'], keep='first', inplace=True)
    IR_data1.set_index('factor_name',inplace=True)

    print('combine_factor----IR>',ir_threshold,'的因子个数:',len(signle_list))
    add_dict=make_final_add_dict(table='final')

    for j in range(run_num_1):
        try:
            new_factor_name='alpha_'+str(round(time.time()*10000))
            randint=random.randint(2,4)
            # Not enough candidates to sample — nothing useful to combine.
            if len(signle_list)<randint:
                break
            choice_factor_list= random.sample(signle_list,randint)
            choice_factor_list.sort()
            print('combine_factor----第',j,'个 ','choice_factor:',choice_factor_list)

            max_IR=0
            HDF_DATA={}
            ModelName=''
            # The composite starts from a zero frame shaped like tclose.
            expressstr='returndata= pd.DataFrame(0,index=tclose.index,columns=tclose.columns);\n'
            for key,factorname_item in enumerate(choice_factor_list):
                if not os.path.exists(hdf_path+factorname_item+'.hdf'):
                    print(factorname_item,'文件不存在',os.path.exists(hdf_path+factorname_item+'.hdf'))
                    continue
                HDF_DATA[key]=pd.read_hdf(hdf_path+factorname_item+'.hdf','df')
                expressstr+=factorname_item+"=HDF_DATA["+str(key)+"];\n"
                expressstr+=factorname_item+".replace([np.inf,-np.inf],np.nan,inplace=True);\n"
                # Orient each component so its IR is positive before summing.
                signstr1='' if np.sign(IR_data1.loc[factorname_item,'IR'])>0 else '-1*'
                expressstr+="returndata+=alphalib.z_score("+signstr1+factorname_item+",outliner=\"mad\").fillna(0);\n"
                ModelName+=str(IR_data1.loc[factorname_item,'model_name'])
                if np.abs(IR_data1.loc[factorname_item,'IR'])>max_IR:
                    max_IR=np.abs(IR_data1.loc[factorname_item,'IR'])
            # Skip expressions that were already evaluated earlier.
            sql2="select factor_name from `data_test_batch_IR` where  express='{}'  ".format(expressstr)
            IR_name = pd.read_sql(sql2, con=conn)
            if len(IR_name)!=0:
                print('combine_factor----already have this factor',choice_factor_list)
                continue
            exec(expressstr,globals())
            # BUGFIX(review): add_dict_item was never assigned in this
            # function, so the old TrainMode=add_dict_item['mode'] raised
            # NameError on every iteration (swallowed below) and test_factor
            # never ran.  add_dict was fetched but unused; pick a random
            # 'final' transform's mode, matching how the sibling functions
            # use add_dict — confirm the intended mode with the author.
            TrainMode=random.choice(add_dict)['mode']
            test_factor(returndata,new_factor_name,'combine',train_mode=TrainMode,model_name= ModelName,IR_limit=max_IR,express=expressstr)

        except Exception:
            error_type, error_value, error_trace = sys.exc_info()
            print(error_type)
            print(error_value)
            for info in traceback.extract_tb(error_trace):
                print(info)

    return
                                                                           
                                                                  
def group_test(run_num_2,thread_num,ir_threshold):
    """Group-neutralize pending high-IR factors against every grouping in group_list.

    Parameters:
        run_num_2 (int): number of factors to process in this call.
        thread_num: worker identifier, used only in log output.
        ir_threshold (float): minimum abs(IR) for a factor to qualify.
    """
    global conn,cursor
    # NOTE(review): the generated expression reads HDF_DATA / HDF_DATAgroup
    # by name and exec() below runs against globals(); as plain locals these
    # would be invisible to the exec'd code.  returndata is likewise set at
    # module scope by the expression.
    global HDF_DATA,HDF_DATAgroup,returndata

    sql1="select * from `data_test_batch_IR` where  group_flag !='finish' and abs(IR)>{}  and status!='useless' limit 300".format(ir_threshold)
    IR_data1= pd.read_sql(sql1, con=conn)
    if len(IR_data1)==0:
        print('group_test 都已算完')
        cursor.close()
        conn.close()
        return

    signle_list=IR_data1['factor_name'].tolist()
    print('Group_test----signle_list:',len(signle_list))
    IR_data1.drop_duplicates(subset=['factor_name'], keep='first', inplace=True)
    IR_data1.set_index('factor_name',inplace=True)

    for j in range(run_num_2):
        choice_factor=random.choice(signle_list)
        print('Group_test----thread_num',thread_num,'!!!!!!!!!!','第',j,'个/'+str(len(IR_data1))+'个 choice_factor:',choice_factor)
        if not os.path.exists(hdf_path+choice_factor+'.hdf'):
            print(choice_factor,'文件不存在',os.path.exists(hdf_path+choice_factor+'.hdf'))
        else:
            # Flag first so a crash below cannot leave the factor re-selectable.
            upsql="UPDATE `data_test_batch_IR` SET `group_flag`='finish'  WHERE (`factor_name`='{}') LIMIT 1".format(choice_factor)
            cursor.execute(upsql)
            conn.commit()
            HDF_DATA=pd.read_hdf(hdf_path+choice_factor+'.hdf','df' )

            # group_list is a module-level list of grouping names defined
            # elsewhere in this file.
            for group_type in group_list:
                try:
                    new_factor_name='alpha_'+str(round(time.time()*10000))
                    HDF_DATAgroup=pd.read_hdf(group_path+group_type+'.hdf','df' )
                    HDF_DATAgroup.index=pd.DatetimeIndex(HDF_DATAgroup.index)

                    expressstr=choice_factor+"=HDF_DATA;\n"
                    expressstr+=group_type+"=HDF_DATAgroup;\n"
                    # Orient the factor so its IR is positive before neutralizing.
                    signstr1='' if np.sign(IR_data1.loc[choice_factor,'IR'])>0 else '-1*'
                    expressstr+="data1="+signstr1+choice_factor+";\n"
                    expressstr+="returndata = alphalib.group_neutralize(data1.dropna(how=\"all\"),"+group_type+");"
                    # Skip expressions that were already evaluated earlier.
                    sql2="select factor_name from `data_test_batch_IR` where  express='{}'  ".format(expressstr)
                    IR_name = pd.read_sql(sql2, con=conn)
                    if len(IR_name)!=0:
                        print('Group_test----already have this factor',choice_factor)
                        continue
                    exec(expressstr,globals())
                    TrainMode='group_neutralize:'+str(group_type)
                    ModelName=IR_data1.loc[choice_factor,'model_name']
                    # returndata was set at module scope by the exec above.
                    test_factor(returndata,new_factor_name,'group',train_mode=TrainMode,model_name= ModelName,IR_limit=IR_data1.loc[choice_factor,'IR'],express=expressstr)

                except Exception:
                    error_type, error_value, error_trace = sys.exc_info()
                    print(error_type)
                    print(error_value)
                    for info in traceback.extract_tb(error_trace):
                        print(info)
    
def resid_test(run_num_2,thread_num,ir_threshold):
    """Linear-neutralize pending high-IR factors against every series in resid_list.

    Parameters:
        run_num_2 (int): number of factors to process in this call.
        thread_num: worker identifier, used only in log output.
        ir_threshold (float): minimum abs(IR) for a factor to qualify.
    """
    global conn,cursor
    # NOTE(review): the generated expression reads HDF_DATA / HDF_DATAgroup
    # by name and exec() below runs against globals(); as plain locals these
    # would be invisible to the exec'd code.  returndata is likewise set at
    # module scope by the expression.
    global HDF_DATA,HDF_DATAgroup,returndata

    sql1="select * from `data_test_batch_IR` where  Resid_flag !='finish' and abs(IR)>{} and status!='useless' limit 300".format(ir_threshold)
    IR_data1= pd.read_sql(sql1, con=conn)
    if len(IR_data1)==0:
        print('Resid_test----都已算完')
        cursor.close()
        conn.close()
        return

    signle_list=IR_data1['factor_name'].tolist()
    print('Resid_test----剩余个数:',len(signle_list))
    IR_data1.drop_duplicates(subset=['factor_name'], keep='first', inplace=True)
    IR_data1.set_index('factor_name',inplace=True)

    for j in range(run_num_2):
        choice_factor=random.choice(signle_list)
        print('Resid_test----thread_num',thread_num,'!!!!!!!!!!','第',j,'个/'+str(len(IR_data1))+'个 choice_factor:',choice_factor)
        if not os.path.exists(hdf_path+choice_factor+'.hdf'):
            print(choice_factor,'文件不存在',os.path.exists(hdf_path+choice_factor+'.hdf'))
        else:
            # Flag first so a crash below cannot leave the factor re-selectable.
            upsql="UPDATE `data_test_batch_IR` SET `Resid_flag`='finish'  WHERE (`factor_name`='{}') LIMIT 1".format(choice_factor)
            cursor.execute(upsql)
            conn.commit()
            HDF_DATA=pd.read_hdf(hdf_path+choice_factor+'.hdf','df' )

            # resid_list is a module-level list of neutralization targets
            # defined elsewhere in this file.
            for group_type in resid_list:
                try:
                    new_factor_name='alpha_'+str(round(time.time()*10000))
                    HDF_DATAgroup=pd.read_hdf(group_path+group_type+'.hdf','df' )
                    HDF_DATAgroup.index=pd.DatetimeIndex(HDF_DATAgroup.index)

                    expressstr=choice_factor+"=HDF_DATA;\n"
                    expressstr+=group_type+"=HDF_DATAgroup;\n"
                    # Orient the factor so its IR is positive before neutralizing.
                    signstr1='' if np.sign(IR_data1.loc[choice_factor,'IR'])>0 else '-1*'
                    expressstr+="data1="+signstr1+choice_factor+";\n"
                    expressstr+="returndata = alphalib.linear_neutralize(data1.dropna(how=\"all\"),"+group_type+");"
                    # Skip expressions that were already evaluated earlier.
                    sql2="select factor_name from `data_test_batch_IR` where  express='{}'  ".format(expressstr)
                    IR_name = pd.read_sql(sql2, con=conn)
                    if len(IR_name)!=0:
                        print('Resid_test----already have this factor',choice_factor)
                        continue
                    exec(expressstr,globals())
                    TrainMode='linear_neutralize:'+str(group_type)
                    ModelName=IR_data1.loc[choice_factor,'model_name']
                    # returndata was set at module scope by the exec above.
                    test_factor(returndata,new_factor_name,'Resid',train_mode=TrainMode,model_name= ModelName,IR_limit=IR_data1.loc[choice_factor,'IR'],express=expressstr)
                except Exception:
                    error_type, error_value, error_trace = sys.exc_info()
                    print(error_type)
                    print(error_value)
                    for info in traceback.extract_tb(error_trace):
                        print(info)
    
    
def robust_test(run_num_2,thread_num):
    """Run Rtest robustness checks on untested high-IR factors and store results.

    For each randomly chosen candidate, runs Rtest on the sign-oriented
    factor data and writes the pass flag, failed-check summary, failure
    count and long-ratio back to data_test_batch_IR.

    Parameters:
        run_num_2 (int): number of factors to process in this call.
        thread_num: worker identifier, used only in log output.
    """
    global conn,cursor

    sql1="select * from `data_test_batch_IR` where  robust_flag !='False' and  robust_flag !='True'  and status!='useless' and robust_fail_num =0 \
    and abs(IR) >2   order by abs(IR) limit 400"

    IR_data1= pd.read_sql(sql1, con=conn)
    signle_list=IR_data1['factor_name'].tolist()
    IR_data1=IR_data1.reset_index()
    IR_data1.drop_duplicates(subset=['factor_name'], keep='first', inplace=True)
    IR_data1.set_index('factor_name',inplace=True)

    print('robust_test----剩余待检测robust个数:',len(signle_list))
    # BUGFIX(review): the sibling functions return early on an empty result
    # set, but this one did not — random.choice([]) below raised an uncaught
    # IndexError.
    if not signle_list:
        return
    for j in range(run_num_2):
        choice_factor=random.choice(signle_list)
        if not os.path.exists(hdf_path+choice_factor+'.hdf'):
            print(choice_factor,'文件不存在',os.path.exists(hdf_path+choice_factor+'.hdf'))
        else:
            try:
                FINAL_DATA=pd.read_hdf(hdf_path+choice_factor+'.hdf','df' )
                FINAL_DATA.index=pd.DatetimeIndex(FINAL_DATA.index)
                # Orient the factor so its IR is positive before testing.
                FINAL_DATA=FINAL_DATA* np.sign(IR_data1.loc[choice_factor,'IR'])

                robust_result=Rtest(FINAL_DATA)
                robust_result.robust_test()
                robust_list=robust_result.test_result
                robust_flag=str(robust_result.rt_pass)
                robust_good=str(robust_result.good_long)
                error_str=''
                fail_num=0
                long_ratio=0
                # Summarize every failed check as name_value/limit| fragments.
                # (loop variable renamed from `type`, which shadowed the builtin)
                for check_name in robust_list.index:
                    if robust_list.loc[check_name,'pass']==False:
                        if check_name=='alphaf_long_ratio':
                            long_ratio=robust_list.loc[check_name,'value']

                        fail_num+=1
                        error_str+=str(check_name)+'_'+str(round(robust_list.loc[check_name,'value'],2))+'/'+str(round(robust_list.loc[check_name,'limit'],2))+'|'
                # Parameterized so quotes/backslashes in error_str cannot
                # break the statement (the old str.format did not escape).
                upsql="UPDATE `data_test_batch_IR` SET `robust_flag`=%s,`robust_good`=%s,robust_info=%s,robust_fail_num=%s,alpha_long_ratio=%s \
                WHERE (`factor_name`=%s) LIMIT 1"
                cursor.execute(upsql,(robust_flag,robust_good,error_str,fail_num,long_ratio,choice_factor))
                conn.commit()
                print('robust_test----thread_num',thread_num,'!!!!!!!!!!','第',j,'个/'+str(len(IR_data1))+'个 choice_factor:',choice_factor, 'robust_fail_num:',fail_num)
            except Exception:
                error_type, error_value, error_trace = sys.exc_info()
                print(error_type)
                print(error_value)
                for info in traceback.extract_tb(error_trace):
                    print(info)

    return

            
def cal_best_corr_list(corr_threshold=0.8):
    """Greedy correlation filter over robust-passing factors.

    Walks candidates with robust_flag='True' in descending sharpe order and
    compares each one's daily pnl series against (a) factors already accepted
    locally (status='True') and (b) factors already selected in the
    factor_evaluation library table.  When pnl correlation exceeds the
    threshold, only the factor with the higher sharpe survives; the loser is
    flagged status='corr_false'.  Surviving candidates are flagged
    status='True'.

    Parameters:
        corr_threshold (float): pnl-correlation cutoff against the LOCAL
            accepted list.  The library comparison uses a fixed 0.7 cutoff
            with a 1.1x sharpe margin instead.

    Side effects: updates `status` in data_test_batch_IR and commits.
    """
    global conn,cursor
    print('start cal_best_corr_list corr_threshold:')
    # if not os.path.exists(storage_pnl_path):
    #     print('目标路径不存在原文件夹---创建:',storage_pnl_path)
    #     os.makedirs(target_path)
    # else:   
    #     print('清空目标路径:',storage_pnl_path)
    #     shutil.rmtree(storage_pnl_path)
    #     os.makedirs(storage_pnl_path)
    # r=os.system('sshpass -p 58976177 scp -P 22 -r yxk@192.168.1.28:/home/raid/factor/signal_return/ /home/qastorage/public/yxk/factor/') 


 
    # update_sql="update `data_test_batch_IR` set status=''"
    # cursor.execute(update_sql)
    # conn.commit()
    
    # Candidates: robust-passing factors not yet accepted, uploaded or rejected.
    sql="select  * from `data_test_batch_IR` where  robust_flag='True' and status!='True' and status!='uploaded' and status!='corr_false' order by sharp desc "
    # sql="select  * from `data_test_batch_IR` where  robust_flag='True' and status!='True' and status!='corr_false' order by sharp desc "
    rubustpass_df= pd.read_sql(sql, con=conn)
    rubustpass_df.set_index('factor_name' ,inplace=True)
    print('cal_best_corr_list---corr_threshold:',corr_threshold,'待检测robust_past因子个数：',len(rubustpass_df))
  
    # Factors already selected into the central library.
    longlist_mysql='select * from `factor_evaluation` where factor_type="量价" and is_selected>=1 '
    long_list_mysql= pd.read_sql(longlist_mysql, con=conn)
    long_list_mysql.set_index('id' ,inplace=True)
    
    # long_list_df= pd.read_sql(longlist_sql, con=conn)
    # long_list=long_list_df['factor_name'].tolist()
    

    
    try:
        i=0
        for factorname_item in rubustpass_df.index:
            i+=1
            # isok==1 means "accept this candidate" unless a comparison below
            # demotes it.
            isok=1
            file_path=pnl_path+ factorname_item+'.hdf'
            # print(i,file_path)
            if os.path.exists(file_path) != True:  
                print('文件不存在',file_path)
                continue
            factor_signal_return=pd.read_hdf(file_path)
            factor_signal_return.index=factor_signal_return.index.map(lambda x: x.strftime('%Y-%m-%d'))
            factor_alpha_minus_fee=factor_signal_return['alpha_minus_fee'].copy()
            # Daily pnl as the first difference of alpha_minus_fee —
            # presumably a cumulative return curve; confirm with the producer.
            factor_pnl = factor_alpha_minus_fee - factor_alpha_minus_fee.shift(1)
            
            # Re-query the local accepted list every iteration so factors
            # accepted earlier in this very loop take part in the comparison.
            longlist_bendi="select  * from `data_test_batch_IR` where  status='True' order by sharp desc   "
            long_list_bendi= pd.read_sql(longlist_bendi, con=conn)
            long_list_bendi.set_index('factor_name' ,inplace=True)
         
            
            if len(long_list_bendi)>0:
                for longlist_bendi_item in long_list_bendi.index.tolist():
                    file_path_bengdi=pnl_path+ longlist_bendi_item+'.hdf'
                    if os.path.exists(file_path_bengdi) != True:  
                        print(file_path_bengdi,'本地文件不存在')
                        continue
                    bendi_signal_return=pd.read_hdf(file_path_bengdi)
                    bendi_signal_return.index=bendi_signal_return.index.map(lambda x: x.strftime('%Y-%m-%d'))
                    bendi_alpha_minus_fee=bendi_signal_return['alpha_minus_fee'].copy()
                    bendi_pnl = bendi_alpha_minus_fee - bendi_alpha_minus_fee.shift(1)
                    corr_item=factor_pnl.corr(bendi_pnl)
                    # Correlated and WEAKER than the incumbent: reject candidate.
                    if corr_item>corr_threshold and rubustpass_df.loc[factorname_item,'sharp']<long_list_bendi.loc[longlist_bendi_item,'sharp']:
                        print('cal_best_corr_list---',factorname_item,'相关性不合格',longlist_bendi_item,i,corr_item)
                        update_sql2="UPDATE `data_test_batch_IR` set status='corr_false'  WHERE `factor_name`='{}' ".format(factorname_item)
                        cursor.execute(update_sql2)
                        conn.commit()
                        isok=0
                        break;
                    # Correlated and STRONGER: evict the incumbent, keep candidate.
                    # NOTE(review): the break stops after the first correlated
                    # incumbent, so other correlated incumbents are not
                    # compared this round — confirm this is intended.
                    if corr_item>corr_threshold and rubustpass_df.loc[factorname_item,'sharp']>long_list_bendi.loc[longlist_bendi_item,'sharp']:
                        print('cal_best_corr_list---',factorname_item,'相关性合格,剔除原有因子',longlist_bendi_item,i,corr_item)
                        update_sql2="UPDATE `data_test_batch_IR` set status='corr_false'  WHERE `factor_name`='{}' ".format(longlist_bendi_item)
                        cursor.execute(update_sql2)
                        conn.commit()
                        isok=1
                        break;    
                        
            # Second gate: compare against the central library with a fixed
            # 0.7 cutoff; the library factor wins unless the candidate beats
            # its sharpe by more than 10%.
            if len(long_list_mysql)>0:
                for longlist_mysql_item in long_list_mysql.index.tolist():
                    file_path_mysql=storage_pnl_path+ longlist_mysql_item+'.hdf'
                    if os.path.exists(file_path_mysql) != True:
                        print(file_path_mysql,'库内文件不存在')
                        continue
                    mysql_signal_return=pd.read_hdf(file_path_mysql)
                    mysql_signal_return.index=mysql_signal_return.index.map(lambda x: x.strftime('%Y-%m-%d'))
                    mysql_alpha_minus_fee=mysql_signal_return['alpha_minus_fee'].copy()
                    mysql_pnl = mysql_alpha_minus_fee - mysql_alpha_minus_fee.shift(1)
                    corr_item=factor_pnl.corr(mysql_pnl)
                    if corr_item>0.7 and rubustpass_df.loc[factorname_item,'sharp']<(long_list_mysql.loc[longlist_mysql_item,'sharp_ratio']*1.1):
                        print('cal_best_corr_list---',factorname_item,'相关性不合格',long_list_mysql.loc[longlist_mysql_item,'factor_name'],i,corr_item)
                        update_sql2="UPDATE `data_test_batch_IR` set status='corr_false'  WHERE `factor_name`='{}' ".format(factorname_item)
                        cursor.execute(update_sql2)
                        conn.commit()
                        isok=0
                        break;
            # Survived both gates: accept into the local long list.
            if isok==1:
                print('cal_best_corr_list---',factorname_item,'长链长度增加：',len(long_list_bendi)+1,'!!!!!!!!!!!!!!!!!!!')  
                update_sql2="UPDATE `data_test_batch_IR` set status='True'  WHERE `factor_name`='{}' ".format(factorname_item)
                cursor.execute(update_sql2)
                conn.commit()

    except:
        # NOTE(review): bare except swallows everything (including
        # KeyboardInterrupt) after printing the traceback — consider
        # `except Exception:`.
        error_type, error_value, error_trace = sys.exc_info() 
        print(error_type) 
        print(error_value) 
        for info in traceback.extract_tb(error_trace):  
            print(info)

        pass     


     

    
    return   
 
def upload_all_pyfile():
    """Generate a .py module under local_py_path for every factor with status='True'.

    Queries the accepted factors, makes sure the output directory exists,
    then writes one file per factor via make_pyfile with a short pause
    between files.
    """
    global conn,cursor
    goodfactor="SELECT * from data_test_batch_IR where status='True' "
    goodfactorlist= pd.read_sql(goodfactor, con=conn)
    goodfactorlist.set_index('factor_name' ,inplace=True)
    print('开始上传文件')

    # Create the output directory on first use; never wipe an existing one.
    if not os.path.exists(local_py_path):
        print('目标路径不存在原文件夹---创建:',local_py_path)
        os.makedirs(local_py_path)

    for num, factor_name in enumerate(goodfactorlist.index, start=1):
        make_pyfile(factor_name,True,local_py_path)
        # Brief pause between files (also spaces out the timestamp-based names).
        time.sleep(2)
        print('第{}个文件--:{}'.format(num,factor_name))

    
def update_model_num(model_name=''):
    """Refresh the per-model counters in data_test_batch_model.

    For each model (all distinct models when ``model_name`` is ''), recounts
    in data_test_batch_IR: total tested factors, factors with sharp>2.5,
    factors with robust_fail_num=0 and factors with status='True', then
    writes the four counts back to data_test_batch_model.

    Parameters:
        model_name (str): a single model to refresh; '' refreshes every
            model found in data_test_batch_model.
    """
    global conn,cursor
    if model_name=='':
        sql1="select distinct(model_name) from data_test_batch_model "
        IR_data1= pd.read_sql(sql1, con=conn)
        model_list=IR_data1['model_name'].tolist()
    else:
        model_list=[model_name]
    for model_name_item in model_list:
        # Total factors tested for this model.
        sql2="select count(*) as count from `data_test_batch_IR` where  model_name ='{}'".format(model_name_item)
        count_sum_data= pd.read_sql(sql2, con=conn)
        count_sum=count_sum_data['count'][0]
        # Factors with a good sharpe.
        sql3="select count(*) as count from `data_test_batch_IR` where  model_name ='{}' and sharp>2.5".format(model_name_item)
        sharpgood_count_sum_data= pd.read_sql(sql3, con=conn)
        sharpgood_count=sharpgood_count_sum_data['count'][0]
        # Factors that passed every robustness check.
        sql3="select count(*) as count from `data_test_batch_IR` where  model_name ='{}' and robust_fail_num=0".format(model_name_item)
        robustgood_count_sum_data= pd.read_sql(sql3, con=conn)
        robustgood_count=robustgood_count_sum_data['count'][0]
        # Factors that survived the correlation filter.
        sql4="select count(*) as count from `data_test_batch_IR` where  model_name ='{}' and status='True'".format(model_name_item)
        selected_data= pd.read_sql(sql4, con=conn)
        selected_count=selected_data['count'][0]
        upsql="UPDATE `data_test_batch_model` SET `finish_num`='{}', `robust_good`='{}', `sharp_good`='{}',selected='{}' WHERE (`model_name`='{}') \
        LIMIT 1".format(count_sum,robustgood_count,sharpgood_count,selected_count,model_name_item)
        cursor.execute(upsql)
        conn.commit()
        # BUGFIX(review): the log previously repeated the label
        # 'sharpgood_count' in front of the robust count.
        print(model_name_item,':---count_sum',count_sum,'sharpgood_count',sharpgood_count,'robustgood_count',robustgood_count)





def make_pyfile(factor_name,topy_flag=True,pypath=''):
    """Reassemble a factor's full expression from data_test_batch_IR and emit a
    standalone StockFactor .py module.

    Recursively inlines the expressions of every 'alpha_*' factor referenced
    by ``factor_name``, strips the HDF_DATA placeholder lines, rewrites data
    sources via the replace_*/addfun_str helpers (defined elsewhere in this
    file) and wraps the result in a MyFactor class template.

    Parameters:
        factor_name (str): root factor whose expression tree is expanded.
        topy_flag (bool): when True, write the file to pypath+factor_name+'.py'.
        pypath (str): output directory (must end with a path separator).

    Side effects: closes the module-level cursor and conn before writing —
    NOTE(review): callers that loop over make_pyfile (e.g. upload_all_pyfile)
    will find the connection closed after the first call; confirm intent.
    """
    global conn,cursor
    
    factor_name_alpha=factor_name
    factor_express=''
    finishlist=[]
    # Walk the dependency chain: while the current name is a generated
    # 'alpha_*' factor, fetch its stored expression and prepend it.
    while 'alpha_' in factor_name_alpha:
        expresssql="SELECT express from data_test_batch_IR where factor_name='{}' ".format(factor_name_alpha)
        expresss= pd.read_sql(expresssql, con=conn)   
        expressstr1=''
        # Drop the HDF_DATA placeholder assignments; they only exist for the
        # in-memory exec path, not for the generated module.
        for i in expresss['express'][0].split('\n'):
            if 'HDF_DATA' not in i:
                expressstr1+=i+'\n'

        # Rename this sub-expression's output from 'returndata' to its own
        # factor name so the expressions compose, then prepend it.
        factor_express=  expressstr1.replace("returndata",factor_name_alpha)+factor_express
        finishlist.append(factor_name_alpha)
      

        # Find the next not-yet-expanded 'alpha_*' reference.
        # NOTE(review): the 20-character slice assumes every generated name
        # is exactly len('alpha_')+14 digits (timestamp-based) — confirm; a
        # shorter/longer name would be sliced incorrectly.
        index_list = [i.start() for i in re.finditer('alpha_', factor_express)] 
        for nPos in index_list:
            factor_name_alpha=factor_express[nPos:nPos+20] 
            # print(factor_name_alpha)
            if factor_name_alpha not in finishlist:
                # print(factor_name_alpha,'!!!!!!!!!!',factor_express)
                break;
            else:
                # All references expanded: '' terminates the while loop.
                factor_name_alpha=''


  



    # Restore the root factor's output variable to 'returndata' for the template.
    factor_express=  factor_express.replace(factor_name,"returndata")    
    
    # Helpers defined elsewhere in this file: swap HDF reads for live data
    # sources and prepend any required helper functions.
    factor_express=replace_read_hdf(factor_express)
    factor_express=replace_source_name(factor_express)
    factor_express=addfun_str(factor_express)
    print("==="*30)
    print(factor_express)
    print("==="*30)
    pystr1="""
import pandas as pd
import numpy as np
import datetime
from qinganx.factor.StockFactorNew import StockFactor
from qinganx.datalib import Datalib
dlib = Datalib()
import qinganx.alpha_expression as alphalib

"""

    pystr2="""
class MyFactor(StockFactor):
    def __init__(self):
        StockFactor.__init__(self)
    def gen_factor_signal(self):
        return returndata
"""
    finalstr=pystr1+factor_express+pystr2


    cursor.close()
    conn.close() 
    if topy_flag:
        pythonfilename=pypath+factor_name+'.py'
        with open(pythonfilename,'w',encoding='utf-8') as f:
            text = finalstr
            f.write(text)

    # print(finalstr)
            
def check_path(path_list):
    """Scan factor source directories and (re)test any factor that needs it.

    For every ``{'source': <dir>}`` entry in *path_list* the directory tree is
    walked.  A ``*.py`` factor file is imported and pushed through
    ``test_factor`` when either its name is missing from the
    ``data_test_batch_IR`` table or its cached ``<name>.hdf`` output is absent.

    Side effects: appends each source dir to ``sys.path``, imports factor
    modules, and closes the module-level MySQL ``cursor``/``conn`` on exit.
    """
    global conn, cursor
    # Local import so the (unseen) file header does not need touching.
    import importlib

    index_num = 0
    print(path_list)

    # Factor names that already have an IR test record; a set makes the
    # per-file membership test O(1) instead of rebuilding a list each time.
    sql = "select * from `data_test_batch_IR` "
    known_factors = set(pd.read_sql(sql, con=conn)['factor_name'].tolist())

    for entry in path_list:
        source_path = entry['source']
        ctime = time.time()
        if not os.path.exists(source_path):
            continue
        sys.path.append(source_path)
        for root, dirs, files in os.walk(source_path):
            for f in files:
                file_path = os.path.join(root, f)
                # File age in days — printed for diagnostics only.
                daycha = round((ctime - os.path.getmtime(file_path)) / (60 * 60 * 24), 2)
                if (file_path[-3:] != ".py" or 'bfunction' in file_path
                        or 'cpython' in file_path or 'checkpoint' in file_path):
                    continue
                name = f[:-3]
                hdf_file = hdf_path + name + '.hdf'
                # Re-test when the factor is unknown to the DB or its cached
                # HDF output is missing.
                if name in known_factors and os.path.exists(hdf_file):
                    continue
                index_num += 1
                print(root, '---------', index_num, '-----', f)
                print(name, 'not in old_result:', name not in known_factors)
                print(hdf_file, 'not os.path.exists:', not os.path.exists(hdf_file))
                print('daycha: ', daycha)
                try:
                    # importlib.import_module replaces the original
                    # exec('import …') / eval('….MyFactor()') pair — same
                    # import machinery, no string evaluation.
                    module = importlib.import_module(name)
                    factor = module.MyFactor()
                    pl_factor = factor.gen_factor_signal()
                    test_factor(pl_factor, name, 'single')
                except Exception:
                    # Keep scanning the remaining files, but show the full
                    # traceback instead of only the exception message.
                    traceback.print_exc()

    cursor.close()
    conn.close()
        
def run_multiproce(run_mode,processesnum=2,run_total_num=4,run_deep_num=5,ir_threshold=3,corr_threshold=0.8,model_name='',factor_type=''):
    """Fan ``run_total_num`` batches of factor-pipeline jobs onto a process pool.

    ``run_mode`` selects what each batch submits: ``'total'`` runs the whole
    pipeline, ``'BASIC_<FREQ>_<KIND>'`` generates factors from one data
    source, and the remaining modes run a single pipeline stage.  On exit the
    module-level MySQL ``cursor``/``conn`` are always closed.

    NOTE(review): ``corr_threshold``, ``model_name`` and ``factor_type`` are
    accepted but never used here, and the 'mul_factor'/'add_factor' branches
    reference ``best_corr_mode``, which is not defined in this function —
    presumably a module-level global; verify before using those modes.
    """
    global conn,cursor
    manager = multiprocessing.Manager()
    total_task = manager.list()  # shared list handed to the model workers
    pool = multiprocessing.Pool(processes=processesnum)
    try:
        for batch in range(run_total_num):
            if run_mode == 'total':
                # Full pipeline for this batch: expand -> correlation ->
                # group/resid tests -> final expand -> hdf cleanup ->
                # robustness -> best-corr list.
                for job, args in (
                        (expand_factor,      (run_deep_num * 3, batch, 'first')),
                        (relevance_factor,   (run_deep_num,     batch, 2)),
                        (group_test,         (run_deep_num * 3, batch, 2)),
                        (resid_test,         (run_deep_num * 3, batch, 2)),
                        (expand_factor,      (run_deep_num,     batch, 'final')),
                        (check_hdf,          ('remove_not_in_sql', 'true')),
                        (robust_test,        (run_deep_num * 9, batch)),
                        (cal_best_corr_list, ()),
                ):
                    pool.apply_async(job, args)
            elif run_mode == 'upload_all_pyfile':
                pool.apply_async(upload_all_pyfile, ())
            elif run_mode.startswith('BASIC_'):
                # 'BASIC_DAY_PRICE' -> model 'BASIC_DAY', data kind 'price'.
                # Only the nine known combinations dispatch; anything else is
                # ignored, exactly like the original per-mode branches.
                parts = run_mode.split('_')
                if (len(parts) == 3 and parts[1] in ('DAY', 'MIN', 'TICK')
                        and parts[2] in ('PRICE', 'VOL', 'RATIO')):
                    pool.apply_async(make_factor_bymodel,
                                     (run_deep_num, batch, 'BASIC_' + parts[1],
                                      total_task, parts[2].lower()))
            elif run_mode == 'relevance_factor':
                pool.apply_async(relevance_factor, (run_deep_num, batch))
            elif run_mode == 'cal_best_corr_list':
                pool.apply_async(cal_best_corr_list, ())
            elif run_mode == 'mul_factor':
                pool.apply_async(mul_factor, (run_deep_num, batch, ir_threshold, best_corr_mode))
            elif run_mode == 'add_factor':
                pool.apply_async(add_factor, (run_deep_num, batch, ir_threshold, best_corr_mode))
            elif run_mode == 'add_BASIC':
                pool.apply_async(add_JB_best, (run_deep_num, batch))
            elif run_mode == 'robust_test':
                pool.apply_async(robust_test, (run_deep_num, batch))
            elif run_mode == 'resid_test':
                pool.apply_async(resid_test, (run_deep_num, batch))
            elif run_mode == 'group_test':
                pool.apply_async(group_test, (run_deep_num, batch))
            elif run_mode == 'cta_test':
                pool.apply_async(cta_test, (run_deep_num, batch))
            elif run_mode == 'corr_test':
                pool.apply_async(corr_test, (run_deep_num, batch))

        pool.close()  # stop accepting new jobs
        pool.join()   # block until every submitted job has finished
        print('run_end')
    except Exception as e:
        print(e)
    finally:
        cursor.close()
        conn.close()
 
    
if __name__=='__main__':
    # CLI entry point: the first argument selects the batch run mode.
    # Guard against a missing argument instead of crashing with IndexError.
    if len(sys.argv) < 2:
        print('usage: python <script> total|model')
        sys.exit(1)

    mode = sys.argv[1]  # argv entries are already str; no cast needed
    print(mode)
    if mode == 'total':
        run_multiproce('total', processesnum=8, run_total_num=9000, run_deep_num=10, ir_threshold=3)
    elif mode == 'model':
        # NOTE(review): 'make_factor_bymodel1' matches no branch inside
        # run_multiproce, so this mode currently submits no jobs — confirm
        # the intended run_mode (e.g. one of the 'BASIC_*' modes).
        run_multiproce('make_factor_bymodel1', processesnum=10, run_total_num=4000, run_deep_num=5)