import sys   
sys.path.append('code')
import os
from joblib import Parallel, delayed
import akshare as ak
import numpy as np
sys.path.append('code')
from data_capture.stock_capture_ak.get_data import get_stock_codes, get_price, get_market_value ,get_other_data
from data_capture.data_contrate import consentrate_price_value
import time
import shutil  

### Data update: scrape the latest prices and rebuild the consolidated files
def catdata():
    """Run the concatenation step that merges captured price/market-value
    files into consolidated per-column frames (see data_capture.data_contrate)."""
    concentrator = consentrate_price_value()
    concentrator.consentrate_columns()

def scrash_price():
    """Refresh the stock universe and scrape its daily data.

    Universe = (国证1000 ∪ 国证2000), minus STAR-market codes reported by the
    live spot snapshot, minus ChiNext codes (prefix '3').  The cached
    price/factor directories are removed first so downloads start clean.
    """
    gz1000 = get_stock_codes(name='国证1000')['样本代码'].values
    gz2000 = get_stock_codes(name='国证2000')['样本代码'].values
    gz = np.union1d(gz1000, gz2000)
    # STAR-market (科创板) codes from the real-time snapshot; excluded below.
    kc = ak.stock_kc_a_spot_em()['代码'].values
    codes = np.setdiff1d(gz, kc)
    # Also drop ChiNext codes (those starting with '3').
    codes = [c for c in codes if not c.startswith('3')]
    # Remove each cache directory independently: the old single try/bare-except
    # skipped the second directory whenever the first was already gone, and
    # silently swallowed unrelated errors (permission problems, etc.).
    for cache_dir in (r'data\stock_data\daily', r'data\cmodty\my_feature'):
        try:
            shutil.rmtree(cache_dir)
        except FileNotFoundError:
            print('已经移除因子文件')
    get_price(codes)
    get_market_value(codes)
    get_other_data()

# Stage 1: scrape prices/market caps, then consolidate them into column files.
scrash_price()
catdata()
# Stage 2: ETF monitoring — fetch ETF data, filter it, build the base
# portfolios, and email the result (the '#' strings are visual separators
# inside the mail body).  NOTE(review): exact return semantics of
# get_base_etf (poc_etf / ebs_poc / hold_poc) are defined in etf_back.
from data_capture.etf_data_capture.etf_back import get_etf_related, data_fliter,get_base_etf,send, send_email
get_etf_related()
data_fliter()
poc_etf, ebs_poc, hold_poc = get_base_etf()
send_email((poc_etf, '###########', hold_poc, '##########', ebs_poc ))
send()

### Factor computation
# Import every factor family's entry point.  Only the 'ind' / 'ind9' alpha
# groups are computed in this run; the other families are kept commented out
# for manual one-off recomputation.
from feature_eng.alpha_101.main_101 import calc_main_101, calc_main_alpha_ind, calc_main_alpha_ind9
from feature_eng.alpha_191.main_191 import calc_main_191 
from feature_eng.alpha_huangeven.main_huangeven import calc_main_even
from feature_eng.alpha_mine.main_mine import calc_main_mine
from feature_eng.feature_fundmantal.value_main import calc_main_fundmental
from feature_eng.alpha_series.alpha_main import calc_main_alpha_df, calc_main_alpha_ser
# calc_main_fundmental()

# calc_main_101()
calc_main_alpha_ind()
calc_main_alpha_ind9()
# calc_main_191()
# calc_main_alpha_df()
# calc_main_even()
# calc_main_mine()
# calc_main_alpha_ser()


### Factor screening / checking
import sys
sys.path.append(r'code')
sys.path.append(r'code/frame')
from real.ml_reg.strage.signal_predict import Predictor
from data_read.get_feature import Get_feature_data_select,  Get_feature_data_many
from data_read.get_label import Get_label_data, Get_class_label, Get_rank_label
# from real.ml_reg.strage.open_order import Pos_make_rank_thred_open
from real.email_test import is_trade_day
import os
import operator
import numpy as np
import argparse
import pandas as pd
# Training configuration
# basic
import json

def _str2bool(value):
    """Parse a CLI boolean flag value.

    argparse's ``type=bool`` is a well-known pitfall: ``bool('False')`` is
    True, so any non-empty string was previously treated as True.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ('true', '1', 'yes', 'y'):
        return True
    if value.lower() in ('false', '0', 'no', 'n'):
        return False
    raise argparse.ArgumentTypeError(f'expected a boolean, got {value!r}')

def _str2list(value):
    """Parse a comma-separated CLI string into a list of strings.

    ``type=list`` would have split the raw argument into single characters.
    """
    return [item.strip() for item in value.split(',') if item.strip()]

parser = argparse.ArgumentParser(description='ml_training structure')
parser.add_argument('--result_root', type=str,  default='dl_test', help='the root to save result')
parser.add_argument('--feature_root', type=str,  default=None, help='the root to read feature')
parser.add_argument('--label_root', type=str,  default=None, help='the root to read label')
parser.add_argument('--model_name', type=str,  default='lightgbm',  help='model name, options: [lasso, lightgbm, liner, random_forest]')

#time
# json.loads so a dict can actually be supplied on the command line
# (``type=dict`` would raise on any string value).  Defaults are unchanged.
parser.add_argument('--time_param', type=json.loads,  default={"insample_beg": '2019-1',
                                                        "outsample_end": '2032-12',
                                                        }, help='time param for training')

#train_control
parser.add_argument('--rolling_step', type=int,  default=12, help='the step of rolling train')
parser.add_argument('--rolling_method', type=str,  default='cum', help='the method of rolling train')
parser.add_argument('--train_ratio', type=float,  default=0.8, help='the ratio to split train, eval and test')
parser.add_argument('--feature_list', type=_str2list,  default=['high', 'low', 'close', 'vol', 'volume', 'oi'], help='feature name')
parser.add_argument('--varieties', type=_str2list,  default=None, help='codes num')
parser.add_argument('--label_range', type=int,  default=10, help='the periods of thre sum of return')
parser.add_argument('--divide_vol', type=_str2bool,  default=False, help='to decide that regression label is dividing vol')
parser.add_argument('--log_eval',  type=int,  default=50, help='the step of log_eval')
parser.add_argument('--predict_save', type=_str2bool,  default=True, help='weather saving predict result')


#model
parser.add_argument('--ml_param1', type=json.loads,  default={"task": "train", 
                                            "max_depth": 5, 
                                            "boosting_type": "goss", 
                                            "num_leaves": 28, 
                                            "n_estimators": 5000, 
                                            "objective": "regression", 
                                            "metric": 'rmse', #'average_precision', 'auc'
                                            "learning_rate": 0.005, 
                                            "feature_fraction": 0.65, 
                                            "bagging_freq": 10, 
                                            "verbose": -1, 
                                            "n_jobs": 16,
                                            # 'is_unbalance':'True',
                                            'seed':2022}, help='model param')

parser.add_argument('--ml_param2', type=json.loads,  default={'num_boost_round': 500, 
                                                                'early_stopping_rounds': 200, 
                                                               'verbose_eval': 500}, help='model param verbose .etc')


parser.add_argument('--send_email', type=_str2bool,  default=True, help='the ratio of stop ret')
parser.add_argument('--tune_param', type=_str2bool,  default=False, help='the ratio of stop ret')

from feature_eng.feature_display import Feature_display, IC_display, Cor_display_fast, Cor_display_straitfy
import json
def feature_fliter(root_lst):
    """Screen the factor files under ``root_lst``.

    First writes IC statistics, then pairwise-correlation statistics
    (spearman) over the same date window.
    """
    ic_report = IC_display(start_date='2020-01-01', end_date='2023-02-05', feature_root=root_lst)
    ic_report.info_generate()
    # Drop the (potentially large) object before building the next report.
    del ic_report

    corr_report = Cor_display_fast(
        start_date='2020-01-01',
        end_date='2023-02-05',
        thred=0.8,
        nan_thred=0.03,
        cor_thred=0.01,
        feature_root=root_lst,
        method='spearman',
    )
    corr_report.cor_generate()
    del corr_report

args = parser.parse_args()
# Feature groups to load for this run; the commented alternatives
# (df/ser/101/191) are the other factor families.
lst = ['ind', 'ind9'] #[ 'df','ser','101','191']#,'df','ser','ser3','101','191'
args.feature_root = [os.path.join(r'data/cmodty/my_feature', f'feature_data_{i}') for i in lst] 
# Class-level setting: the JSON written by the screening below is the
# whitelist the feature loader reads from.
Get_feature_data_select.select_file = r'data\cmodty\check\fliter_indind9.json'
# Per-group IC / correlation screening, then one stratified correlation
# pass across all groups together.
for i in args.feature_root:
    r = [i,]
    feature_fliter(r)
cor_dis = Cor_display_straitfy(start_date='2020-01-01', end_date='2023-02-05',thred=0.8,nan_thred=0.03,cor_thred=0.01, feature_root=args.feature_root, method='spearman')
cor_dis.cor_generate()

# Labels come from the consolidated price files; results are written under
# result/reg_real/<label_range>.
args.label_root = r'data/stock_data/daily/consentrate_price'
args.result_root = r'result/reg_real'
args.result_root = os.path.join(args.result_root, str(args.label_range)) 
if not os.path.exists(args.result_root):
    os.makedirs(args.result_root)

qb = Predictor(args)

class Ml_quant():
    """Thin driver that runs one rolling-training pass through a Predictor."""

    def __init__(self, QuantBuider) -> None:
        # Keep the caller-supplied predictor; all heavy lifting happens there.
        self.qb = QuantBuider

    def creat_quant(self):
        """Fetch features/labels via the predictor and return its rolling-train output."""
        feature, label, codes, times = self.qb.get_data(
            Get_feature_data_select, Get_rank_label
        )
        return self.qb.rolling_train(codes, times, feature, label)
          
def info_concat(dict10):
    """Join per-code predictions with index membership info and latest close.

    Parameters
    ----------
    dict10 : pandas.Series
        Predictions indexed by stock code (output of the rolling train).

    Returns
    -------
    pandas.DataFrame
        One row per predicted code: name, industry, latest close, prediction
        and the two market-cap columns.
    """
    codes1 = pd.read_csv(r'data\stock_data\code_date\国证1000.csv', index_col=0, converters={'样本代码': str})
    codes2 = pd.read_csv(r'data\stock_data\code_date\国证2000.csv', index_col=0, converters={'样本代码': str})
    # Latest available close for every code (last row of the close frame).
    close = pd.read_pickle(r'data\stock_data\daily\consentrate_price\close.pkl.gzip').iloc[-1]
    codes = pd.concat([codes1, codes2], axis=0)
    codes.index = codes['样本代码']
    # BUG FIX: the body previously read the module-level global ``dic10``
    # instead of the ``dict10`` parameter, so the function only worked by
    # accident when called with that exact global.
    pred10 = codes.loc[dict10.index]
    pred10['pred'] = dict10
    pred10['close'] = close[dict10.index]
    pred10 = pred10[['样本简称', '所属行业', 'close', 'pred', '自由流通市值', '总市值']]
    return pred10

import datetime
from real.email_test import send_email
# NOTE(review): this send_email shadows the ETF send_email imported earlier;
# presumably intentional since the ETF mail was already sent above — confirm.
# # if is_trade_day():
# # try:
M = Ml_quant(qb)
dic10 = M.creat_quant()
pred10 = info_concat(dic10)
today = datetime.date.today()
with open(f'{args.result_root}/log.txt',"a") as file:   # "a" appends so earlier log lines are kept
    file.write(f'\n{today}回归数据更新完成')  
send_email(f'回归数据更新完成. \n{pred10 }')
print(pred10)
# except Exception:
#     with open(f'{args.result_root}/demo_1027/log.txt',"a") as file:   # "a" appends so earlier log lines are kept
#         file.write(f'\nlearn error')
#     send_email('learn error')


# from data_capture.etf_data_capture.etf_back import get_etf_related, data_fliter,get_base_etf,send
# get_etf_related()
# data_fliter()
# poc_etf, ebs_poc, hold_poc = get_base_etf()
# send_email((poc_etf, '###########', hold_poc, '##########', ebs_poc ))
# send()