from wqAPI import runSimulate, sign_in, init_logging
from multiprocessing import Manager, freeze_support
from datetime import datetime, timedelta 

import logging

if __name__ == '__main__':
    freeze_support()
    # Cross-process shared state: Manager proxies make the dict and lock safe
    # to share with worker processes spawned later (joblib/multiprocessing).
    manager = Manager()
    shared_dict = manager.dict()

    # 'session': authenticated API session; 'expire_time': datetime after
    # which the session must be refreshed.  Both start unset.
    shared_dict['session'] = None
    shared_dict['expire_time'] = None
    lock = manager.Lock()  # cross-process lock guarding session refresh
    expire_Time = 0.2  # session lifetime in hours (earlier value 3.5 — presumably shortened for testing)

    logfile = init_logging()

    def get_session():
        """Return the shared API session, signing in again if it has expired.

        Guarded by a cross-process lock so concurrent workers never trigger
        multiple simultaneous sign-ins.
        """
        with lock:
            now = datetime.now()
            needs_refresh = (
                shared_dict['session'] is None
                or now >= shared_dict['expire_time']
            )
            if needs_refresh:
                # Re-authenticate and push expiry forward by expire_Time hours.
                shared_dict['session'] = sign_in()
                shared_dict['expire_time'] = now + timedelta(hours=expire_Time)
            return shared_dict['session']

    sess = get_session()  # perform the initial sign-in in the parent process

    # 获取数据集ID为fundamental6（Company Fundamental Data for Equity）下的所有数据字段
    ### Get Data_fields like Data Explorer 获取所有满足条件的数据字段及其ID
    def get_datafields(
            s,
            searchScope,
            dataset_id: str = '',
            search: str = ''
    ):
        """Fetch all matching data fields from the BRAIN API as a DataFrame.

        Pages through the /data-fields endpoint 50 rows at a time, mirroring
        the Data Explorer UI.

        Args:
            s: authenticated requests-like session (must expose ``.get()``).
            searchScope: dict with 'instrumentType', 'region', 'delay' and
                'universe' keys.
            dataset_id: restrict results to one dataset; ignored when
                ``search`` is non-empty.
            search: free-text search term; when given, the endpoint's total
                count is not queried and a fixed 100-row window is scanned.

        Returns:
            pandas.DataFrame with one row per data field.
        """
        import pandas as pd

        # Build the query string once; previously the two branches duplicated
        # it and emitted a malformed "?&instrumentType=..." query.
        base = (
            "https://api.worldquantbrain.com/data-fields?"
            f"instrumentType={searchScope['instrumentType']}"
            f"&region={searchScope['region']}"
            f"&delay={searchScope['delay']}"
            f"&universe={searchScope['universe']}"
        )
        if search:
            # The API does not report a reliable total for free-text search,
            # so scan a fixed window of 100 rows (two 50-row pages).
            url_template = base + f"&search={search}&limit=50" + "&offset={x}"
            count = 100
        else:
            url_template = base + f"&dataset.id={dataset_id}&limit=50" + "&offset={x}"
            count = s.get(url_template.format(x=0)).json()['count']

        # Collect every 50-row page, then flatten into one list of records.
        datafields_list = []
        for offset in range(0, count, 50):
            page = s.get(url_template.format(x=offset)).json()['results']
            datafields_list.append(page)

        datafields_list_flat = [item for sublist in datafields_list for item in sublist]
        return pd.DataFrame(datafields_list_flat)


    # Search scope: USA TOP3000 delay-1 equity universe.
    searchScope = {'region': 'USA', 'delay': '1', 'universe': 'TOP3000', 'instrumentType': 'EQUITY'}
    # Pull every data field in the fundamental6 dataset
    # (Company Fundamental Data for Equity).
    fnd6 = get_datafields(s=sess, searchScope=searchScope, dataset_id='fundamental6')
    # Keep only fields of type "MATRIX".
    fnd6 = fnd6[fnd6['type'] == "MATRIX"]
    # Extract the field IDs (note: .values is a numpy array, not a list).
    datafields_list_fnd6 = fnd6['id'].values
    # Log how many fields survived the filter.
    logging.info(f"datafields_list_fnd6 count: {len(datafields_list_fnd6)}")


    # Fill the alpha template
    #   <group_compare_op>(<ts_compare_op>(<company_fundamentals>, <days>), <group>)
    # with every combination of operators, fields, lookbacks and groupings,
    # e.g. group_rank(ts_rank({field}, 252), industry).
    group_compare_op = ['group_rank', 'group_zscore', 'group_neutralize']
    ts_compare_op = ['ts_rank', 'ts_zscore', 'ts_av_diff']
    company_fundamentals = datafields_list_fnd6
    days = [60, 200]
    group = ['market', 'industry', 'subindustry', 'sector', 'densify(pv13_h_f1_sector)']

    # Cartesian product via a comprehension; rightmost variable varies
    # fastest, matching the order of the equivalent nested loops.
    alpha_expressions = [
        f"{gco}({tco}({cf}, {d}), {grp})"
        for gco in group_compare_op
        for tco in ts_compare_op
        for cf in company_fundamentals
        for d in days
        for grp in group
    ]

    # Report the total number of generated expressions.
    logging.info(f"there are total {len(alpha_expressions)} alpha expressions")

    logging.info(f"there are total {len(alpha_expressions)} alphas.")

    # Wrap every alpha expression in the REGULAR-simulation payload expected
    # by the simulate endpoint; only the "regular" field differs per entry.
    logging.info("正在开始将如下alpha表达式与setting封装...")

    alpha_list = [
        {
            "type": "REGULAR",
            "settings": {
                "instrumentType": "EQUITY",
                "region": "USA",
                "universe": "TOP3000",
                "delay": 1,
                "decay": 0,
                "neutralization": "SUBINDUSTRY",
                "truncation": 0.01,
                "pasteurization": "ON",
                "unitHandling": "VERIFY",
                "nanHandling": "OFF",
                "language": "FASTEXPR",
                "visualization": False,
            },
            "regular": expr,
        }
        for expr in alpha_expressions
    ]

    logging.info(f"there are {len(alpha_list)} Alphas to simulate")

    from joblib import Parallel, delayed
    # Run the simulations in parallel with 3 worker processes.
    startIdx = 1  # 1-based index of the first alpha to submit (allows resuming a run)
    
    # NOTE(review): get_session() is evaluated in the parent process as the
    # task generator is consumed, not inside the workers — so a session that
    # expires mid-run is refreshed at dispatch time of each task. Confirm
    # this matches how runSimulate uses the session in the worker.
    Parallel(n_jobs=3)(
        delayed(runSimulate)(get_session(), alpha, index, logfile)
        for index, alpha in enumerate(alpha_list[startIdx-1: ],start=startIdx) #e.g. slice to startIdx+3 for a small batch
        )

    logging.info(f"Finished running for {__file__}")
