import json
from pages.gp_zjlx import GpZjPage
from pages.gp_bk import GpBk
from pages.gp_ztgy import GpZtGy
from pages.gp_homepage import GpHomePage as Gh
from config.gp_config import PathConfig, GpApiConfig
from api.base_api import BaseApi
from tools.utils import read_config
import multiprocessing
import os
from tools.logs import Logs

# Module-level logger; the name ("处理股票数据" = "process stock data") tags all log lines.
logger = Logs("处理股票数据").get_logger()


class GpApi(BaseApi):
    """Stock screening API.

    Loads a strategy section from the config file, filters the stock pool
    against the strategy's ``is*`` rules (optionally in parallel), and
    re-filters the surviving pool by today's capital inflow and hot sectors.
    """

    def __init__(self, strategy="DEFAULT"):
        """Load the strategy section and the multiprocessing settings.

        :param strategy: section name in the strategy config file
                         (default: "DEFAULT").
        """
        # NOTE(review): BaseApi.__init__ is never invoked — confirm the base
        # class needs no initialization before relying on its state.
        config = read_config(PathConfig.celve_path)
        self.celue = config[strategy]    # strategy rule section (configparser-like)
        self.process = config["PROCESS"]  # multiprocessing settings section

    @staticmethod
    def get_gp_rule(gp_data, celue):
        """Apply every ``is*`` rule named in the strategy section to ``gp_data``.

        :param gp_data: historical data for one stock.
        :param celue: strategy config section; keys starting with "is" name
                      rule functions defined on ``BaseApi``.
        :return: ``False`` as soon as one rule rejects the data, else ``True``.
        """
        for rule_name in celue:
            # Only keys beginning with "is" are treated as rule method names.
            # (The original redundant `if rule_name:` check was removed — a
            # string that startswith "is" is never empty.)
            if rule_name.startswith("is"):
                rule_func = getattr(BaseApi, rule_name)
                # Rules are looked up on the class and called with the class
                # object as the first argument (original calling convention,
                # preserved for compatibility with how the rules are written).
                if not rule_func(BaseApi, gp_data):
                    return False
        return True

    @staticmethod
    def process_item_is_ok_gp(pgno, result_list, gp_list, celue):
        """Worker: keep every stock in ``gp_list`` that passes the strategy rules.

        BUG FIX: this function was defined without ``self`` and without
        ``@staticmethod``; it only worked because it was always called on the
        class. Marking it ``@staticmethod`` makes instance-level calls safe too.

        :param pgno: worker index (used for logging only).
        :param result_list: shared list (``multiprocessing.Manager().list()``)
                            that matching stock dicts are appended to.
        :param gp_list: iterable of stock dicts (each with a "dm" id key).
        :param celue: strategy config section.
        """
        gp1 = GpZjPage("gp")
        try:
            # Build the candidate pool: one network/page lookup per stock.
            for gp in gp_list:
                logger.info(f"线程序号：{pgno}")
                logger.info("当前股票id：{}".format(gp.get("dm")))
                try:
                    gp_data = gp1.get_zj_by_dm(gp.get("dm"), celue.getint("data_days"))
                    if GpApi.get_gp_rule(gp_data, celue):
                        logger.info(f"符合条件的股票信息：{gp}")
                        result_list.append(gp)
                except Exception as e:
                    # Best-effort screening: log the failure and continue with
                    # the next stock instead of aborting the whole chunk.
                    logger.error("异常股票，手动处理：{}, 错误信息：{}".format(gp, e))
        finally:
            # BUG FIX: close the page even if the loop itself raises,
            # so the session/resource is never leaked.
            gp1.close()

    def start(self):
        """Screen the full stock pool and write the matches to the result file.

        Reads the pool from ``PathConfig.gp_path_chi``, fans the work out over
        ``PROCESS.workers`` processes when ``PROCESS.is_open`` is true, and
        dumps the surviving stocks as JSON to ``PathConfig.gp_path_bx_chi``.
        """
        with open(PathConfig.gp_path_chi, "r", encoding="utf-8") as f:
            gp_list = json.load(f)

        if not os.path.exists(PathConfig.gp_file_path):
            os.mkdir(PathConfig.gp_file_path)

        # A managed list so worker processes can append results directly.
        manager = multiprocessing.Manager()
        result_list = manager.list()
        if self.process.getboolean("is_open"):
            processes = []
            workers = self.process.getint("workers")

            # Split the pool into `workers` contiguous chunks; the first
            # `remainder` chunks each take one extra element so every stock
            # is covered even when the pool doesn't divide evenly.
            chunk_size = len(gp_list) // workers
            remainder = len(gp_list) % workers

            start_index = 0
            for i in range(workers):
                end_index = start_index + chunk_size + (1 if i < remainder else 0)
                current_chunk = gp_list[start_index:end_index]
                start_index = end_index
                p = multiprocessing.Process(
                    target=GpApi.process_item_is_ok_gp,
                    args=(i, result_list, current_chunk, self.celue),
                )
                p.start()
                processes.append(p)

            # Wait for every worker before collecting results.
            for p in processes:
                p.join()
        else:
            # Single-process fallback (pgno 1 kept for log compatibility).
            GpApi.process_item_is_ok_gp(1, result_list, gp_list, self.celue)
        # The main process persists the collected results.
        with open(PathConfig.gp_path_bx_chi, "w", encoding="utf-8") as f111:
            json.dump(list(result_list), f111)

    def today_is_ok(self):
        """Filter the pre-screened pool by today's net capital inflow and sector.

        Requires that :meth:`start` has already produced the pre-screened pool
        file; raises ``FileNotFoundError`` otherwise.

        :return: list of stock dicts with positive inflow today whose sectors
                 overlap today's limit-up ("涨停") sectors.
        :raises FileNotFoundError: if the pre-screened pool file is missing.
        """
        gp1 = GpZjPage("gp")
        gz = GpZtGy("gp")
        if not os.path.exists(PathConfig.gp_path_bx_chi):
            logger.warning(f"当前路径不存在，请先计算历史数据:{PathConfig.gp_path_bx_chi}")
            raise FileNotFoundError("当前路径不存在，请先计算历史数据")
        with open(PathConfig.gp_path_bx_chi, "r", encoding="utf-8") as f:
            gp_list = json.load(f)
        result = []
        # BUG FIX: the local previously named `re` shadowed the stdlib regex
        # module name; renamed for clarity and safety.
        zt_gps = gz.get_all_zt_gp()       # today's limit-up stocks
        hot_bk = gz.get_bk_by_gp(zt_gps)  # sectors derived from them
        for gp in gp_list:
            # Today's net capital inflow for this stock.
            gp_tody_lr = gp1.get_gp_tody_zj(gp.get("dm"))
            # Does the stock's sector list overlap today's hot sectors?
            bks = gp.get("bks")
            bk_is_ok = gz.bk_is_ok(bks, hot_bk)
            if gp_tody_lr > 0 and bk_is_ok:
                logger.info("符合条件，当前股票id：{}".format(gp.get("dm")))
                result.append(gp)
        # NOTE(review): gp1/gz are not closed here (unlike the worker) —
        # confirm whether these page objects hold resources needing close().
        return result


# Script entry point: run a full screening pass with the default strategy.
if __name__ == '__main__':
    GpApi().start()