"""
PlumeSoft 量化代码库
作者：PlumeSoft
用途：为 optuna 设计的支持多进程、不依赖数据库、支持持久化保存的高性能炼丹基础框架
"""

import os
import time
import warnings
import math
import webbrowser
import pandas as pd
import optuna
import traceback
import tqdm
from optuna import Trial
from optuna.study import MaxTrialsCallback
from optuna.trial import TrialState, FrozenTrial
from datetime import datetime
from multiprocessing import Pool, Manager, Process
from program.plumesoft_bys.multi_task_storage import (
    MultiTaskStorage,
    get_fast_client_storage,
)
from program.plumesoft_bys.bys_base import Base_Bayesian
from program.plumesoft_bys.bys_common import dbg, log
import program.plumesoft_bys.bys_common as common

# Suppress all warnings globally (pandas/optuna emit noisy FutureWarnings
# during long optimization runs and would flood the console/progress bar).
warnings.simplefilter(action="ignore")


################################
# Base tuning-engine framework; can be used directly.
################################


class TestTrialsCallback:
    """Optuna callback that stops a study early.

    The study is stopped either when the total trial budget ``n_trials``
    is reached, or when at least ``test_n_trials`` trials have been seen
    (checkpoint) and the best score so far is still below
    ``test_min_score``.
    """

    def __init__(
        self,
        n_trials: int,
        test_n_trials: int,
        test_min_score: float,
        states=(TrialState.COMPLETE, TrialState.RUNNING),
    ) -> None:
        self._n_trials = n_trials
        self._test_n_trials = test_n_trials  # checkpoint: number of trials
        self._test_min_score = test_min_score  # minimum best score required at the checkpoint
        self._states = states

    def __call__(self, study: "optuna.study.Study", trial: FrozenTrial) -> None:
        trials = study.get_trials(deepcopy=False, states=self._states)
        n_complete = len(trials)
        # Stop once the total trial budget has been reached.
        if n_complete >= self._n_trials:
            study.stop()
        # At the checkpoint, stop if the best score does not meet the threshold.
        if n_complete >= self._test_n_trials:
            # BUGFIX: RUNNING trials count toward n_complete, so there may be
            # no completed trial yet — study.best_trial then raises ValueError,
            # and a completed best trial's value may still be None. The
            # original code crashed in both cases.
            try:
                best_value = study.best_trial.value
            except ValueError:
                return
            if best_value is not None and best_value < self._test_min_score:
                study.stop()


def objective(
    trial: Trial, params: dict, bys: Base_Bayesian, result_queue, score_queue
) -> float:
    """Optuna objective: delegate one trial to ``bys.execute_trial``.

    Returns the trial's score, -2000 when the score is not a finite number,
    or -3000 when the trial raised an exception (logged with traceback).
    """
    try:
        common.start_profiler("objective")
        score = bys.execute_trial(trial, params, result_queue, score_queue)
        common.stop_profiler("objective")

        def is_valid_score(v) -> bool:
            # BUGFIX: validate the *converted* float. The original called
            # math.isnan(v) on the raw value; for float-convertible
            # non-floats (e.g. numeric strings) that raised TypeError,
            # which escaped to the outer handler and misreported the trial
            # as -3000 with a spurious traceback instead of -2000.
            try:
                f = float(v)
            except (ValueError, TypeError):
                return False
            return not (math.isnan(f) or math.isinf(f))

        # Reject NaN/inf/non-numeric scores so the sampler is not poisoned.
        if is_valid_score(score):
            return score
        else:
            log(f"迭代 {trial.number} 返回分数异常：{score}")
            return -2000
    except Exception as e:
        msg = f"迭代 {trial.number} 执行任务发生异常：\n{str(e)}\n{traceback.format_exc()}"
        log(msg)
        return -3000


def create_sampler(study_sample_mode):
    """Build the Optuna sampler for the given mode name.

    "贝叶斯" (Bayesian) maps to TPESampler, "随机" (random) to RandomSampler;
    any other value yields None so the caller can reject it.
    """
    if study_sample_mode == "贝叶斯":
        return optuna.samplers.TPESampler()
    if study_sample_mode == "随机":
        return optuna.samplers.RandomSampler()
    return None


def study_task(
    params, bys: Base_Bayesian, n, num_trials, result_queue, score_queue, storage
):
    """Learning task that runs independently inside a worker process.

    Creates (or joins) the shared study, prepares per-task data, then runs
    study.optimize with a stop callback that enforces the trial budget (and,
    if configured, the checkpoint minimum-score abort).
    """
    common.init_env(params)

    log(f"进程 {n+1} 开始执行学习任务，最大迭代数: {num_trials}")

    # Silent mode: suppress Optuna's regular logging.
    if params["silent_mode"]:
        optuna.logging.set_verbosity(optuna.logging.WARNING)

    common.start_profiler("study_task")
    # Create (or re-attach to) the shared study object.
    sample_mode = params.get("study_sample_mode", "贝叶斯")
    study = optuna.create_study(
        study_name=params["study_name"],
        storage=get_fast_client_storage(storage),
        load_if_exists=True,
        direction="maximize",
        sampler=create_sampler(sample_mode),
    )

    # Per-task data preparation (may replace params).
    params = bys.prepare_task_data(params)

    def run_one_trial(trial):
        return objective(trial, params, bys, result_queue, score_queue)

    check_point = params.get("check_point_trials", None)
    if check_point is None:
        # No checkpoint configured: stop only on the trial budget.
        stop_callback = MaxTrialsCallback(
            num_trials, states=(TrialState.COMPLETE, TrialState.RUNNING)
        )
    else:
        # Checkpoint configured: also abort early on a weak best score.
        stop_callback = TestTrialsCallback(
            num_trials,
            check_point,
            params["check_point_min_score"],
        )

    study.optimize(
        run_one_trial,
        callbacks=[stop_callback],
    )
    common.stop_profiler("study_task")


def save_score(params, trial_id, score, param_config, core_str):
    """Append a new-high-score record to the study's result text file.

    Writes four CRLF-terminated lines (banner, trial id + score, params,
    backtest result) to ``params["study_result_txt"]`` in append mode.
    """
    # BUGFIX: the original called f.close() inside the `with` block
    # (redundant — the context manager closes the file) and misused
    # writelines() with single strings (it only worked because a str
    # iterates its characters). Output bytes are unchanged.
    with open(params["study_result_txt"], "a+") as f:
        f.write("==== 新高分数 ====")
        f.write("\r\n")
        f.write(f"迭代: {trial_id} 分数: {score}")
        f.write("\r\n")
        f.write(f"参数: {param_config}")
        f.write("\r\n")
        f.write(f"结果: {core_str}")
        f.write("\r\n")


def run_dashboard(params, storage):
    """Serve the optuna-dashboard web UI for this study (blocking).

    Uses bottle's built-in server on the configured host/port; intended to
    be run in a dedicated child process.
    """
    from bottle import run
    from optuna_dashboard._app import create_app
    from optuna_dashboard.artifact._backend_to_store import to_artifact_store

    artifact_store = to_artifact_store(None)
    dashboard_app = create_app(storage, artifact_store=artifact_store)
    run(
        dashboard_app,
        host=params.get("dashboard_host", "localhost"),
        port=params.get("dashboard_port", 8080),
        quiet=True,
    )


def execute_study(
    params, bys: Base_Bayesian, num_trials, result_queue, score_queue, storage
):
    """Run the parallel study and collect its results.

    Spawns ``study_n_jobs`` workers (via ``os.fork`` when ``use_fork`` is
    set, otherwise a multiprocessing ``Pool``), drains the result/score
    queues while workers run, persists the merged result DataFrame, and
    optionally serves an optuna-dashboard panel in a child process.

    Returns the accumulated result DataFrame (None when no trial ever
    produced a result).
    """
    common.start_profiler("execute_study")

    # Resume from previously saved results if the pickle file is readable.
    result_df = pd.DataFrame()
    if common.safe_isfile(params["study_result_pkl"]):
        result_df = common.safe_load_dataframe(params["study_result_pkl"])
        if result_df is not None and len(result_df) > 0:
            log(f"已找到上一次学习的结果数据，共 {len(result_df)} 条记录")
        else:
            log(f"上一次学习的结果数据文件 {params['study_result_pkl']} 可能损坏，开始全新的学习任务")
            result_df = None
    else:
        log("开始全新的学习任务")

    def process_result_queue(result_queue, result_df, progress_bar: tqdm.tqdm):
        # Drain all pending result frames, merge + persist them, and move
        # the progress bar to the storage's current trial count.
        df_list = []
        while not result_queue.empty():
            df = result_queue.get()
            df_list.append(df)
        new_count = 0
        if len(df_list) > 0:
            new_count = len(df_list)
            # pd.concat silently drops result_df when it is None (fresh start).
            result_df = pd.concat([result_df] + df_list, axis=0, ignore_index=True)
            result_df.sort_values(
                by="score", ascending=False, ignore_index=True, inplace=True
            )
            common.safe_save_dataframe(result_df, params["study_result_pkl"])
            result_df.drop_duplicates(subset=["回测数据"], keep="first").head(
                params["study_result_top_num"]
            ).reset_index(drop=True).to_csv(params["study_result_csv"], encoding="gbk")
            if progress_bar:
                progress_bar.update(
                    min(
                        storage.get_all_trials_count(params["study_name"]),
                        progress_bar.total,
                    )
                    - progress_bar.n
                )
        return result_df, new_count

    max_score = {params["study_name"]: -10000}
    # BUGFIX: result_df may be None here (corrupt-pickle path above); the
    # original `result_df.empty` raised AttributeError in that case.
    if result_df is not None and not result_df.empty:
        max_score[params["study_name"]] = result_df["score"].max()
        log(f'从历史数据中读取到当前最高分数：{max_score[params["study_name"]]}')

    def process_score_queue(score_queue):
        # Record every new best score and append it to the result text file.
        while not score_queue.empty():
            trial_id, score_avg, param_config, core_str = score_queue.get()
            if score_avg > max_score[params["study_name"]]:
                max_score[params["study_name"]] = score_avg
                log(
                    f"迭代 {trial_id} 找到新的最高分数: {score_avg}\n参数: {str(param_config)}\n回测结果: {core_str}"
                )
                save_score(params, trial_id, score_avg, param_config, core_str)

    def check_sub_task_timeout(trials_count, last_update_time):
        # Treat the run as stuck when >95% of trials are done but no progress
        # has been reported for 60 seconds.
        if trials_count > num_trials * 0.95 and time.time() - last_update_time > 60:
            log(
                f"已完成 {trials_count}/{num_trials} 的学习任务，超过60秒未更新进度，中断学习任务。"
            )
            return True
        return False

    try:
        dashboard_proc = None
        if params.get("dashboard_enabled", False):
            log("开始创建 dashboard 可视化面板进程...")
            try:
                # Import is a presence check; missing package is reported below.
                import optuna_dashboard

                dashboard_proc = Process(target=run_dashboard, args=(params, storage))
                dashboard_proc.start()
                if params.get("dashboard_auto_open", False) and (
                    not "批次" in params.keys() or params.get("batch_first", False)
                ):
                    # Give the server 5 seconds to start, then open the panel.
                    time.sleep(5)
                    # BUGFIX: use the same defaults as run_dashboard; direct
                    # indexing raised KeyError when host/port were not set.
                    webbrowser.open_new_tab(
                        f"http://{params.get('dashboard_host', 'localhost')}:{params.get('dashboard_port', 8080)}"
                    )
            except Exception:
                log(
                    "可视化面板需要安装 optuna-dashboard 包才能使用，请执行 pip install optuna-dashboard"
                )
                time.sleep(10)

        log(f"开始创建子任务，任务数 {params['study_n_jobs']}")
        pool = None

        if params["use_fork"]:
            log(f"Linux 下使用 fork 方式创建子任务...")
            child_pids = []
            for n in range(params["study_n_jobs"]):
                pid = os.fork()
                if pid == 0:
                    # Child process: run the study task.
                    study_task(
                        params, bys, n, num_trials, result_queue, score_queue, storage
                    )
                    # Exit via os._exit() so the child never re-enters the
                    # parent's control flow (and spawns no grandchildren).
                    os._exit(0)
                else:
                    # Parent process: remember the child's pid.
                    child_pids.append(pid)

            # Parent loop: drain queues until every child has exited.
            log(f"开始执行学习任务")
            progress_bar = tqdm.trange(num_trials, desc="贝叶斯", unit="迭代")
            last_update_time = time.time()
            while child_pids:
                result_df, new_count = process_result_queue(
                    result_queue, result_df, progress_bar
                )
                if new_count > 0:
                    last_update_time = time.time()
                process_score_queue(score_queue)
                time.sleep(1)

                # Non-blocking reap of finished children.
                while child_pids:
                    pid, status = os.waitpid(child_pids[0], os.WNOHANG)
                    if pid != 0:
                        child_pids.remove(pid)
                        log(f"子进程 {pid} 已结束")
                    else:
                        break

                # Kill remaining children if progress stalled near the end.
                if check_sub_task_timeout(progress_bar.n, last_update_time):
                    for pid in child_pids:
                        try:
                            common.kill_process(pid)
                        except Exception:
                            dbg(f"kill 后台进程失败")
                    break
        else:
            # BUGFIX: the Pool is only needed on this branch; the original
            # created it unconditionally, leaving unused idle workers (also
            # inherited by every forked child) in fork mode.
            pool = Pool(params["study_n_jobs"])
            for n in range(params["study_n_jobs"]):
                pool.apply_async(
                    study_task,
                    args=(
                        params,
                        bys,
                        n,
                        num_trials,
                        result_queue,
                        score_queue,
                        storage,
                    ),
                )
            pool.close()

            # Parent loop: drain queues until all pool workers have exited.
            log(f"开始执行学习任务")
            progress_bar = tqdm.trange(num_trials, desc="贝叶斯", unit="迭代")
            last_update_time = time.time()
            while len([p for p in pool._pool if p.is_alive()]) > 0:
                # Process the result queue.
                result_df, new_count = process_result_queue(
                    result_queue, result_df, progress_bar
                )
                if new_count > 0:
                    last_update_time = time.time()
                process_score_queue(score_queue)
                time.sleep(1)

                # Kill remaining workers if progress stalled near the end.
                if check_sub_task_timeout(progress_bar.n, last_update_time):
                    for p in [p for p in pool._pool if p.is_alive()]:
                        try:
                            common.kill_process(p.pid)
                        except Exception:
                            dbg(f"kill 后台进程失败")
                    break

        # Drain anything produced between the last poll and worker exit.
        result_df, new_count = process_result_queue(result_queue, result_df, progress_bar)
        process_score_queue(score_queue)
        if progress_bar:
            progress_bar.close()

        # Shut down the background dashboard process.
        if dashboard_proc is not None:
            common.kill_process(dashboard_proc.pid)

        common.stop_profiler("execute_study")

        # Release the storage explicitly so it flushes its data file.
        storage.free()

        return result_df
    except KeyboardInterrupt:
        # Flush storage, then hard-kill this process (and its tree).
        storage.free()
        common.kill_process(os.getpid())
        exit()


def Bayesian_Search(params: dict, bys: Base_Bayesian) -> pd.DataFrame:
    """Entry point: run the full parameter search and return its results.

    Prepares the environment and global data, optionally resumes from
    previously saved results, runs ``execute_study`` across worker
    processes, reports the best trial, and finally hands the results to
    ``bys.study_finished``. Exits the process when the sampler mode is
    unsupported, stale storage cannot be removed, or no valid result was
    produced.
    """
    start_time = datetime.now()

    common.init_env(params)

    # On POSIX, fork creates workers fastest and maximizes shared memory.
    params["use_fork"] = (
        params["use_fork"] and os.name == "posix" and hasattr(os, "fork")
    )
    if params["use_fork"]:
        # fork mode shares memory directly, so no swap file is needed.
        log(f"Linux 下启用 fork 模式，不创建交换文件")
        params["study_params_swap_pkl"] = None

    # Global (per-run) data preparation on a copy of params.
    params = bys.prepare_global_data(params.copy())

    # Silent mode: suppress Optuna's regular logging.
    if params["silent_mode"]:
        optuna.logging.set_verbosity(optuna.logging.WARNING)

    # No previous result file means this is not a resume: remove any stale
    # in-memory storage file (retrying up to 10 times, 2 s apart).
    if not common.safe_isfile(params["study_result_pkl"]):
        log(f"学习数据文件不存在，开始新的学习")
        retry_cnt = 0
        while common.safe_isfile(params["study_storage_file"]):
            time.sleep(2)
            retry_cnt += 1
            if retry_cnt > 10:
                log(f"删除无效的内存数据文件失败，程序退出...")
                exit()

            dbg(f"发现没有学习数据的内存数据文件，第 {retry_cnt} 尝试删除...")
            common.safe_remove_file(params["study_storage_file"])

    # Validate the sampler mode before spawning any worker.
    study_sample_mode = params.get("study_sample_mode", "贝叶斯")
    sampler = create_sampler(study_sample_mode)
    if sampler is None:
        log(f"不支持的 study_sample_mode：{study_sample_mode}")
        exit()

    # Shared queues and the multi-task storage proxy used by all workers.
    manager = Manager()
    result_queue = manager.Queue()
    score_queue = manager.Queue()
    storage = manager.MultiTaskStorage(params["study_storage_file"])

    # Run the parallel study.
    result_df = execute_study(
        params, bys, params["study_num_trials"], result_queue, score_queue, storage
    )

    study_df = None
    # BUGFIX: execute_study can return None when no trial ever produced a
    # result; the original `len(result_df)` raised TypeError in that case.
    if result_df is not None and len(result_df) > 0:
        # Reload the finished study (autosave disabled) to report the best trial.
        study = optuna.create_study(
            study_name=params["study_name"],
            storage=MultiTaskStorage(
                params["study_storage_file"], autosave_interval=None
            ),
            load_if_exists=True,
            direction="maximize",
            sampler=create_sampler(study_sample_mode),
        )

        study_df = study.trials_dataframe(attrs=("number", "value", "params", "state"))

        log(f"学习完成，总用时：{datetime.now() - start_time}")
        log(f"最佳参数：{study.best_params}")
        log(f"最佳得分：{study.best_value}")
    else:
        log(
            f"学习结束，没有找到有效的学习结果，请检查基础策略配置，总用时：{datetime.now() - start_time}"
        )
        exit()

    # optuna.visualization.plot_contour(study)
    # optuna.visualization.plot_param_importances(study)
    # optuna.visualization.plot_intermediate_values(study)

    # Notify the strategy implementation that the study is finished.
    bys.study_finished(params, result_df, study_df)

    return result_df
