import os
import ast
import signal
import traceback
import subprocess
from time import sleep
from celery import signals, group, chord
from datetime import datetime
from apps.tms.job.utils.db import *
from apps.tms.job.utils.node import execute_step_on_node
from apps.tms.job.utils.distribute import (
    distribute_cases_to_node,
    distribute_cases_by_order,
)
from apps.tms.testcase.project import build_pytest_project
from application import settings


def start(job_id):
    """Kick off a job: mark it RUNNING and enqueue the dispatch task."""
    # Reset the job's bookkeeping fields before anything runs.
    update_job(
        job_id,
        status=Status.RUNNING.value,
        percentage=0,
        run_time=datetime.now(),
        finished=None,
    )
    execution = init_execution(job_id)
    if execution.job.type == "1":
        # Every JobLog row of this execution becomes a (testcase_id, steps) pair.
        pending_cases = [
            (log.id, [])
            for log in JobLog.objects.filter(job_execution=execution.id)
        ]
        run_task.delay(job_id, execution.id, pending_cases)


def stop(job_id):
    """Stop a job: mark it and its latest execution STOPPED, revoke the task.

    Sends SIGINT via Celery's revoke so the worker aborts the running task.
    """
    update_job(job_id, status=Status.STOPPED.value, finished=datetime.now())
    job = Job.objects.filter(id=job_id).first()
    if job is None:
        # The original chained `.first().jobexecution_set` and raised
        # AttributeError for an unknown job id; nothing more to do here.
        return
    job_execution = job.jobexecution_set.first()  # `.all().first()` was redundant
    if job_execution:
        update_execution(
            job_execution.id, status=Status.STOPPED.value, finished=datetime.now()
        )
        # NOTE(review): revoke() takes a Celery task id; passing the DB job_id
        # assumes task ids are set to the job id elsewhere — confirm upstream.
        app.control.revoke(job_id, terminate=True, signal=signal.SIGINT)


def retry(job_id, job_execution_id, testcases, sync_case=False, executor=None):
    """Re-run the given test cases for an existing execution."""
    # Put the job back into a running state before queueing the retry.
    update_job(
        job_id,
        status=Status.RUNNING.value,
        percentage=0,
        run_time=datetime.now(),
        finished=None,
    )
    ordered_cases = distribute_cases_by_order(testcases)
    run_task.delay(job_id, job_execution_id, ordered_cases, sync_case, executor)


@app.task
def run_task(
    job_id, job_execution_id=None, test_cases=None, sync_case=False, executor=None
):
    """Dispatch a job's test cases to its execution nodes.

    Cases are split across the job's nodes; depending on whether the job has
    a plugin attached they run through the plugin pipeline or as a generated
    pytest project. On any failure the job (and execution, if known) is
    marked BLOCK with the traceback, and the exception is re-raised so
    Celery records the failure too.
    """
    try:
        job = Job.objects.filter(id=job_id).first()
        # Execution nodes assigned to this job.
        execute_nodes = job.nodes.all()
        # Map each node to its share of the test cases.
        distributions = distribute_cases_to_node(execute_nodes, test_cases)

        if not distributions:
            # No node could take any case: mark the job blocked.
            return update_job(job_id, status=Status.BLOCK.value)

        if job.plugin:
            # A plugin is attached — run cases through the plugin pipeline.
            execute_with_plugin(
                job,
                job_execution_id,
                distributions,
                len(test_cases),
                executor,
                sync_case,
            )
        else:
            # No plugin — build and run a plain pytest project per node.
            execute_without_plugin(job_execution_id, distributions)
        # All dispatching done: roll job/execution status up.
        finalize_execution(job_id, job_execution_id)
    except Exception:  # binding `as e` was unused
        update_job(
            job_id,
            status=Status.BLOCK.value,
            percentage=100,
            detail=traceback.format_exc(),
            finished=datetime.now(),
        )
        if job_execution_id is not None:
            update_execution(
                job_execution_id, status=Status.BLOCK.value, finished=datetime.now()
            )
        raise  # Re-raise so the failure is visible to Celery / callers


@app.task
def execute_with_plugin(
    job, job_execution_id, distributions, total_cases, executor, sync_case
):
    """Sync the job's plugin to every node, then fan the cases out in parallel."""
    plugin_path = os.path.join(
        settings.TARGET_SCRIPTS_ROOT, str(job_execution_id) + ".py"
    )

    # Step 1: copy the plugin script to each participating node and wait.
    sync_results = group(
        async_synchronize_directory.s(node, str(job.plugin), plugin_path)
        for node in distributions
    ).apply_async()
    while not sync_results.ready():
        sleep(2)

    # Fall back to the job's configured executor when none was given.
    chosen_executor = executor or job.executor

    # Step 2: launch every test case on its assigned node.
    case_signatures = [
        run_testcase.s(
            node,
            plugin_path,
            testcase,
            chosen_executor,
            job.params,
            sync_case,
            job.policy,
        )
        for node, node_testcases in distributions.items()
        for testcase in node_testcases
    ]
    case_results = group(case_signatures).apply_async()

    monitor_and_update_progress(case_results, job.id, total_cases)


@app.task
def execute_without_plugin(job_execution_id, distributions):
    """Build a pytest project per node's case set and run it on that node.

    For each node: select its JobLog rows, build the project locally,
    rsync it to the node, then execute the project's main.py there.
    """
    # These depend only on the execution id — compute them once, not per node.
    local_path = os.path.join(settings.TARGET_SCRIPTS_ROOT, str(job_execution_id))
    remote_path = settings.TARGET_SCRIPTS_ROOT
    remote_entry = os.path.join(remote_path, str(job_execution_id), "main.py")
    for node, test_cases in distributions.items():
        # The case set differs per node, so the project is rebuilt each time.
        test_case_ids = [test_case[0] for test_case in test_cases]
        selected_testcases = JobLog.objects.filter(id__in=test_case_ids)
        build_pytest_project(local_path, selected_testcases)
        async_synchronize_directory(node, local_path, remote_path)
        execute_step_on_node(node, remote_entry)


@app.task
def run_testcase(
    node,
    plugin=None,
    testcase=None,
    executor=None,
    params=None,
    sync_case=False,
    policy="0",
):
    """Run one test case (a ``(joblog_id, steps)`` pair) on a node.

    Marks the case RUNNING, then executes its steps concurrently
    (policy "0") or sequentially (any other policy). On failure the case
    is marked BLOCK with the traceback and the exception is re-raised.

    params defaults to an empty dict; a ``None`` sentinel replaces the
    original mutable-default-argument (``params={}``) pitfall.
    """
    if params is None:
        params = {}
    joblog_id = testcase[0]
    try:
        update_execution_case(
            joblog_id,
            status=Status.RUNNING.value,
            run_time=datetime.now(),
            finished=None,
            executor=executor,
        )
        if node and plugin:
            teststeps = get_testcase_object(joblog_id, sync_case).steps
            if policy == "0":
                # Concurrent execution of all steps
                execute_steps_concurrent(joblog_id, node, plugin, params, teststeps)
            else:
                # Sequential, in-order execution
                execute_steps_sequential(joblog_id, node, plugin, params, teststeps)
    except Exception:  # binding `as e` was unused
        if testcase:
            update_execution_case(
                joblog_id,
                status=Status.BLOCK.value,
                detail=traceback.format_exc(),
                finished=datetime.now(),
            )
        raise  # Re-raise so the failure propagates to Celery


@app.task
def run_step(joblog_id, node, plugin, params, step):
    """Execute one test step on a node and persist its result.

    A step without a truthy ``exp`` (expected value) cannot be judged and is
    marked BLOCK immediately. Otherwise the step runs remotely; the node
    returns the step dict serialized as a Python literal string, which is
    parsed back with ``ast.literal_eval`` (safe — evaluates literals only).
    """
    if step.get("exp"):  # `"exp" in step and step.get("exp")` was redundant
        try:
            # Keep the raw response separate: the original rebound `step` to
            # the returned string, so a literal_eval failure made the except
            # block index into a str and crash with TypeError.
            raw = execute_step_on_node(node, plugin, params, step)
            step = ast.literal_eval(raw)
        except Exception as e:
            step["result"] = Result.BLOCK.value
            if "detail" not in step:
                step["detail"] = ""
            step["detail"] += str(e) + "\n" + traceback.format_exc()
    else:
        step["result"] = Result.BLOCK.value
        if "detail" not in step:
            step["detail"] = ""
        step["detail"] += "期望值为空！"
    update_execution_step(joblog_id, step)


@app.task
def execute_steps_concurrent(joblog_id, node, plugin, params, steps):
    """Run all steps in parallel, then roll their results into the job log."""
    signatures = [run_step.s(joblog_id, node, plugin, params, s) for s in steps]
    finalizer = ignore_results_and_update_status.s(joblog_id)
    # chord: execute the group, then fire the callback once every step is done.
    chord(group(signatures))(finalizer)


@app.task
def execute_steps_sequential(joblog_id, node, plugin, params, steps):
    """Run steps one after another (in order), then update the log status.

    Unlike the concurrent variant, each step runs in-process and the next
    step only starts after the previous one has been recorded.
    """
    for step in steps:  # the enumerate() index in the original was never used
        run_step(joblog_id, node, plugin, params, step)
    update_joblog_status_from_result(joblog_id)


@app.task
def ignore_results_and_update_status(_, joblog_id):
    """Chord callback: discard the step results, refresh the log's status.

    The first positional argument is the list of results from the chord
    header; only the joblog id matters here.
    """
    status = update_joblog_status_from_result(joblog_id)
    return status


@app.task
def async_synchronize_directory(node, local_path, remote_path):
    """Copy a directory to a remote node with rsync.

    node: indexable as node[0] = host and node[1] = SSH user (assumed from
    the indexing below — confirm against the node model).
    Raises CalledProcessError if rsync exits non-zero (check=True).
    """
    # Build the command as an argv list and run without shell=True so paths
    # and host strings are never interpreted by a shell.
    rsync_command = [
        "rsync",
        "-avz",
        local_path,  # the f-string wrapper around this was a no-op
        f"{node[1]}@{node[0]}:{remote_path}",
    ]
    subprocess.run(rsync_command, check=True)


@app.task
def monitor_and_update_progress(testcases_results, job_id, total_cases):
    """Poll a Celery group result and keep the job's progress percentage fresh.

    Checks every 2 seconds; percentage = completed / total_cases * 100.
    """
    # Guard the division: the original raised ZeroDivisionError for a job
    # with zero cases.
    total = total_cases or 1
    while not testcases_results.ready():
        sleep(2)
        # Count the sub-results that have finished so far.
        completed = sum(result.ready() for result in testcases_results.results)
        update_job(job_id, percentage=(completed / total) * 100)
    # Record completion even when the group was already done on entry
    # (the original loop body never ran in that case).
    update_job(job_id, percentage=100)
