# executor.py
"""
Core executor.
- Uses ThreadPoolExecutor(max_workers=MAX_WORKERS)
- Generates a unique request_id per request and includes it in every log line
- Saves the user's shell script to scripts/<request_id>.sh and logs the script content
- Executes the script (bash script_path) and waits for completion (timeout SCRIPT_TIMEOUT)
- Deletes (or keeps) the script according to DELETE_POLICY
- Returns a dict: { request_id, success(bool), exit_code, stdout, stderr }
"""

import os
import time
import uuid
import subprocess
from concurrent.futures import ThreadPoolExecutor
import threading

import config
from logger_config import logger

# Global thread pool shared by all submitted tasks
EXECUTOR = ThreadPoolExecutor(max_workers=config.MAX_WORKERS)

SCRIPTS_DIR = config.SCRIPTS_DIR
os.makedirs(SCRIPTS_DIR, exist_ok=True)  # ensure the scripts directory exists at import time

# Cap the size of captured output to avoid memory blow-up (tunable)
MAX_OUTPUT_CHARS = 200_000  # 200k chars

def _gen_request_id(user_id: str) -> str:
    return f"{int(time.time()*1000)}_{user_id}_{uuid.uuid4().hex[:8]}"

def _safe_write_file(path: str, content: str):
    with open(path, "w", encoding="utf-8") as f:
        f.write(content)

def _safe_remove(path: str):
    """Delete *path* if it exists; log (never raise) on any failure."""
    try:
        if not os.path.exists(path):
            return
        os.remove(path)
        logger.info(f"[CLEANUP] removed file {path}")
    except Exception as e:
        logger.warning(f"[CLEANUP] failed to remove {path}: {e}")

def _truncate_output(s: str) -> str:
    """Cap *s* at MAX_OUTPUT_CHARS characters; a None input becomes an empty string."""
    if s is None:
        return ""
    if len(s) <= MAX_OUTPUT_CHARS:
        return s
    return s[:MAX_OUTPUT_CHARS] + "\n...output truncated..."

def _execute_script_blocking(script_path: str, timeout: int, request_id: str):
    """
    Run the script in a subprocess from a worker thread, blocking until it
    finishes or exceeds *timeout* seconds.

    Args:
        script_path: Path of the shell script; executed as ``bash <script_path>``.
        timeout: Maximum runtime in seconds before the process is terminated.
        request_id: Correlation id included in every log line.

    Returns:
        Tuple ``(exit_code, stdout, stderr, timed_out)``. ``exit_code`` is -1
        when the process raised an unexpected exception (e.g. failed to start).
    """
    cmd = ["bash", script_path]

    logger.info(f"[{request_id}] [EXEC] running command: {' '.join(cmd)}")
    start_time = time.time()

    try:
        # Popen + communicate() to capture output without pipe-buffer deadlocks
        proc = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            encoding="utf-8",
            errors="replace",
            cwd=SCRIPTS_DIR
        )

        try:
            stdout, stderr = proc.communicate(timeout=timeout)
            timed_out = False
        except subprocess.TimeoutExpired:
            # Timed out: try terminate then kill
            logger.error(f"[{request_id}] [TIMEOUT] process exceeded {timeout}s, terminating...")
            timed_out = True
            proc.terminate()
            try:
                # Use communicate() (NOT wait()) after a timeout: it drains the
                # stdout/stderr pipes, avoiding the documented deadlock when the
                # child has filled the OS pipe buffer.
                stdout, stderr = proc.communicate(timeout=5)
            except subprocess.TimeoutExpired:
                # Process ignored SIGTERM: force-kill, then reap unconditionally
                # so we never leak a zombie or raise out of this helper.
                logger.error(f"[{request_id}] [TIMEOUT] terminate failed, killing process...")
                proc.kill()
                stdout, stderr = proc.communicate()

        exit_code = proc.returncode
        elapsed = time.time() - start_time
        logger.info(f"[{request_id}] [EXEC] finished exit_code={exit_code} elapsed={elapsed:.2f}s")

        return exit_code, stdout or "", stderr or "", timed_out

    except Exception as e:
        logger.exception(f"[{request_id}] [EXEC] exception running script: {e}")
        return -1, "", str(e), False


def execute_task(user_id: str, usap_id: str, shell_content: str) -> dict:
    """
    Task function submitted to the thread pool (runs in a worker thread):
    - generate a request_id
    - write the script file and log the script content
    - call the execution helper and log stdout/stderr
    - delete the script according to the delete policy
    - return a result dict

    Returns:
        dict with keys: request_id, success (bool), exit_code, timed_out,
        stdout, stderr (stdout/stderr already truncated).
    """
    request_id = _gen_request_id(user_id)
    script_filename = f"{request_id}.sh"
    script_path = os.path.join(SCRIPTS_DIR, script_filename)

    logger.info(f"[{request_id}] [START] user={user_id} usap_id={usap_id}")
    logger.info(f"[{request_id}] [SCRIPT_PATH] {script_path}")
    # log script content (careful: might be long)
    logger.info(f"[{request_id}] [SCRIPT_CONTENT_BEGIN]\n{shell_content}\n[SCRIPT_CONTENT_END]")

    # Save script (no modification) — the shell_content is executed as-is
    # (a trailing newline is appended if missing so the final line is complete)
    _safe_write_file(script_path, shell_content if shell_content.endswith("\n") else shell_content + "\n")
    try:
        # 0o750 = rwxr-x---; best-effort, since the script is run via `bash <path>` anyway
        os.chmod(script_path, 0o750)
    except Exception:
        pass

    # Execute (blocks this worker thread until completion or timeout)
    exit_code, stdout, stderr, timed_out = _execute_script_blocking(script_path, config.SCRIPT_TIMEOUT, request_id)

    # Truncate large outputs before returning or including in logs
    stdout_t = _truncate_output(stdout)
    stderr_t = _truncate_output(stderr)

    # Log outputs with request id
    if stdout_t:
        logger.info(f"[{request_id}] [STDOUT]\n{stdout_t}")
    if stderr_t:
        logger.error(f"[{request_id}] [STDERR]\n{stderr_t}")

    success = (exit_code == 0 and not timed_out)

    # Delete policy: "always" | "on_success" | "never"; any other value keeps the file
    try:
        if config.DELETE_POLICY == "always":
            _safe_remove(script_path)
        elif config.DELETE_POLICY == "on_success" and success:
            _safe_remove(script_path)
        elif config.DELETE_POLICY == "never":
            logger.info(f"[{request_id}] [KEEP] delete policy = never, keeping script.")
        else:
            # if other unknown policy, keep for safety
            # (this branch is also reached for "on_success" when the run failed)
            logger.info(f"[{request_id}] [KEEP] delete policy = {config.DELETE_POLICY}, keeping script.")
    except Exception as e:
        logger.warning(f"[{request_id}] error during deletion policy handling: {e}")

    result = {
        "request_id": request_id,
        "success": success,
        "exit_code": exit_code,
        "timed_out": timed_out,
        "stdout": stdout_t,
        "stderr": stderr_t,
    }
    logger.info(f"[{request_id}] [END] success={success} exit_code={exit_code} timed_out={timed_out}")
    return result


def submit_task(user_id: str, usap_id: str, shell_content: str):
    """
    Schedule execute_task on the shared thread pool and return its Future.
    The caller may block on future.result() to wait for completion.
    """
    future = EXECUTOR.submit(execute_task, user_id, usap_id, shell_content)
    return future
