import subprocess
import sys
import os
import time
import signal
import shutil
import glob
import select  # <--- IMPORT THE SELECT MODULE

# ================== Configuration ==================
# Everything tunable about the grid search lives in this section.
# 1. Path of the Python interpreter (reuse the one running this launcher).
PYTHON_EXECUTABLE = sys.executable

# Module launched via `python -m` for every run.
TARGET_SCRIPT_MODULE = "verl.trainer.main_ppo"

# Dataset name -> {train, val} parquet file paths.
DATASETS = {
    "lmsys": {
        "train": "/data/zhujunjie/datasets/convert_lmsys_80/train.parquet",
        "val": "/data/zhujunjie/datasets/convert_lmsys_80/test.parquet"
    },
    #"gsm8k": {
    #    "train": "/data/zhujunjie/datasets/convert_gsm8k/train.parquet",
    #    "val": "/data/zhujunjie/datasets/convert_gsm8k/test.parquet"
    #}
}

# Train batch sizes to sweep. Should be divisible by 8 (GPUs per node,
# see trainer.n_gpus_per_node below) — main() warns otherwise.
BATCH_SIZES = [128]
# TCP ports force-cleared between runs (see kill_processes_on_ports).
PORTS_TO_CLEAR = [5555, 5556] 

# Named test variants; each contributes extra hydra-style overrides
# appended to the base command.
TEST_CONFIGURATIONS = {
    #"merge": {
    #    "name": "merge",
    #    "args": {
    #        "+actor_rollout_ref.rollout.migration_threshold": 120.0,
    #        "+actor_rollout_ref.rollout.enable_dynamic_spec": "True",
    #    }
    #},
    #"dynamic": {
    #    "name": "dynamic",
    #    "args": {
    #        "+actor_rollout_ref.rollout.migration_threshold": 150000.0,
    #        "+actor_rollout_ref.rollout.enable_dynamic_spec": "True",
    #    }
    #},
    #"spec_baseline": {
    #    "name": "spec_baseline",
    #    "args": {
    #        "+actor_rollout_ref.rollout.migration_threshold": 150000.0,
    #        "+actor_rollout_ref.rollout.enable_dynamic_spec": "False",
    #    }
    #},
    "migration": {
        "name": "migration",
        "args": {
            "+actor_rollout_ref.rollout.migration_threshold": 100.0,
            "+actor_rollout_ref.rollout.enable_dynamic_spec": "False",
        }
    }
}

# Directory all per-run log files are written to (created by main()).
LOG_OUTPUT_DIR = "/verl/experiment_logs/grid_search_results_test_thoughputs_spec_baseline"

# Hydra-style overrides shared by every run.
BASE_COMMAND_ARGS = [
    'data.max_prompt_length=1024',
    'data.max_response_length=2048',
    'data.truncation=right',
    'actor_rollout_ref.rollout.name=sglang',
    'actor_rollout_ref.model.path=/data/zhujunjie/models/Llama-3.1-8B-Instruct',
    'actor_rollout_ref.actor.optim.lr=1e-6',
    'actor_rollout_ref.actor.use_dynamic_bsz=True',
    'actor_rollout_ref.model.enable_gradient_checkpointing=True',
    'actor_rollout_ref.model.use_remove_padding=True',
    'critic.model.use_remove_padding=True',
    'actor_rollout_ref.actor.fsdp_config.param_offload=True',
    'actor_rollout_ref.actor.fsdp_config.optimizer_offload=True',
    'actor_rollout_ref.rollout.tensor_model_parallel_size=2',
    'actor_rollout_ref.rollout.gpu_memory_utilization=0.32',
    'actor_rollout_ref.rollout.dtype=float16',
    'actor_rollout_ref.rollout.load_format=auto',
    'actor_rollout_ref.rollout.temperature=0.0',
    '+actor_rollout_ref.rollout.speculative_algorithm=EAGLE3',
    '+actor_rollout_ref.rollout.speculative_draft_model_path=/data/zhujunjie/models/sglang-EAGLE3-Llama-3.1-Instruct-8B',
    '+actor_rollout_ref.rollout.speculative_num_steps=4',
    '+actor_rollout_ref.rollout.speculative_eagle_topk=6',
    '+actor_rollout_ref.rollout.speculative_num_draft_tokens=32',
    'critic.optim.lr=1e-5',
    'critic.model.path=/data/zhujunjie/models/Llama-3.1-8B-Instruct',
    'critic.model.fsdp_config.param_offload=True',
    'critic.model.fsdp_config.optimizer_offload=True',
    'algorithm.kl_ctrl.kl_coef=0.001',
    'trainer.logger=[\'console\']',
    'trainer.val_before_train=False',
    'trainer.default_hdfs_dir=null',
    'trainer.total_training_steps=6',
    'trainer.n_gpus_per_node=8',
    'trainer.nnodes=1',
    'trainer.save_freq=-1',
    'trainer.test_freq=10000',
    'trainer.total_epochs=1'
]

# Watchdog tuning: a run is declared stalled when its log file has not been
# modified for STALL_DETECTION_TIMEOUT seconds, but only after the run has
# been alive for at least GRACE_PERIOD_SECONDS (startup is slow).
STALL_DETECTION_TIMEOUT = 90
GRACE_PERIOD_SECONDS = 360
RETRY_DELAY_SECONDS = 10
# start_step used when no resumable log file exists.
INITIAL_START_STEP = 0
# NOTE(review): written to /verl/outputs/false_samples.txt at the end of
# main(), but nothing in this script ever appends to it — confirm whether
# it is a leftover from an earlier version.
false_sample_list = []
# ================== End of configuration ==================

class StallTimeoutError(Exception):
    """Raised by run_single_test when the monitored log file stops updating
    for longer than STALL_DETECTION_TIMEOUT seconds after the grace period."""
    pass

# ---- Helper utilities: command lookup, log inspection, process cleanup ----
def find_command(cmd):
    """Return True if *cmd* resolves to an executable on PATH."""
    located = shutil.which(cmd)
    return located is not None

def get_log_mtime(log_path):
    """Return the last-modification time of *log_path*, or 0 if unavailable.

    Uses EAFP: a single os.path.getmtime call replaces the original
    os.path.exists pre-check, which was both redundant (the OSError handler
    already covers a missing file) and a TOCTOU race (the file could vanish
    between the check and the stat). Callers treat 0 as "no log activity".
    """
    try:
        return os.path.getmtime(log_path)
    except OSError:
        # Missing file, permission error, etc. -> no activity observed.
        return 0

def get_resume_start_step(log_path):
    """Derive the start_step for resuming a run from its existing log file.

    Reads the last non-header data row (first CSV field = step number) and
    returns that step + 2 (one extra step as warm-up headroom). Falls back
    to INITIAL_START_STEP when the log is missing, empty, or unparsable.
    """
    if not os.path.exists(log_path) or os.path.getsize(log_path) == 0:
        print(f"[*] 日志 '{os.path.basename(log_path)}' 不存在或为空。将使用初始 start_step: {INITIAL_START_STEP}")
        return INITIAL_START_STEP

    try:
        with open(log_path, 'r', encoding='utf-8') as fh:
            # Keep only real data rows: skip blanks and the header line.
            data_rows = [
                row for row in fh.read().splitlines()
                if row.strip() and not row.strip().startswith("step_num")
            ]

        if not data_rows:
            print(f"[*] 日志 '{os.path.basename(log_path)}' 中没有有效的数据行。将使用初始 start_step: {INITIAL_START_STEP}")
            return INITIAL_START_STEP

        final_step = int(data_rows[-1].strip().split(',')[0])
        next_step = max(0, final_step + 2)
        print(f"[*] 从日志 '{os.path.basename(log_path)}' 中恢复进度。最后一个step为 {final_step}，将设置 start_step={next_step} 以进行warm-up。")
        return next_step

    except (ValueError, IndexError) as e:
        # Last row did not parse as "<int>,..." — start over.
        print(f" [!] 警告: 解析日志 '{os.path.basename(log_path)}' 最后一行时出错: {e}。将使用初始 start_step: {INITIAL_START_STEP}")
        return INITIAL_START_STEP
    except Exception as e:
        # Any other I/O problem — start over rather than abort the sweep.
        print(f" [!] 警告: 读取日志 '{log_path}' 时出错: {e}。将使用初始 start_step: {INITIAL_START_STEP}")
        return INITIAL_START_STEP


def kill_gpu_python_processes():
    """Force-kill (SIGKILL) every Python process holding an NVIDIA device open.

    Uses `lsof` on /dev/nvidia[0-9]* to find offenders. Exits the whole
    script if `lsof` is missing, since stale GPU processes would make every
    subsequent run fail anyway.
    """
    if not find_command("lsof"):
        print("\n" + "!"*20); print(" [!] 错误: 未找到 'lsof' 命令。无法清理GPU进程。"); sys.exit(1)
    
    print("\n" + "="*20); print(" [!] 正在使用 lsof 清理所有Python GPU进程...")
    nvidia_devices = glob.glob('/dev/nvidia[0-9]*')
    if not nvidia_devices:
        print(" [*] 未在 /dev/ 下找到 nvidia 设备。"); print("="*20 + "\n"); return
    
    pids_to_kill = set()
    try:
        proc = subprocess.run(["lsof"] + nvidia_devices, capture_output=True, text=True, check=False)
        # lsof exits 1 when it simply finds nothing; only >1 with stderr
        # output suggests an actual failure.
        if proc.returncode > 1 and proc.stderr: 
            print(f" [!] lsof 命令执行可能出错: {proc.stderr.strip()}")
        
        # Column 0 is COMMAND, column 1 is PID. Prefix-matching "python"
        # also catches "python3", "python3.10", etc.
        for line in proc.stdout.strip().split('\n'):
            parts = line.split()
            if len(parts) >= 2 and parts[0].lower().startswith("python"): 
                try:
                    # NOTE(review): set.add on a string cannot raise
                    # ValueError/IndexError — this guard looks vestigial
                    # (perhaps from an earlier int(parts[1]) conversion).
                    pids_to_kill.add(parts[1])
                except (ValueError, IndexError):
                    continue

    except Exception as e: 
        print(f" [!] 解析lsof输出时出错: {e}"); return

    if not pids_to_kill: 
        print(" [*] 没有找到正在使用GPU的Python进程。"); print("="*20 + "\n"); return

    print(f" [*] 发现Python GPU进程PID: {', '.join(pids_to_kill)}"); print(" [*] 正在发送 SIGKILL 信号...")
    try:
        # SIGKILL via the external `kill` binary; PIDs are passed as strings.
        subprocess.run(["kill", "-9"] + list(pids_to_kill), check=True)
        print(" [*] GPU上的Python进程清理完成。")
    except Exception as e: 
        print(f" [!] 执行 kill 命令时出错: {e}")
    
    # Brief pause so the kernel can actually reap the killed processes
    # before the next run starts grabbing GPUs.
    print("="*20 + "\n"); time.sleep(2)

def kill_processes_on_ports(ports_to_clear):
    """Locate every process listening on the given TCP ports and SIGKILL it.

    Prints a warning and returns without doing anything when `lsof` is not
    installed.
    """
    if not find_command("lsof"):
        print("\n" + "!"*20); print(" [!] 错误: 未找到 'lsof' 命令。无法清理网络端口。"); return

    print(f"\n" + "="*20 + f" [!] 正在清理端口: {ports_to_clear} " + "="*20)

    victims = set()
    for port in ports_to_clear:
        try:
            # 'lsof -t' emits bare PIDs; '-i tcp:PORT' restricts the search
            # to TCP endpoints on that port.
            listing = subprocess.run(
                ["lsof", "-ti", f"tcp:{port}"],
                capture_output=True, text=True, check=False,
            )
            if listing.stdout:
                victims.update(
                    pid for pid in listing.stdout.strip().split('\n') if pid.isdigit()
                )
        except Exception as e:
            print(f" [!] 在检查端口 {port} 时发生错误: {e}")

    if not victims:
        print(" [*] 未在指定端口上找到活动的进程。")
        print("="*40 + "\n")
        return

    print(f" [*] 发现占用端口的进程PID: {', '.join(victims)}")
    print(" [*] 正在发送 SIGKILL 信号...")
    try:
        subprocess.run(["kill", "-9"] + list(victims), check=True)
        print(" [*] 端口清理完成。")
    except Exception as e:
        print(f" [!] 执行 kill 命令时出错: {e}")

    # Give the OS a moment to release the sockets before the next run.
    print("="*40 + "\n"); time.sleep(2)


def run_single_test(command_list, log_path):
    """Launch one training job and babysit it until it succeeds.

    Retries indefinitely on non-zero exit or stall, resuming from the last
    step recorded in *log_path*. Returns True on a clean (exit code 0) run;
    returns False only when the watchdog itself hits an unrecoverable error.

    Stall detection: the trainer appends to *log_path*; if the file's mtime
    stops advancing for STALL_DETECTION_TIMEOUT seconds (once the run is
    older than GRACE_PERIOD_SECONDS), the whole process group is killed and
    the job is restarted from the resumed step.

    Fixes vs. previous revision:
      * removed the debugging leftover `start_step = 4`, which hard-coded
        the resume point and made the get_resume_start_step call dead;
      * removed the local FALSE_CODE flag, which was set but never read.
    """
    retry_count = 0
    test_start_step = 0
    while True:
        retry_count += 1

        # Resume from the log, but also force the start step to strictly
        # advance across retries so a step that crashes the trainer every
        # time cannot trap us in an infinite loop on the same step.
        start_step = get_resume_start_step(log_path)
        test_start_step = max(test_start_step + 1, start_step)
        final_command = list(command_list)
        final_command.append(f'+actor_rollout_ref.rollout.start_step={test_start_step}')
        final_command.append(f'+actor_rollout_ref.rollout.log_file={log_path}')

        env = os.environ.copy()
        env["SGL_DISABLE_TP_MEMORY_INBALANCE_CHECK"] = "True"
        env["PYTHONUNBUFFERED"] = "1"  # stream child output line-by-line

        print("\n" + "#"*80)
        print(f"### [尝试 #{retry_count}] 准备启动任务...")
        print("### 执行命令: \n" + " \\\n    ".join(final_command))
        print("#"*80)

        # preexec_fn=os.setsid puts the child in its own session/process
        # group, so a stall can be cleared with a single killpg that also
        # takes out any worker subprocesses.
        process_args = {
            "stdout": subprocess.PIPE, "stderr": subprocess.STDOUT, "text": True,
            "encoding": 'utf-8', "bufsize": 1, "preexec_fn": os.setsid, "env": env
        }
        process = subprocess.Popen(final_command, **process_args)

        start_run_time = time.time()

        try:
            last_log_mtime = get_log_mtime(log_path)
            last_activity_time = time.time()

            # Monitoring loop:
            #   1. process.poll() provides the clean exit condition,
            #   2. select() keeps stdout draining without blocking,
            #   3. the log file's mtime drives stall detection — it runs on
            #      every iteration even when the child prints nothing.
            while True:
                if process.poll() is not None:
                    break

                # Forward pending child output; the 0.1s select timeout
                # guarantees the stall check below still runs regularly.
                ready_to_read, _, _ = select.select([process.stdout], [], [], 0.1)
                if ready_to_read:
                    line = process.stdout.readline()
                    if line:
                        sys.stdout.write(line)
                        sys.stdout.flush()

                current_mtime = get_log_mtime(log_path)
                if current_mtime > last_log_mtime:
                    last_activity_time = time.time()
                    last_log_mtime = current_mtime

                if (time.time() - last_activity_time > STALL_DETECTION_TIMEOUT and
                    time.time() - start_run_time > GRACE_PERIOD_SECONDS):
                    raise StallTimeoutError(f"日志文件 '{os.path.basename(log_path)}' 已超过 {STALL_DETECTION_TIMEOUT} 秒未更新。")

            # Drain whatever output is still buffered after the child exits.
            for line in process.stdout.readlines():
                sys.stdout.write(line)
                sys.stdout.flush()

            return_code = process.poll()
            if return_code == 0:
                print("\n" + "-"*60)
                print(f"### [成功] 任务 {os.path.basename(log_path)} 已成功完成。")
                print("-" * 60)
                return True
            else:
                print("\n" + "!"*60)
                print(f"### [错误] 任务 {os.path.basename(log_path)} 以错误码 {return_code} 退出。")
                print("!"*60)
                kill_gpu_python_processes()
                kill_processes_on_ports(PORTS_TO_CLEAR)
                print(f" [*] 等待 {RETRY_DELAY_SECONDS} 秒后重试...")
                time.sleep(RETRY_DELAY_SECONDS)
                # Fall through: the outer while-loop retries the job.

        except StallTimeoutError as e:
            print("\n" + "!"*60)
            print(f"### [停滞超时] 任务 {os.path.basename(log_path)} 被检测到卡住。")
            print(f"### 原因: {e}")
            print("!"*60)
            try:
                # Kill the entire process group so worker children die too.
                os.killpg(os.getpgid(process.pid), signal.SIGKILL)
            except ProcessLookupError:
                pass  # process already gone
            kill_gpu_python_processes()  # extra cleanup for GPU processes
            kill_processes_on_ports(PORTS_TO_CLEAR)
            print(f" [*] 等待 {RETRY_DELAY_SECONDS} 秒后从断点处重试...")
            time.sleep(RETRY_DELAY_SECONDS)
            # Outer while-loop retries from the resumed step.

        except Exception as e:
            print(f"\n### [严重错误] 看门狗程序自身发生错误: {e}")
            kill_gpu_python_processes()
            kill_processes_on_ports(PORTS_TO_CLEAR)
            return False  # unrecoverable watchdog error — abort the sweep

# ---- Entry point: iterate the (dataset x batch size x config) grid ----
def main():
    """Drive the grid search.

    For every (dataset, batch size, test configuration) combination: build
    the hydra-style command line, skip combinations whose log shows they
    already completed, and run the rest under the run_single_test watchdog.
    Aborts the whole suite on the first combination that ultimately fails.
    """
    print("="*60)
    print("=== Verl 网格化测试启动器 (带停滞检测与断点续传) ===")
    print(f"测试模块: {TARGET_SCRIPT_MODULE}")
    print(f"数据集: {list(DATASETS.keys())}")
    print(f"批处理大小: {BATCH_SIZES}")
    print(f"测试配置: {list(TEST_CONFIGURATIONS.keys())}")
    print("="*60)

    os.makedirs(LOG_OUTPUT_DIR, exist_ok=True)
    print(f"[*] 日志将输出到: {LOG_OUTPUT_DIR}")

    for dataset_name, dataset_paths in DATASETS.items():
        for bs in BATCH_SIZES:
            for test_name, config_details in TEST_CONFIGURATIONS.items():

                print(f"\n\n{'='*20} 准备测试: [Dataset: {dataset_name}] | [Batch Size: {bs}] | [Type: {test_name}] {'='*20}")

                # Micro batch size is per-GPU; 8 matches
                # trainer.n_gpus_per_node in BASE_COMMAND_ARGS.
                if bs % 8 != 0:
                    print(f" [!] 警告: Batch size {bs} 不能被8整除，micro_batch_size 将被截断。")
                micro_bs = bs // 8

                log_filename = f"{dataset_name}_{test_name}_bs{bs}.txt"
                log_path = os.path.join(LOG_OUTPUT_DIR, log_filename)

                current_command = [PYTHON_EXECUTABLE, "-m", TARGET_SCRIPT_MODULE]
                current_command.extend(BASE_COMMAND_ARGS)

                current_command.extend([
                    f"data.train_files={dataset_paths['train']}",
                    f"data.val_files={dataset_paths['val']}",
                    f"data.train_batch_size={bs}",
                    f"actor_rollout_ref.actor.ppo_mini_batch_size={bs}",
                    f"actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu={micro_bs}",
                    f"actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu={micro_bs}",
                    f"+actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu={micro_bs}",
                    f"critic.ppo_micro_batch_size_per_gpu={micro_bs}"
                ])

                # Per-variant overrides (migration threshold, dynamic spec, …).
                for arg, value in config_details['args'].items():
                    current_command.append(f"{arg}={value}")

                # Skip combinations whose log already records progress past
                # the sweep's step budget.
                start_step = get_resume_start_step(log_path) + 1
                if start_step >= 52:
                    continue

                success = run_single_test(
                    command_list=current_command,
                    log_path=log_path
                )

                if not success:
                    print(f"\n[!!!] 任务 {log_filename} 最终失败，无法继续。测试套件终止。")
                    return

    print("\n" + "*"*60)
    print("***** 所有网格化测试任务已处理完毕 *****")
    print("*"*60)
    # Fix: ensure the output directory exists — previously this final write
    # raised FileNotFoundError (crashing an otherwise-successful sweep)
    # whenever /verl/outputs had not been created beforehand.
    # NOTE(review): false_sample_list is never appended to anywhere in this
    # script, so this currently records an empty list — presumably leftover
    # from an earlier version; confirm before relying on the file contents.
    os.makedirs("/verl/outputs", exist_ok=True)
    with open("/verl/outputs/false_samples.txt", "a") as f:
        f.write(f"{false_sample_list}")

if __name__ == "__main__":
    # Start from a clean slate: nuke leftover GPU processes and busy ports
    # from any previous (possibly crashed) run before launching the sweep.
    kill_gpu_python_processes()
    kill_processes_on_ports(PORTS_TO_CLEAR)
    main()
