# Copyright 2024 CHINA MERCHANTS BANK CO., LTD.
# Copyright 2024 Huawei Technologies Co., Ltd
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""
1. 解决examples/grpo/qwen_grpo_tutorial/main.py多机训练获取ma环境master ip问题, 此脚本自动获取，客户无需感知
2. 解决examples/grpo/qwen_grpo_tutorial/main.py在ma平台训练结束后挂起, 无法退出的问题
3. 支持自动读取并行策略等参数并打印, 以便启动训练时再次check
4. 兼容examples/grpo/qwen_grpo_tutorial/main.py入参
"""

import os
import time
import psutil
import socket
import re
import sys
import argparse
import yaml
import random
from easydict import EasyDict
from mindformers.tools.utils import str2bool


def cmd_exec(cmd, just_print=False):
    """Run `cmd` through the shell, or only print it in dry-run mode.

    Args:
        cmd: shell command string to execute.
        just_print: when True, print the command instead of executing it
            (the previous behavior silently did nothing in this mode).

    Returns:
        int: the os.system() exit status, or 0 in dry-run mode.
    """
    if just_print:
        print(cmd, flush=True)
        return 0
    return os.system(cmd)


def get_matching_process():
    """Return the processes running the GRPO tutorial main.py.

    Scans all local processes and keeps those whose command line contains a
    python interpreter and the target script path.

    Returns:
        list[psutil.Process]: matching processes (may be empty).
    """
    # Hoist loop invariants: the target script path never changes per process.
    mindrlhf_path = str(os.getenv("MINDRLHF_PATH", "/home/ma-user/cmb/mindrlhf"))
    target = f"{mindrlhf_path}/examples/grpo/qwen_grpo_tutorial/main.py"
    processes = []
    for proc in psutil.process_iter(["pid", "name", "cmdline"]):
        try:
            cmdline = proc.info["cmdline"]
            print(f'proc.info["cmdline"]:{cmdline}', flush=True)
            if not cmdline:
                continue
            # The interpreter may be "python3" or an absolute path, so use a
            # substring match instead of an exact list-element match, which
            # only matched a literal "python" argv entry.
            is_python = any("python" in part for part in cmdline)
            if is_python and target in cmdline:
                processes.append(proc)
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            # Process vanished or is not inspectable: skip it.
            pass
    return processes


def _monitor_process(process_status):
    """Poll monitored processes and drop the ones that have exited.

    Mutates `process_status` (pid -> status dict) in place so the caller's
    dict shrinks as processes exit. The previous implementation rebound the
    local name to a filtered copy, so the caller's dict never shrank and
    exited pids were re-polled on every call.

    Args:
        process_status: dict mapping pid to {"status", "exit_code"}.

    Returns:
        bool: True while at least one monitored process is still running.
    """
    for pid in list(process_status):
        try:
            proc = psutil.Process(pid)
            if proc.is_running():
                continue  # still alive; keep monitoring it
            exit_code = proc.wait()  # reap the process and fetch its code
            print(f"Process {pid} exited with code {exit_code}")
        except (psutil.NoSuchProcess, psutil.ZombieProcess):
            # Already gone or zombie: treat as exited, exit code unknown.
            pass
        # Remove the exited process so it is not polled again next round.
        del process_status[pid]
    return bool(process_status)


def monitor_process(processes, filename, monitor_time=1):
    """Tail the worker log `filename` until all `processes` have exited.

    New log lines are echoed to stdout as they appear. Handles the log file
    being replaced (inode change) or truncated in place. Every
    `monitor_time` seconds the monitored processes are polled via
    _monitor_process; the loop ends once none of them is running.

    Args:
        processes: psutil.Process objects to watch.
        filename: path of the log file to tail.
        monitor_time: seconds between liveness polls.

    Returns:
        int: 0 if the log contained "Training Over", otherwise 1.
    """
    done_pattern = re.compile("Training Over")
    exit_code = 1
    process_status = {}
    for proc in processes:
        process_status[proc.pid] = {"status": "running", "exit_code": None}

    print(f"Monitoring {filename}...", flush=True)
    try:
        f = open(filename, "r", encoding="utf-8")
    except FileNotFoundError:
        # Previously this fell through with f/inode undefined and crashed
        # with a NameError below; fail cleanly instead.
        print(f"Error: File {filename} not found.", flush=True)
        return exit_code
    # Remember the inode so a replaced file (log rotation) can be detected.
    inode = os.stat(filename).st_ino
    f.seek(0, 2)  # start tailing from the current end of the file
    last_position = f.tell()

    is_running = True
    start_monitor_timestamp = time.time()
    try:
        while True:
            if not is_running:
                return exit_code

            # Reopen when the file was replaced (new inode) or truncated in
            # place (shrank below our read offset), and restart from the top
            # of the new content instead of seeking to a stale offset.
            current_inode = os.stat(filename).st_ino
            if current_inode != inode or os.path.getsize(filename) < last_position:
                print("\nFile truncated. Re-opening...", flush=True)
                f.close()
                f = open(filename, "r", encoding="utf-8")
                inode = current_inode
                last_position = 0

            # Drain every complete line written since the last pass.
            f.seek(last_position)
            line = f.readline()
            while line:
                print(line, end="")
                if done_pattern.search(line):
                    exit_code = 0  # training reported success in the log
                last_position = f.tell()
                line = f.readline()

            time.sleep(0.1)
            now_timestamp = time.time()
            if now_timestamp - start_monitor_timestamp >= monitor_time:
                is_running = _monitor_process(process_status)
                start_monitor_timestamp = now_timestamp
    finally:
        f.close()  # do not leak the handle on return or interrupt


def read_yaml_file(yaml_file_fir):
    """Parse the YAML file at `yaml_file_fir` and return its content."""
    with open(yaml_file_fir, "r", encoding="utf-8") as stream:
        content = yaml.safe_load(stream)
    return content


def run(args):
    """Launch distributed GRPO training via msrun and monitor it to completion.

    Steps:
      1. Resolve the distributed master address from the MA platform
         environment (VC_WORKER_HOSTS), falling back to localhost.
      2. Derive the msrun worker topology from the actor parallel_config in
         the YAML config file.
      3. Rewrite save_strategy_dir in the YAML, assemble the msrun command
         (join=False, i.e. detached) and execute it.
      4. Locate the spawned main.py processes and tail the worker log until
         training finishes, then exit the interpreter with the result code.

    Args:
        args: EasyDict of parsed CLI arguments, forwarded to main.py.
    """
    worker_num = 0
    local_worker_num = 8
    bind_core_state = True
    # VC_WORKER_HOSTS is set by the MA platform and lists all worker
    # hostnames, comma-separated; the first entry becomes the master.
    if os.getenv("VC_WORKER_HOSTS"):
        ip_addr = os.getenv("VC_WORKER_HOSTS").split(',')
        ip_addr_list = []
        for i in ip_addr:
            host_addr_ip = socket.gethostbyname(i)
            ip_addr_list.append(host_addr_ip)

        master_addr = ip_addr_list[0]
    else:
        master_addr = "127.0.0.1"
    os.environ['MASTER_ADDRESS'] = master_addr
    # Rank of this node within the cluster (0 on single-node runs).
    node_rank = int(os.getenv("VC_TASK_INDEX", "0"))
    mindrlhf_path = str(os.getenv("MINDRLHF_PATH", "/home/ma-user/cmb/mindrlhf"))
    cmd = f"{mindrlhf_path}/examples/grpo/qwen_grpo_tutorial/main.py"
    generate_config = ''
    output_dir = "./output"
    yaml_dict = read_yaml_file(args.config)
    print(f'args.config:{args.config}', flush=True)
    print(f'yaml_dict:{yaml_dict}', flush=True)
    # Forward every non-None CLI argument to main.py. output_dir is consumed
    # locally (log/strategy locations) instead of being forwarded.
    for key, value in args.items():
        if value is not None:
            if key == 'output_dir':
                output_dir = value
                continue
            cmd += f" --{key} {value}"

        # Derive the worker topology from the YAML so the user does not have
        # to pass it separately; note this runs even though config has a
        # default and is therefore never None.
        if key == "config":
            data_parallel = int(yaml_dict['actor_config']["parallel_config"]["data_parallel"])
            model_parallel = int(yaml_dict['actor_config']["parallel_config"]["model_parallel"])
            pipeline_stage = int(yaml_dict['actor_config']["parallel_config"]["pipeline_stage"])
            worker_num = data_parallel * model_parallel * pipeline_stage
            if worker_num == 0:
                print(f"Error:worker_num = {worker_num}, please check yaml file {value} parallel_config",
                      flush=True)
                sys.exit(1)
            # Core binding only pays off when a full 8-device node is used.
            if worker_num >= 8:
                local_worker_num = 8
                bind_core_state = True
            else:
                local_worker_num = worker_num
                bind_core_state = False

            generate_config = yaml_dict['generate_config']["model_config"]
    # Persist the strategy dir back into the config file so main.py and this
    # launcher agree on where parallel strategies are saved.
    yaml_dict['rl_config']['save_strategy_dir'] = os.path.join(output_dir, 'strategy')
    with open(args.config, 'w') as f:
        yaml.dump(yaml_dict, f, indent=2)

    # Randomize the HCCL base port to avoid collisions between jobs sharing
    # a host; master_port below stays fixed at 40000.
    HCCL_PORT = random.randint(30000, 60000)

    msrun_cmd = f'export MINDFORMERS_MODEL_CONFIG={generate_config} && '
    msrun_cmd += f'export PYTHONPATH=/home/ma-user/cmb/msadapter/mindtorch/:$PYTHONPATH && '
    msrun_cmd += f'export PYTHONPATH=$MINDRLHF_PATH:$PYTHONPATH && '
    msrun_cmd += f'export PYTHONPATH=$MINDFORMERS_PATH:$PYTHONPATH && '
    msrun_cmd += f'export ENABLE_CELL_REUSE=1 && '
    msrun_cmd += f'export ENABLE_LAZY_INLINE=1 && '
    msrun_cmd += f'export HCCL_IF_BASE_PORT={HCCL_PORT} && '
    msrun_cmd += f'unset RANK_TABLE_FILE; msrun '
    msrun_cmd += f'--worker_num={worker_num} '
    msrun_cmd += f'--bind_core={bind_core_state} '
    msrun_cmd += f'--local_worker_num={local_worker_num} '
    msrun_cmd += f'--master_addr={master_addr} '
    msrun_cmd += f'--master_port={40000} '
    msrun_cmd += f'--node_rank={node_rank} '
    # join=False: msrun returns immediately; we monitor the workers ourselves.
    msrun_cmd += f'--join={False} '
    msrun_cmd += f'--log_dir={output_dir}/msrun_log '
    msrun_cmd += f'--cluster_time_out={600} '
    msrun_cmd += f'{cmd}'
    print('msrun_cmd', msrun_cmd, flush=True)

    cmd_exec(msrun_cmd)
    # First local worker's global index on this node; assumes 8 devices per
    # node — its log file is the one tailed below.
    log_node = str(node_rank * 8)

    # Give msrun time to spawn the worker processes before scanning for them.
    time.sleep(10)
    mf_processes = get_matching_process()
    if not mf_processes:
        # Nothing started: fail the job so the platform reports an error.
        sys.exit(1)

    print("mf_processes: ", mf_processes, flush=True)
    filename = os.path.join(output_dir, "msrun_log", f"worker_{log_node}.log")
    exit_code = monitor_process(mf_processes, filename)
    print("Train Over, exit_code:", exit_code)
    sys.exit(exit_code)


if __name__ == "__main__":
    work_path = os.path.dirname(os.path.abspath(__file__))
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--config',
        default='examples/grpo/qwen_grpo_tutorial/grpo_config.yaml', type=str,
        help='YAML config files')
    parser.add_argument(
        '--tokenizer_dir', default=None, type=str,
        help='dataset directory of data loader to train/finetune.')
    parser.add_argument(
        '--dataset_file', default=None, type=str,
        help='dataset directory of data loader to train/finetune.')
    parser.add_argument(
        '--actor_checkpoint_path', default=None, type=str,
        help='load checkpoint to actor model')
    parser.add_argument(
        '--ref_checkpoint_path', default=None, type=str,
        help='load checkpoint to ref model')
    parser.add_argument(
        '--generate_checkpoint_path', default=None, type=str,
        help='load checkpoint to generate model')
    parser.add_argument(
        '--verifier_function', default="qwen_accuracy_reward,format_reward", type=str,
        help='load checkpoint to ref model')
    parser.add_argument(
        '--verifier_weight', default="1.0,1.0", type=str,
        help='load checkpoint to generate model')
    parser.add_argument(
        '--output_dir', default=None, type=str,
        help='output directory.')
    parser.add_argument(
        '--tensorboard', default=None, type=str2bool,
        help='whether dump tensorboard data')
    parser.add_argument(
        '--save_checkpoint_dir', default='./', type=str,
        help='whether dump tensorboard data')

    args_, rest_args_ = parser.parse_known_args()
    args_ = EasyDict(vars(args_))
    run(args_)
