# Copyright 2024 CHINA MERCHANTS BANK CO., LTD.
# Copyright 2024 Huawei Technologies Co., Ltd
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""
1. 解决run_mindformers.py多机训练获取ma环境master ip问题, 此脚本自动获取，客户无需感知
2. 解决run_mindformers.py在ma平台训练结束后挂起, 无法退出的问题
3. 支持自动读取并行策略等参数并打印, 以便启动训练时再次check
4. 兼容run_mindformers.py入参
"""

import os
import time
import psutil
import socket
import re
import sys
import argparse
import yaml
import random
from easydict import EasyDict
from mindformers.tools.utils import str2bool


def cmd_exec(cmd, just_print=False):
    """Execute a shell command, or only print it when dry-running.

    Args:
        cmd (str): shell command line, executed via ``os.system``.
        just_print (bool): when True, print the command instead of running it.
            (The original silently did nothing in this case, which made the
            dry-run flag useless.)
    """
    if just_print:
        # Dry-run: show what would have been executed.
        print(cmd, flush=True)
    else:
        os.system(cmd)


def get_matching_process():
    """Return the local processes that are running the mindformers entry script.

    A process matches when its command line contains the run_mindformer.py
    path (taken from the MINDFORMERS_PATH environment variable) and some
    argv element containing "python".

    Returns:
        list: matching ``psutil.Process`` objects (may be empty).
    """
    # Hoist the invariant target path out of the per-process loop.
    target = str(os.environ.get('MINDFORMERS_PATH', "/home/ma-user/cmb/mindformers/run_mindformer.py"))
    processes = []
    # Iterate over every running process on the local machine.
    for proc in psutil.process_iter(["pid", "name", "cmdline"]):
        try:
            cmdline = proc.info["cmdline"]
            print(f'proc.info["cmdline"]:{cmdline}', flush=True)
            # The original required an argv element exactly equal to "python",
            # which misses "python3" and absolute interpreter paths.
            if cmdline and target in cmdline \
                    and any("python" in part for part in cmdline):
                processes.append(proc)
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            # Process vanished or is inaccessible; skip it.
            pass
    return processes


def _monitor_process(process_status):
    """Poll the tracked pids once and report whether any are still running.

    Exited pids are removed from ``process_status`` **in place**, so repeated
    calls do not re-inspect them. (The original rebound a local filtered dict
    inside the loop, so removals never reached the caller's dict and later
    status updates were lost.)

    Args:
        process_status (dict): pid -> {"status": ..., "exit_code": ...}.

    Returns:
        bool: True while at least one tracked process is still running.
    """
    for pid in list(process_status):
        try:
            proc = psutil.Process(pid)
            if proc.is_running():
                continue  # process is still alive; keep tracking it
            exit_code = proc.wait()  # reap and fetch the exit code
            print(f"Process {pid} exited with code {exit_code}")
        except (psutil.NoSuchProcess, psutil.ZombieProcess):
            # Process already gone or zombified; no exit code available.
            pass
        # Drop the exited pid so it is not polled again.
        del process_status[pid]
    return bool(process_status)


def monitor_process(processes, filename, monitor_time=1):
    """Tail the worker log and block until the training processes exit.

    Streams newly appended lines of ``filename`` to stdout, looking for the
    success marker "Training Over". Every ``monitor_time`` seconds the
    tracked processes are polled; once none are left running the function
    returns.

    Args:
        processes (list): ``psutil.Process`` objects to monitor.
        filename (str): path of the msrun worker log to tail.
        monitor_time (int): seconds between process liveness polls.

    Returns:
        int: 0 if the success marker was seen in the log, 1 otherwise.
    """
    success_pattern = re.compile("Training Over")
    exit_code = 1
    process_status = {proc.pid: {"status": "running", "exit_code": None}
                      for proc in processes}

    is_running = True
    print(f"Monitoring {filename}...", flush=True)
    try:
        f = open(filename, "r", encoding="utf-8")
    except FileNotFoundError:
        # The original fell through with `f` unbound and crashed below with
        # an UnboundLocalError; fail fast instead.
        print(f"Error: File {filename} not found.", flush=True)
        return exit_code

    try:
        # Remember the inode so we can detect the log file being replaced.
        inode = os.stat(filename).st_ino
        f.seek(0, 2)  # start tailing from the current end of the file
        last_position = f.tell()

        start_monitor_timestamp = time.time()
        while True:
            if not is_running:
                return exit_code

            # Detect the file being replaced with a shorter one (rotation).
            current_inode = os.stat(filename).st_ino
            if current_inode != inode and os.path.getsize(filename) < last_position:
                print("\nFile truncated. Re-opening...", flush=True)
                f.close()
                f = open(filename, "r", encoding="utf-8")
                # The original never refreshed `inode`, so this branch fired on
                # every iteration after the first rotation.
                inode = current_inode
                # The replacement is shorter than the old read offset, so the
                # old position would skip its content; restart from the top.
                last_position = 0

            # Drain any new lines since the last read position.
            f.seek(last_position)
            line = f.readline()
            while line:
                print(line, end="")
                if success_pattern.search(line):
                    exit_code = 0
                last_position = f.tell()
                line = f.readline()

            time.sleep(0.1)
            now_timestamp = time.time()
            if now_timestamp - start_monitor_timestamp >= monitor_time:
                is_running = _monitor_process(process_status)
                start_monitor_timestamp = now_timestamp
    finally:
        # The original leaked the file handle on every exit path.
        f.close()


def read_yaml_file(yaml_file_fir):
    """Parse the YAML file at ``yaml_file_fir`` and return its contents."""
    with open(yaml_file_fir, "r", encoding="utf-8") as handle:
        parsed = yaml.safe_load(handle)
    return parsed


def generate_index_file(safetensors_dir):
    """Write model.safetensors.index.json for a directory of safetensors shards.

    Scans every ``*.safetensors`` file in ``safetensors_dir``, records which
    shard each parameter lives in (later shards win on duplicate names), sums
    an approximate total byte size per dtype, and writes a HuggingFace-style
    index file into the same directory.
    """
    import json
    import numpy as np
    import mindspore as ms

    param_dict = {}
    file_name_dict = {}
    for entry in os.listdir(safetensors_dir):
        if not entry.endswith('.safetensors'):
            continue
        shard_path = os.path.join(safetensors_dir, entry)
        shard_params = ms.load_checkpoint(shard_path, format='safetensors')
        for name in shard_params:
            param_dict[name] = shard_params[name]
            file_name_dict[name] = entry

    # Approximate total size in bytes from dtype width * element count.
    total_size = 0
    for name in param_dict:
        dtype = param_dict[name].dtype
        if dtype in (ms.float16, ms.bfloat16):
            total_size += 2 * np.prod(param_dict[name].shape)
        elif dtype in (ms.float32,):
            total_size += 4 * np.prod(param_dict[name].shape)
        elif dtype in (ms.int8, ms.uint8):
            total_size += 1 * np.prod(param_dict[name].shape)

    index_dict = {'metadata': {'total_size': str(total_size)}, 'weight_map': {}}
    for name in param_dict:
        index_dict['weight_map'][name] = file_name_dict[name]

    index_path = os.path.join(safetensors_dir, 'model.safetensors.index.json')
    with open(index_path, 'w') as f:
        json.dump(index_dict, f, indent=2)


def _resolve_master_addr():
    """Resolve the cluster master IP from VC_WORKER_HOSTS (first host wins).

    Falls back to 127.0.0.1 for single-node runs where the variable is unset.
    """
    hosts = os.getenv("VC_WORKER_HOSTS")
    if not hosts:
        return "127.0.0.1"
    # Resolve every host (surfacing DNS problems early) and use the first.
    resolved = [socket.gethostbyname(host) for host in hosts.split(',')]
    return resolved[0]


def _workers_from_yaml(yaml_dict, yaml_file_fir):
    """Derive (worker_num, local_worker_num, bind_core_state) from the config.

    worker_num = dp * mp * pp; exits the program when it is zero. At most 8
    workers run per node; core binding is only enabled for full-node jobs.
    """
    data_parallel = int(yaml_dict["parallel_config"]["data_parallel"])
    model_parallel = int(yaml_dict["parallel_config"]["model_parallel"])
    pipeline_stage = int(yaml_dict["parallel_config"]["pipeline_stage"])
    worker_num = data_parallel * model_parallel * pipeline_stage
    if worker_num == 0:
        print(f"Error:worker_num = {worker_num}, please check yaml file {yaml_file_fir} parallel_config",
              flush=True)
        sys.exit(1)
    if worker_num >= 8:
        local_worker_num = 8
        bind_core_state = True
    else:
        local_worker_num = worker_num
        bind_core_state = False

    print('data_parallel', data_parallel, flush=True)
    print('model_parallel', model_parallel, flush=True)
    print('pipeline_stage', pipeline_stage, flush=True)
    return worker_num, local_worker_num, bind_core_state


def _build_msrun_cmd(cmd, yaml_dict, worker_num, local_worker_num,
                     bind_core_state, node_rank, hccl_port, local_default_path):
    """Assemble the full `export ... && msrun ...` shell command line."""
    msrun_cmd = 'export ENABLE_CELL_REUSE=1 && '
    msrun_cmd += 'export ENABLE_LAZY_INLINE=1 && '
    msrun_cmd += f'export HCCL_IF_BASE_PORT={hccl_port} && '
    # Lazy inline without pipeline is only needed with gradient accumulation.
    if "runner_config" in yaml_dict and \
            "gradient_accumulation_steps" in yaml_dict['runner_config'] and \
            yaml_dict['runner_config']["gradient_accumulation_steps"] > 1:
        msrun_cmd += 'export ENABLE_LAZY_INLINE_NO_PIPELINE=1 && '
    msrun_cmd += "unset RANK_TABLE_FILE; msrun "
    msrun_cmd += f"--worker_num={worker_num} "
    msrun_cmd += f"--bind_core={bind_core_state} "
    msrun_cmd += f"--local_worker_num={local_worker_num} "
    msrun_cmd += "--master_port=40000 "
    msrun_cmd += f"--node_rank={node_rank} "
    msrun_cmd += "--join=False "
    msrun_cmd += f"--log_dir={local_default_path}/msrun_log "
    msrun_cmd += f"--cluster_time_out=600 {cmd}"
    return msrun_cmd


def run(args):
    """Launch a distributed mindformers training job via msrun and monitor it.

    Resolves the master address, builds the run_mindformer.py command line
    from ``args``, starts msrun detached (--join=False), then tails the last
    local worker's log until training finishes and exits with 0 on success.

    Args:
        args (EasyDict): parsed command-line arguments; must contain at least
            ``config`` and ``output_dir``.
    """
    master_addr = _resolve_master_addr()
    os.environ['MASTER_ADDRESS'] = master_addr
    node_rank = int(os.getenv("VC_TASK_INDEX", "0"))

    cmd = str(os.environ.get("MINDFORMERS_PATH", "/home/ma-user/cmb/mindformers/run_mindformer.py"))
    yaml_file_fir = args.config
    yaml_dict = read_yaml_file(yaml_file_fir)

    # Forward every provided CLI argument to run_mindformer.py.
    for key, value in args.items():
        if value is not None:
            cmd += f" --{key} {value}"

    worker_num, local_worker_num, bind_core_state = _workers_from_yaml(yaml_dict, yaml_file_fir)

    # Make sure the checkpoint directory has a safetensors index file.
    if args.load_checkpoint is not None:
        index_file = os.path.join(args.load_checkpoint, 'model.safetensors.index.json')
        if not os.path.exists(index_file):
            generate_index_file(args.load_checkpoint)

    # Randomize the HCCL base port to dodge collisions with stale jobs.
    hccl_port = random.randint(30000, 60000)
    print('worker_num', worker_num, flush=True)
    print('master_addr', master_addr, flush=True)
    print('node_rank', node_rank, flush=True)
    print('cmd', cmd, flush=True)
    os.environ["LOCAL_DEFAULT_PATH"] = args.output_dir if args.output_dir else "./output"
    local_default_path = os.getenv("LOCAL_DEFAULT_PATH", './output')

    msrun_cmd = _build_msrun_cmd(cmd, yaml_dict, worker_num, local_worker_num,
                                 bind_core_state, node_rank, hccl_port, local_default_path)
    print('msrun_cmd', msrun_cmd, flush=True)
    cmd_exec(msrun_cmd)

    # Tail the log of the last local worker on this node.
    log_node = str(node_rank * 8 + local_worker_num - 1)
    time.sleep(10)  # give msrun time to spawn the worker processes
    mf_processes = get_matching_process()
    if not mf_processes:
        print('please check cmdline in `get_matching_process`')
        sys.exit(1)

    print("mf_processes: ", mf_processes, flush=True)
    filename = os.path.join(local_default_path, "msrun_log", f"worker_{log_node}.log")
    exit_code = monitor_process(mf_processes, filename)
    print("Train Over, exit_code:", exit_code)
    sys.exit(exit_code)


if __name__ == "__main__":
    work_path = os.path.dirname(os.path.abspath(__file__))
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--config',
        default='configs/mae/run_mae_vit_base_p16_224_800ep.yaml',
        required=True,
        help='YAML config files')
    parser.add_argument(
        '--train_dataset_dir', default=None, type=str, required=True,
        help='dataset directory of data loader to train/finetune.')
    parser.add_argument(
        '--load_checkpoint', default=None, type=str, required=False,
        help='load model checkpoint to pretrain/finetune')
    parser.add_argument(
        '--auto_trans_ckpt', default=None, type=str2bool, required=True,
        help='if true, auto transform load_checkpoint to load in distributed model.')
    parser.add_argument(
        '--output_dir', default=None, type=str, required=True,
        help='output directory.')
    parser.add_argument(
        '--seq_length', default=None, type=int, required=True,
        help='Please pass in the sequence length based on the length of the generated dataset.')
    parser.add_argument(
        '--enable_tensorboard', default=None, type=str2bool,
        help='whether dump tensorboard data, default is False.')
    parser.add_argument(
        '--only_save_strategy', default=False, type=str2bool,
        help='only save strategy.')

    args_, rest_args_ = parser.parse_known_args()
    args_ = EasyDict(vars(args_))
    run(args_)
