"""
Borrowed from verl.trainer.main_ppo.py
Note that we don't combine the main with ray_trainer as ray_trainer is used by other main.
"""

# Suppress the Hugging Face tokenizers fork warning
import os
import socket

os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:32,'
os.environ["GRPC_DEFAULT_AUTHORITY"] = "localhost"
os.environ['TORCH_CUDA_ARCH_LIST'] = "8.0"

import ray
import hydra
import torch
import numpy as np
from omegaconf import OmegaConf
import sys

from verl import DataProto
from verl.trainer.constants_ppo import get_ppo_ray_runtime_env
from verl.trainer.ppo.utils import need_critic, need_reference_policy
from verl.utils.config import validate_config
from verl.utils.device import is_cuda_available
from verl.utils.import_utils import load_extern_type

from ragen.trainer.agent_trainer import RayAgentTrainer
from ragen.utils import register_resolvers
register_resolvers()

class DummyRewardManager():
    """Reward manager that turns per-sample scalar rewards into a token-level
    reward tensor.

    Unlike rule- or model-based managers, this one simply reads the precomputed
    scalar reward from ``non_tensor_batch['reward']`` and places it on the last
    valid response token of each sample.
    """

    def __init__(self, tokenizer, num_examine, compute_score=None) -> None:
        self.tokenizer = tokenizer
        # Number of decoded samples per data source to print for inspection.
        self.num_examine = num_examine
        # Kept for interface compatibility with other reward managers; this
        # manager does not call it.
        self.compute_score = compute_score

    def __call__(self, data: "DataProto", return_dict=False):
        """Build a (batch, response_len) float32 reward tensor from ``data``.

        Args:
            data: batch with per-item 'prompts', 'responses', 'attention_mask'
                tensors and a scalar 'reward' in ``non_tensor_batch``.
            return_dict: if True, wrap the result as {"reward_tensor": tensor}.

        Returns:
            The reward tensor (or a dict containing it). If 'rm_scores' is
            already present in the batch it is returned unchanged.
        """
        # If there is an rm score, return it directly instead of recomputing.
        if 'rm_scores' in data.batch.keys():
            if return_dict:
                return {
                    "reward_tensor": data.batch['rm_scores'],
                }
            else:
                return data.batch['rm_scores']

        reward_tensor = torch.zeros_like(data.batch['responses'], dtype=torch.float32)
        all_scores = []
        already_print_data_sources = {}

        for i in range(len(data)):
            data_item = data[i]  # DataProtoItem

            prompt_ids = data_item.batch['prompts']
            prompt_length = prompt_ids.shape[-1]

            # attention_mask covers prompt followed by response; the prompt is
            # assumed left-padded, the response right-padded.
            valid_prompt_length = data_item.batch['attention_mask'][:prompt_length].sum()
            valid_prompt_ids = prompt_ids[-valid_prompt_length:]

            response_ids = data_item.batch['responses']
            valid_response_length = data_item.batch['attention_mask'][prompt_length:].sum()
            valid_response_ids = response_ids[:valid_response_length]

            score = float(data_item.non_tensor_batch['reward'])
            # Place the scalar reward on the last valid response token.
            reward_tensor[i, valid_response_length - 1] = score
            all_scores.append(score)

            # Get data_source from data_item if available, otherwise use a default value.
            data_source = data_item.non_tensor_batch.get('data_source', 'default')

            if data_source not in already_print_data_sources:
                already_print_data_sources[data_source] = 0

            if already_print_data_sources[data_source] < self.num_examine:
                already_print_data_sources[data_source] += 1
                # Decode lazily: only pay the tokenizer cost when we actually print.
                sequences = torch.cat((valid_prompt_ids, valid_response_ids))
                print(self.tokenizer.decode(sequences))

        # Guard against an empty batch: np.max / np.min raise ValueError on
        # empty input (previously this crashed).
        if all_scores:
            print(f"[DEBUG] all_scores: {all_scores}")
            print(f"[DEBUG] all_scores shape: {np.array(all_scores).shape}")
            print(f"[DEBUG] all_scores mean: {np.mean(all_scores)}")
            print(f"[DEBUG] all_scores max: {np.max(all_scores)}")
            print(f"[DEBUG] all_scores min: {np.min(all_scores)}")
            print(f"[DEBUG] all_scores std: {np.std(all_scores)}")
        else:
            print("[DEBUG] all_scores: empty batch, skipping statistics")

        if return_dict:
            return {
                "reward_tensor": reward_tensor,
            }
        else:
            return reward_tensor

def get_custom_reward_fn(config):
    """Load a user-supplied reward function from a Python file on disk.

    Reads ``config["custom_reward_function"]`` for a ``path`` (Python file)
    and ``name`` (function inside that file).

    Args:
        config: mapping-like config with an optional "custom_reward_function"
            section.

    Returns:
        The loaded callable, or None when no path is configured.

    Raises:
        FileNotFoundError: when the configured file does not exist.
        RuntimeError: when the module spec cannot be created or executing the
            module fails.
        ValueError: when the function name is missing from the config.
        AttributeError: when the named function is not defined in the module.
    """
    import importlib.util

    reward_fn_config = config.get("custom_reward_function") or {}
    file_path = reward_fn_config.get("path")
    if not file_path:
        return None

    if not os.path.exists(file_path):
        raise FileNotFoundError(f"Reward function file '{file_path}' not found.")

    spec = importlib.util.spec_from_file_location("custom_module", file_path)
    if spec is None:
        raise RuntimeError(f"Failed to create module spec from '{file_path}'")

    module = importlib.util.module_from_spec(spec)
    try:
        spec.loader.exec_module(module)
    except Exception as e:
        # Chain the original exception so import errors keep their traceback.
        raise RuntimeError(f"Error loading module from '{file_path}': {e}") from e

    function_name = reward_fn_config.get("name")
    if not function_name:
        raise ValueError("Function name not specified in custom_reward_function config")

    if not hasattr(module, function_name):
        raise AttributeError(f"Reward function '{function_name}' not found in '{file_path}'.")

    print(f"using customized reward function '{function_name}' from '{file_path}'")

    return getattr(module, function_name)



def add_dependency_and_validate_config(config):
    """Validate cross-field constraints of the training config and derive
    dependent values.

    Args:
        config: attribute-style config with ``micro_batch_size_per_gpu``,
            ``trainer``, ``actor_rollout_ref``, ``system``, ``agent_proxy``,
            ``es_manager`` and ``data`` sections.

    Returns:
        The same config object, with ``config.data.train_batch_size`` derived
        from the environment-group settings.

    Raises:
        AssertionError: if any batch-size, GPU-count or mid_turn_ratio
            constraint is violated.
    """
    # validate config
    assert config.micro_batch_size_per_gpu * config.trainer.n_gpus_per_node <= config.actor_rollout_ref.actor.ppo_mini_batch_size, \
        f"micro_batch_size_per_gpu * n_gpus_per_node ({config.micro_batch_size_per_gpu * config.trainer.n_gpus_per_node}) must be less than or equal to ppo_mini_batch_size ({config.actor_rollout_ref.actor.ppo_mini_batch_size})"
    assert config.actor_rollout_ref.actor.ppo_mini_batch_size % (config.micro_batch_size_per_gpu * config.trainer.n_gpus_per_node) == 0, \
        f"ppo_mini_batch_size ({config.actor_rollout_ref.actor.ppo_mini_batch_size}) must be divisible by micro_batch_size_per_gpu * n_gpus_per_node ({config.micro_batch_size_per_gpu * config.trainer.n_gpus_per_node})"
    assert len(str(config.system.CUDA_VISIBLE_DEVICES).split(',')) == config.trainer.n_gpus_per_node, \
        f"CUDA_VISIBLE_DEVICES ({config.system.CUDA_VISIBLE_DEVICES}) must have the same number of GPUs as n_gpus_per_node ({config.trainer.n_gpus_per_node})"
    ratio = getattr(config.agent_proxy, 'mid_turn_ratio', None)
    assert ratio is not None, "agent_proxy.mid_turn_ratio 必须设置，取值范围为 [0, 1)；0 表示从第一回合开始引入可选的 print_result 提示，不能等于 1"
    try:
        ratio = float(ratio)
    except (TypeError, ValueError):
        # Keep raising AssertionError so callers catching it are unaffected.
        raise AssertionError(f"agent_proxy.mid_turn_ratio 必须是数值类型，当前值为: {ratio}")
    # Bug fix: the messages above document the valid range as [0, 1) ("cannot
    # equal 1"), but the previous check accepted ratio == 1. Enforce [0, 1).
    assert 0 <= ratio < 1, f"agent_proxy.mid_turn_ratio 必须位于 [0, 1) 区间，当前值为: {ratio}"

    # add dependency: total train batch size = env groups * group size
    config.data.train_batch_size = config.es_manager.train.env_groups * config.es_manager.train.group_size

    return config


@hydra.main(version_base=None, config_path="config", config_name="base")
def main(config):
    """Hydra entry point: validate the config, echo it, then launch PPO training."""
    validated = add_dependency_and_validate_config(config)
    print(f"config: {validated}")
    run_ppo(validated)


def run_ppo(config) -> None:
    """Initialize Ray cluster and run distributed PPO training process.

    Sets GPU visibility, initializes Ray (merging default and user runtime
    envs), executes TaskRunner remotely, and always shuts Ray down on exit.

    Args:
        config: Training configuration object containing all necessary parameters
                for distributed PPO training including Ray initialization settings,
                model paths, and training hyperparameters.
    """
    # Environment variables must be set before Ray initialization so that
    # spawned workers inherit the intended GPU visibility.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(config.system.CUDA_VISIBLE_DEVICES)
    os.environ["ENSURE_CUDA_VISIBLE_DEVICES"] = os.environ.get('CUDA_VISIBLE_DEVICES', '')

    print(f"CUDA_VISIBLE_DEVICES: {os.environ['CUDA_VISIBLE_DEVICES']}")

    # try/finally guarantees Ray resources are cleaned up even on abnormal exit.
    try:
        # Check if Ray is not initialized
        if not ray.is_initialized():
            # Initialize Ray with a local cluster configuration
            # Set environment variables in the runtime environment to control tokenizer parallelism,
            # NCCL debug level, VLLM logging level, and allow runtime LoRA updating
            default_runtime_env = get_ppo_ray_runtime_env()
            
            # Get ray_kwargs from config, with fallback to empty dict
            ray_kwargs = getattr(config, 'ray_kwargs', {})
            ray_init_kwargs = ray_kwargs.get("ray_init", {}) if ray_kwargs else {}
            runtime_env_kwargs = ray_init_kwargs.get("runtime_env", {})
            
            # Merge with system-specific environment variables
            # NOTE(review): 'true' here overrides the module-level
            # TOKENIZERS_PARALLELISM='false' inside Ray workers — confirm intended.
            system_env_vars = {
                'TOKENIZERS_PARALLELISM': 'true',
                'NCCL_DEBUG': 'WARN',
                'VLLM_LOGGING_LEVEL': 'WARN',
                "RAY_DEBUG": "legacy",
                "CUDA_VISIBLE_DEVICES": str(config.system.CUDA_VISIBLE_DEVICES)
            }
            
            # Update default runtime env with system-specific vars
            default_runtime_env['env_vars'].update(system_env_vars)
            
            # User-provided runtime_env entries take precedence in the merge.
            runtime_env = OmegaConf.merge(default_runtime_env, runtime_env_kwargs)
            ray_init_kwargs = OmegaConf.create({**ray_init_kwargs, "runtime_env": runtime_env})
            
            # Add default settings if not present
            if 'ignore_reinit_error' not in ray_init_kwargs:
                ray_init_kwargs['ignore_reinit_error'] = True
            if 'configure_logging' not in ray_init_kwargs:
                ray_init_kwargs['configure_logging'] = True
            if 'logging_level' not in ray_init_kwargs:
                ray_init_kwargs['logging_level'] = 'warning'
                
            print(f"ray init kwargs: {ray_init_kwargs}")
            ray.init(**OmegaConf.to_container(ray_init_kwargs))

        # Create a remote instance of the TaskRunner class, and
        # Execute the `run` method of the TaskRunner instance remotely and wait for it to complete.
        # When nsys profiling is configured, attach nsight options to the actor.
        if (
            is_cuda_available
            and hasattr(config, 'global_profiler')
            and config.global_profiler.tool == "nsys"
            and config.global_profiler.get("steps") is not None
            and len(config.global_profiler.get("steps", [])) > 0
        ):
            from verl.utils.import_utils import is_nvtx_available

            assert is_nvtx_available(), "nvtx is not available in CUDA platform. Please 'pip3 install nvtx'"
            nsight_options = OmegaConf.to_container(
                config.global_profiler.global_tool_config.nsys.controller_nsight_options
            )
            runner = TaskRunner.options(runtime_env={"nsight": nsight_options}).remote()
        else:
            runner = TaskRunner.remote()
        ray.get(runner.run.remote(config))

        # [Optional] get the path of the timeline trace file from the configuration, default to None
        # This file is used for performance analysis
        timeline_json_file = getattr(config, 'ray_kwargs', {}).get("timeline_json_file", None) if hasattr(config, 'ray_kwargs') else None
        if timeline_json_file:
            ray.timeline(filename=timeline_json_file)
    finally:
        # Make sure Ray shuts down cleanly so no worker processes are left behind.
        # This matters when the program exits abnormally.
        # Note: forced kills (e.g. kill -9) cannot be intercepted here; in that
        # case use the cleanup script instead.
        if ray.is_initialized():
            try:
                ray.shutdown()
                print("Ray 已成功关闭")
            except Exception as e:
                print(f"关闭 Ray 时出现错误: {e}")
                print("提示: 如果仍有遗留的 worker 进程，请运行 scripts/cleanup_ray_workers.sh 进行清理")

@ray.remote(num_cpus=1)  # please make sure main_task is not scheduled on head
class TaskRunner:
    """Ray remote class for executing distributed PPO training tasks.

    This class encapsulates the main training logic and runs as a Ray remote actor
    to enable distributed execution across multiple nodes and GPUs.

    Attributes:
        role_worker_mapping: Dictionary mapping Role enums to Ray remote worker classes
        mapping: Dictionary mapping Role enums to resource pool IDs for GPU allocation
    """

    def __init__(self):
        # Both dicts are populated incrementally by the add_*_worker methods.
        self.role_worker_mapping = {}
        self.mapping = {}

    def add_actor_rollout_worker(self, config):
        """Add actor rollout worker based on the actor strategy.

        Registers the chosen class under Role.ActorRollout and returns
        (actor_rollout_cls, ray_worker_group_cls) for later use.
        """
        from verl.single_controller.ray import RayWorkerGroup

        if config.actor_rollout_ref.actor.strategy in {"fsdp", "fsdp2"}:
            from ragen.workers.fsdp_workers import ActorRolloutRefWorker
            
            # Check for async rollout mode
            if hasattr(config.actor_rollout_ref, 'rollout') and hasattr(config.actor_rollout_ref.rollout, 'mode') and config.actor_rollout_ref.rollout.mode == "async":
                try:
                    from ragen.workers.fsdp_workers import AsyncActorRolloutRefWorker
                    actor_rollout_cls = AsyncActorRolloutRefWorker
                except ImportError:
                    # Fall back to the synchronous worker when the async variant is absent.
                    print("AsyncActorRolloutRefWorker not available, using ActorRolloutRefWorker")
                    actor_rollout_cls = ActorRolloutRefWorker
            else:
                actor_rollout_cls = ActorRolloutRefWorker
            
            ray_worker_group_cls = RayWorkerGroup

        elif config.actor_rollout_ref.actor.strategy == "megatron":
            from verl.workers.megatron_workers import ActorRolloutRefWorker

            if hasattr(config.actor_rollout_ref, 'rollout') and hasattr(config.actor_rollout_ref.rollout, 'mode') and config.actor_rollout_ref.rollout.mode == "async":
                try:
                    from verl.workers.megatron_workers import AsyncActorRolloutRefWorker
                    actor_rollout_cls = AsyncActorRolloutRefWorker
                except ImportError:
                    print("AsyncActorRolloutRefWorker not available, using ActorRolloutRefWorker")
                    actor_rollout_cls = ActorRolloutRefWorker
            else:
                actor_rollout_cls = ActorRolloutRefWorker
            
            ray_worker_group_cls = RayWorkerGroup

        else:
            raise NotImplementedError

        from verl.trainer.ppo.ray_trainer import Role

        self.role_worker_mapping[Role.ActorRollout] = ray.remote(actor_rollout_cls)

        return actor_rollout_cls, ray_worker_group_cls

    def add_critic_worker(self, config):
        """Add critic worker to role mapping.

        Picks a CriticWorker implementation based on the critic strategy and
        the trainer.use_legacy_worker_impl flag, then registers it under
        Role.Critic.
        """
        if config.critic.strategy in {"fsdp", "fsdp2"}:
            use_legacy_worker_impl = config.trainer.get("use_legacy_worker_impl", "auto")
            if use_legacy_worker_impl in ["auto", "enable"]:
                from ragen.workers.fsdp_workers import CriticWorker
            elif use_legacy_worker_impl == "disable":
                from verl.workers.roles import CriticWorker
                print("Using new worker implementation")
            else:
                raise ValueError(f"Invalid use_legacy_worker_impl: {use_legacy_worker_impl}")

        elif config.critic.strategy == "megatron":
            from verl.workers.megatron_workers import CriticWorker

        else:
            raise NotImplementedError

        from verl.trainer.ppo.ray_trainer import Role

        self.role_worker_mapping[Role.Critic] = ray.remote(CriticWorker)

    def init_resource_pool_mgr(self, config):
        """Initialize resource pool manager.

        Builds the GPU resource pool spec (one global pool, plus an optional
        dedicated reward-model pool) and maps ActorRollout and Critic roles to
        the global pool. Returns a ResourcePoolManager.
        """
        from verl.trainer.ppo.ray_trainer import Role

        global_pool_id = "global_pool"
        resource_pool_spec = {
            global_pool_id: [config.trainer.n_gpus_per_node] * config.trainer.nnodes,
        }
        
        # TODO Here you can use the new registration method to support dynamic registration of roles
        if hasattr(config, 'reward_model') and config.reward_model.get('enable_resource_pool', False):
            if config.reward_model.n_gpus_per_node <= 0:
                raise ValueError("config.reward_model.n_gpus_per_node must be greater than 0")
            if config.reward_model.nnodes <= 0:
                raise ValueError("config.reward_model.nnodes must be greater than 0")

            reward_pool = [config.reward_model.n_gpus_per_node] * config.reward_model.nnodes
            resource_pool_spec["reward_pool"] = reward_pool

        self.mapping[Role.ActorRollout] = global_pool_id
        self.mapping[Role.Critic] = global_pool_id
        from verl.trainer.ppo.ray_trainer import ResourcePoolManager

        resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=self.mapping)
        return resource_pool_manager

    def add_reward_model_worker(self, config):
        """Add reward model worker if enabled.

        No-op unless config.reward_model.enable is True. Chooses the worker
        implementation by strategy and legacy flag, and assigns the role to
        the dedicated reward pool when enable_resource_pool is set.
        """
        from verl.trainer.ppo.ray_trainer import Role

        if hasattr(config, 'reward_model') and config.reward_model.get('enable', False):
            use_legacy_worker_impl = config.trainer.get("use_legacy_worker_impl", "auto")
            if use_legacy_worker_impl in ["auto", "enable"]:
                if config.reward_model.strategy in {"fsdp", "fsdp2"}:
                    from ragen.workers.fsdp_workers import RewardModelWorker
                elif config.reward_model.strategy == "megatron":
                    from verl.workers.megatron_workers import RewardModelWorker
                else:
                    raise NotImplementedError
            elif use_legacy_worker_impl == "disable":
                from verl.workers.roles import RewardModelWorker
                print("Using new worker implementation")
            else:
                raise ValueError(f"Invalid use_legacy_worker_impl: {use_legacy_worker_impl}")

            self.role_worker_mapping[Role.RewardModel] = ray.remote(RewardModelWorker)
            if config.reward_model.get('enable_resource_pool', False):
                self.mapping[Role.RewardModel] = "reward_pool"
            else:
                self.mapping[Role.RewardModel] = "global_pool"

    def add_ref_policy_worker(self, config, ref_policy_cls):
        """Add reference policy worker if KL loss or KL reward is used."""
        from verl.trainer.ppo.ray_trainer import Role

        if (hasattr(config, 'algorithm') and config.algorithm.get('use_kl_in_reward', False)) or config.actor_rollout_ref.actor.get('use_kl_loss', False):
            self.role_worker_mapping[Role.RefPolicy] = ray.remote(ref_policy_cls)
            self.mapping[Role.RefPolicy] = "global_pool"

    def run(self, config):
        """Execute the main PPO training workflow.

        This method sets up the distributed training environment, initializes
        workers, datasets, and reward functions, then starts the training process.

        Args:
            config: Training configuration object containing all parameters needed
                   for setting up and running the PPO training process.
        """
        # Print the initial configuration. `resolve=True` will evaluate symbolic values.
        from pprint import pprint
        from omegaconf import OmegaConf
        from verl.utils.fs import copy_to_local

        print(f"TaskRunner hostname: {socket.gethostname()}, PID: {os.getpid()}")
        pprint(OmegaConf.to_container(config, resolve=True))
        # Resolve interpolations in place so later accesses see concrete values.
        OmegaConf.resolve(config)

        actor_rollout_cls, ray_worker_group_cls = self.add_actor_rollout_worker(config)
        self.add_critic_worker(config)

        # We should adopt a multi-source reward function here:
        # - for rule-based rm, we directly call a reward score
        # - for model-based rm, we call a model
        # - for code related prompt, we send to a sandbox if there are test cases
        # finally, we combine all the rewards together
        # The reward type depends on the tag of the data
        self.add_reward_model_worker(config)

        # Add a reference policy worker if KL loss or KL reward is used.
        self.add_ref_policy_worker(config, actor_rollout_cls)

        # Handle legacy ref policy configuration for backward compatibility
        if hasattr(config.actor_rollout_ref.actor, 'use_ref') and config.actor_rollout_ref.actor.use_ref:
            print("[DEBUG] using ref policy (legacy config)")
            from verl.trainer.ppo.ray_trainer import Role
            if Role.RefPolicy not in self.role_worker_mapping:
                self.role_worker_mapping[Role.RefPolicy] = ray.remote(actor_rollout_cls)
                self.mapping[Role.RefPolicy] = "global_pool"

        # Handle KL loss setting logic for backward compatibility:
        # if no mechanism requires a reference policy, force use_kl_loss off.
        if not ((hasattr(config, 'algorithm') and config.algorithm.get('use_kl_in_reward', False)) or 
                config.actor_rollout_ref.actor.get('use_kl_loss', False) or
                (hasattr(config.actor_rollout_ref.actor, 'use_ref') and config.actor_rollout_ref.actor.use_ref)):
            print("[DEBUG] not using ref policy, setting use_kl_loss to False")
            if hasattr(config.actor_rollout_ref.actor, 'use_kl_loss'):
                config.actor_rollout_ref.actor.use_kl_loss = False

        # validate config
        validate_config(
            config=config,
            use_reference_policy=need_reference_policy(self.role_worker_mapping),
            use_critic=need_critic(config),
        )

        # Download the checkpoint from HDFS to the local machine.
        # `use_shm` determines whether to use shared memory, which could lead to faster model loading if turned on
        local_path = copy_to_local(
            config.actor_rollout_ref.model.path, use_shm=config.actor_rollout_ref.model.get("use_shm", False)
        )

        # Instantiate the tokenizer and processor.
        from verl.utils import hf_tokenizer, hf_processor

        trust_remote_code = getattr(config, 'data', {}).get("trust_remote_code", False) if hasattr(config, 'data') else False
        tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code)
        # Used for multimodal LLM, could be None
        processor = hf_processor(local_path, trust_remote_code=trust_remote_code, use_fast=True)

        resource_pool_manager = self.init_resource_pool_mgr(config)

        # Initialize reward functions
        print("using dummy reward manager")
        reward_manager_cls = DummyRewardManager

        compute_score = get_custom_reward_fn(config)
        reward_fn = reward_manager_cls(tokenizer=tokenizer, num_examine=0, compute_score=compute_score)

        # Note that we always use function-based RM for validation
        val_reward_fn = reward_manager_cls(tokenizer=tokenizer, num_examine=1, compute_score=compute_score)

        # Initialize the training data collector
        from ragen.llm_agent.data_collector import TrainingDataCollector
        data_collector = TrainingDataCollector(config)
        
        # Look up the optional data-saving configuration.
        # NOTE(review): data_config is assigned but never used below —
        # presumably leftover; confirm before removing.
        data_config = getattr(config, 'data_saving', None)
        trainer = RayAgentTrainer(
            config=config,
            tokenizer=tokenizer,
            processor=processor,
            role_worker_mapping=self.role_worker_mapping,
            resource_pool_manager=resource_pool_manager,
            ray_worker_group_cls=ray_worker_group_cls,
            reward_fn=reward_fn,
            val_reward_fn=val_reward_fn,
            data_collector=data_collector
        )
        trainer.init_workers()
        trainer.init_agent_proxy()
        trainer.fit()


if __name__ == '__main__':
    main()
