"""
Note that we don't combine this main entry point with ray_trainer, because ray_trainer is reused by other main scripts.
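
Example launch (illustrative only; the module path and override values below are placeholders, not commands taken from this repo's docs):

    python -m absolute_zero_reasoner.main_azr_ppo trainer.nnodes=1 trainer.n_gpus_per_node=8 data.train_batch_size=64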
"""

import ray
import hydra
from pathlib import Path
from pprint import pprint

from omegaconf import OmegaConf
from verl.utils.fs import copy_local_path_from_hdfs
from verl.utils import hf_tokenizer
from verl.trainer.ppo.ray_trainer import ResourcePoolManager, Role

from absolute_zero_reasoner.trainer.ppo.azr_ray_trainer import CodeIORayPPOTrainer
from absolute_zero_reasoner.rewards.reward_managers import CodeIORewardManager


@hydra.main(config_path='configs', config_name='azr_ppo_trainer', version_base=None)
def main(config):
    run_ppo(config)


def run_ppo(config) -> None:
    # Read GPU visibility up front so it can also be forwarded to the TaskRunner
    # actor below even when Ray was already initialized by the caller.
    import os
    cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", "0")

    if not ray.is_initialized():
        ray.init(
            runtime_env={"env_vars": {
                "TOKENIZERS_PARALLELISM": "true",
                "NCCL_DEBUG": "WARN",
                "VLLM_LOGGING_LEVEL": "WARN",
                "VLLM_ALLOW_RUNTIME_LORA_UPDATING": "true",
                "CUDA_VISIBLE_DEVICES": cuda_visible_devices
            }},
            num_cpus=config.ray_init.num_cpus,
        )

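    # If profiling steps are configured, attach the Nsight options to the TaskRunner's
    # runtime env; otherwise launch the runner with only the GPU visibility setting.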
    if OmegaConf.select(config.trainer, "profile_steps") is not None and len(OmegaConf.select(config.trainer, "profile_steps")) > 0:
        nsight_options = OmegaConf.to_container(config.trainer.controller_nsight_options)
        runner = TaskRunner.options(runtime_env={
            "nsight": nsight_options,
            "env_vars": {"CUDA_VISIBLE_DEVICES": cuda_visible_devices}
        }).remote()
    else:
        runner = TaskRunner.options(runtime_env={
            "env_vars": {"CUDA_VISIBLE_DEVICES": cuda_visible_devices}
        }).remote()
    ray.get(runner.run.remote(config))

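    # Optionally dump a Ray timeline trace for post-run performance analysis.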
    timeline_json_file = config.ray_init.get("timeline_json_file", None)
    if timeline_json_file:
        ray.timeline(filename=timeline_json_file)


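# The driver logic runs inside a single-CPU Ray actor so that config resolution,
# tokenizer loading, and worker-group construction happen on the Ray cluster.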
@ray.remote(num_cpus=1)
class TaskRunner:
    def run(self, config):
        pprint(OmegaConf.to_container(config, resolve=True))
        OmegaConf.resolve(config)

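        # Optional remote debugging: when trainer.debug is set, block until a debugpy
        # client attaches on trainer.debug_port.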
        if config.trainer.debug:
            import debugpy
            debugpy.listen(("0.0.0.0", config.trainer.debug_port))
            print(f"Debugger listening on port {config.trainer.debug_port}")
            debugpy.wait_for_client()
            print("Debugger attached!")

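        # Derive the PPO mini-batch size from the data batch size, the number of AZR
        # problem types, and whether the propose role is also trained (factor of 2);
        # for example, a train batch of 64 with 3 problem types and train_propose=True
        # gives 64 * 3 * 2 = 384.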
        config.actor_rollout_ref.actor.ppo_mini_batch_size = config.data.train_batch_size * len(config.azr.problem_types) * (2 if config.azr.train_propose else 1)
        pprint(f"auto setting ppo_mini_batch_size: {config.actor_rollout_ref.actor.ppo_mini_batch_size}")
        config.azr.data_selection_strategy.data_len = config.data.train_batch_size * config.azr.data_selection_strategy.update_iteration
        pprint(f"auto setting data_len: {config.azr.data_selection_strategy.data_len}")

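        # Nest outputs under <default_local_dir>/<train-file stem>/<model name>/<extraction type>
        # so runs on different data, models, or reward extraction settings do not collide.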
        config.trainer.default_local_dir = (Path(config.trainer.default_local_dir) / config.data.train_files.split('/')[-1].split('.')[0] / config.actor_rollout_ref.model.path.split('/')[-1] / config.reward_fn.extraction_type).as_posix()

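        # Composite functions (composite_function_n_min > 0) are only valid when
        # reject_multiple_functions is enabled.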
        assert not (not config.azr.reward.generation_reward_config.reject_multiple_functions and config.azr.data_selection_strategy.composite_function_n_min > 0), "If reject_multiple_functions is False, composite_function_n_min must be 0"

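        # Resolve the model path: download from HDFS if needed, otherwise use the local
        # copy, then build the tokenizer (and processor) from it.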
        local_path = copy_local_path_from_hdfs(config.actor_rollout_ref.model.path)

        from verl.utils import hf_processor, hf_tokenizer

        trust_remote_code = config.data.get("trust_remote_code", False)
        tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code)

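        # For base (non-chat) pretrained models, replace the chat template with plain
        # newline-joined message contents so prompts are not wrapped in chat markup.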
        if config.actor_rollout_ref.model.pretrained_tokenizer:
            tokenizer.chat_template = "{%- for message in messages -%}{{- '\n' if not loop.first -}}{{- message['content'] -}}{%- endfor -%}"

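        # The processor is only needed for multimodal models; it may be None for text-only models.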
        processor = hf_processor(local_path, trust_remote_code=trust_remote_code, use_fast=True)

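        # vLLM-specific check: LoRA rollouts require vLLM >= 0.7.3.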
        if config.actor_rollout_ref.rollout.name in ["vllm"]:
            from verl.utils.vllm_utils import is_version_ge

            if config.actor_rollout_ref.model.get("lora_rank", 0) > 0:
                if not is_version_ge(pkg="vllm", minver="0.7.3"):
                    raise NotImplementedError("PPO LoRA is not supported before vllm 0.7.3")

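        # Select worker implementations and the Ray worker-group class based on the
        # parallelism strategy (FSDP/FSDP2 vs. Megatron); async rollout mode swaps in
        # the async actor-rollout worker.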
        if config.actor_rollout_ref.actor.strategy in ["fsdp", "fsdp2"]:
            assert config.critic.strategy in ["fsdp", "fsdp2"]
            from verl.single_controller.ray import RayWorkerGroup
            from verl.workers.fsdp_workers import ActorRolloutRefWorker, AsyncActorRolloutRefWorker, CriticWorker

            actor_rollout_cls = AsyncActorRolloutRefWorker if config.actor_rollout_ref.rollout.mode == "async" else ActorRolloutRefWorker
            ray_worker_group_cls = RayWorkerGroup

        elif config.actor_rollout_ref.actor.strategy == "megatron":
            assert config.actor_rollout_ref.actor.strategy == config.critic.strategy
            from verl.single_controller.ray.megatron import NVMegatronRayWorkerGroup
            from verl.workers.megatron_workers import ActorRolloutRefWorker, AsyncActorRolloutRefWorker, CriticWorker

            actor_rollout_cls = AsyncActorRolloutRefWorker if config.actor_rollout_ref.rollout.mode == "async" else ActorRolloutRefWorker
            ray_worker_group_cls = NVMegatronRayWorkerGroup

        else:
            raise NotImplementedError

        from verl.trainer.ppo.ray_trainer import ResourcePoolManager, Role

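        # Map PPO roles to Ray remote worker classes.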
        role_worker_mapping = {
            Role.ActorRollout: ray.remote(actor_rollout_cls),
            Role.Critic: ray.remote(CriticWorker),
        }

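        # Place every role in one shared resource pool spanning n_gpus_per_node GPUs
        # on each of the nnodes nodes.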
        global_pool_id = "global_pool"
        resource_pool_spec = {
            global_pool_id: [config.trainer.n_gpus_per_node] * config.trainer.nnodes,
        }
        mapping = {
            Role.ActorRollout: global_pool_id,
            Role.Critic: global_pool_id,
        }

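        # Optionally register a model-based reward worker matching the chosen parallelism
        # strategy; the rule-based CodeIO reward managers below are used either way.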
        if config.reward_model.enable:
            if config.reward_model.strategy in ["fsdp", "fsdp2"]:
                from verl.workers.fsdp_workers import RewardModelWorker
            elif config.reward_model.strategy == "megatron":
                from verl.workers.megatron_workers import RewardModelWorker
            else:
                raise NotImplementedError
            role_worker_mapping[Role.RewardModel] = ray.remote(RewardModelWorker)
            mapping[Role.RewardModel] = global_pool_id

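        # A reference-policy worker is only needed when a KL term is used, either in the
        # reward or as an auxiliary actor loss.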
        if config.algorithm.use_kl_in_reward or config.actor_rollout_ref.actor.use_kl_loss:
            role_worker_mapping[Role.RefPolicy] = ray.remote(ActorRolloutRefWorker)
            mapping[Role.RefPolicy] = global_pool_id

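        # Rule-based CodeIO reward managers for the train and validation splits;
        # num_examine controls console printing of decoded samples (0 for train, 1 for val).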
        reward_fn = CodeIORewardManager(
            tokenizer=tokenizer,
            num_examine=0,
            reward_fn_extraction_type=config.reward_fn.extraction_type,
            math_metric=config.reward_fn.math_metric,
            split='train',
            splitter=config.reward_fn.splitter,
            output_path=config.trainer.default_local_dir,
            max_prompt_length=config.data.max_prompt_length,
            generation_reward_config=config.azr.reward.generation_reward_config,
            valid_program_filter=config.azr.data_selection_strategy.valid_program_filter,
            debug=config.trainer.debug,
            extract_code_block=config.azr.reward.extract_code_block,
            code_f_reward_type=config.azr.reward.code_f_reward_type,
            boxed_retry=config.reward_fn.boxed_retry,
        )

        val_reward_fn = CodeIORewardManager(
            tokenizer=tokenizer,
            num_examine=1,
            reward_fn_extraction_type=config.reward_fn.extraction_type,
            math_metric=config.reward_fn.math_metric,
            split='test',
            splitter=config.reward_fn.splitter,
            output_path=config.trainer.default_local_dir,
            max_prompt_length=config.data.max_prompt_length,
            generation_reward_config=config.azr.reward.generation_reward_config,
            valid_program_filter=config.azr.data_selection_strategy.valid_program_filter,
            debug=config.trainer.debug,
            extract_code_block=config.azr.reward.extract_code_block,
            code_f_reward_type=config.azr.reward.code_f_reward_type,
            boxed_retry=config.reward_fn.boxed_retry,
        )

        resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping)

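        # Tag the W&B run with the AZR data/reward configuration and the problem types,
        # then append any user-supplied tags.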
        wandb_tags = [
            'codeio', config.azr.pred_data_mix_strategy, 'executor-' + config.azr.executor,
            config.azr.data_selection_strategy.valid_program_filter, config.azr.gen_data_probabilities_strategy,
        ]
        wandb_tags.extend(config.azr.problem_types)
        if config.trainer.wandb_tags is not None:
            config.trainer.wandb_tags = wandb_tags + config.trainer.wandb_tags.split(',')
        else:
            config.trainer.wandb_tags = wandb_tags

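        # Assemble the AZR PPO trainer, spin up the worker groups, and start training.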
        trainer = CodeIORayPPOTrainer(
            past_epoch_window=config.azr.past_epoch_window,
            config=config,
            tokenizer=tokenizer,
            processor=processor,
            role_worker_mapping=role_worker_mapping,
            resource_pool_manager=resource_pool_manager,
            ray_worker_group_cls=ray_worker_group_cls,
            reward_fn=reward_fn,
            val_reward_fn=val_reward_fn,
        )

        trainer.init_workers()
        trainer.fit()


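# Exit quietly on Ctrl-C; on any other error, print the traceback and hard-exit so that
# lingering Ray worker processes do not keep the job alive.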
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        import sys
        import traceback
        traceback.print_exc()
        sys.exit(0)
    except Exception:
        import os
        import traceback
        traceback.print_exc()
        os._exit(1)