#!/bin/bash

# This script is designed for internal use within Google.
# External users should update the pre-trained model checkpoint GCS paths (gs://) to locations they can access.
# Usage:
#  HF_TOKEN=<huggingface access token> \
#  MODEL=llama3.1-8b TOKENIZER=meta-llama/Llama-3.1-8B-Instruct \
#  NUM_SAMPLERS=2 DEVICES_PER_SAMPLER=8 \
#  TRAINING_PER_DEVICE_BATCH_SIZE=1 \
#  INFERENCE_PER_DEVICE_BATCH_SIZE=1 \
#  TRAINING_SUBSLICE=2,8 \
#  INFERENCE_SUBSLICE=2,8 \
#  MAX_PREFILL_LENGTH=128 \
#  MAX_TARGET_LENGTH=256 \
#  STEPS=20 \
#  bash end_to_end/tpu/test_grpo.sh

# Fail fast and loud: -e abort on first error, -u error on unset variables,
# -o pipefail propagate failures through pipelines, -x trace every command.
set -xeuo pipefail

# External users: override BASE_OUTPUT_DIRECTORY with a bucket you can write to.
BASE_OUTPUT_DIRECTORY=${BASE_OUTPUT_DIRECTORY:-gs://runner-maxtext-logs}
RUN_NAME=grpo-$(date +%Y-%m-%d-%H-%M-%S)

# NOTE(review): JAX_PLATFORMS / JAX_BACKEND_TARGET / ENABLE_PATHWAYS_PERSISTENCE
# are applied inline on the launch command at the bottom of this script; the
# bare (unexported) assignments that used to sit here were no-ops, and
# `HF_TOKEN=${HF_TOKEN}` leaked the secret token into the `set -x` trace, so
# both were removed. HF_TOKEN passed via the environment (see Usage) is already
# inherited by the python3 child process.

# Sequence-length and sampling tunables, overridable from the environment.
MAX_PREFILL_LENGTH=${MAX_PREFILL_LENGTH:-128}
MAX_TARGET_LENGTH=${MAX_TARGET_LENGTH:-256}
NUM_GENERATIONS=2
INFERENCE_PER_DEVICE_BATCH_SIZE=${INFERENCE_PER_DEVICE_BATCH_SIZE:-1}

# Each inference device produces NUM_GENERATIONS rollouts per prompt, so its
# effective per-device batch size is scaled up by that factor.
INFERENCE_PER_DEVICE_BS=$(( INFERENCE_PER_DEVICE_BATCH_SIZE * NUM_GENERATIONS ))

# Overrides shared by both the trainer and the inference/sampler configs:
# model/tokenizer identity, sequence lengths, dataset, and dtype settings.
# Built up incrementally; consumers word-split this string into key=value args.
COMMON_ARGS="model_name=${MODEL} base_output_directory=${BASE_OUTPUT_DIRECTORY}"
COMMON_ARGS+=" max_prefill_predict_length=${MAX_PREFILL_LENGTH} max_target_length=${MAX_TARGET_LENGTH}"
COMMON_ARGS+=" enable_checkpointing=false async_checkpointing=false"
COMMON_ARGS+=" tokenizer_type=huggingface tokenizer_path=${TOKENIZER}"
COMMON_ARGS+=" dataset_type=hf hf_path='trl-lib/tldr'"
COMMON_ARGS+=" enable_single_controller=true"
COMMON_ARGS+=" dtype=bfloat16 weight_dtype=bfloat16"
COMMON_ARGS+=" allow_split_physical_axes=true enable_goodput_recording=false monitor_goodput=false"

# Trainer-side overrides: run identity, sampler topology, batch sizing, and a
# short profiling window taken after the first few warm-up steps.
TRAINING_ARGS="run_name=${RUN_NAME} scan_layers=true"
TRAINING_ARGS+=" inference_replicas=${NUM_SAMPLERS} inference_devices_per_replica=${DEVICES_PER_SAMPLER} subslice_shape=${TRAINING_SUBSLICE}"
TRAINING_ARGS+=" inference_rollouts=1"
TRAINING_ARGS+=" per_device_batch_size=${TRAINING_PER_DEVICE_BATCH_SIZE} num_generations=${NUM_GENERATIONS} steps=${STEPS}"
TRAINING_ARGS+=" profiler=xplane skip_first_n_steps_for_profiler=5 profiler_steps=3"

# Sampler-side overrides. The profiler window (skip 10, capture 2) is offset
# from the trainer's (skip 5, capture 3) so inference-TPU profiles are not
# captured while the trainer TPUs are being profiled; profiler_steps is kept
# small because inference profiles are large.
INFERENCE_ARGS="run_name=grpo scan_layers=false"
INFERENCE_ARGS+=" per_device_batch_size=${INFERENCE_PER_DEVICE_BS} num_generations=${NUM_GENERATIONS}"
INFERENCE_ARGS+=" ici_data_parallelism=${NUM_SAMPLERS} ici_tensor_parallelism=${DEVICES_PER_SAMPLER} subslice_shape=${INFERENCE_SUBSLICE}"
INFERENCE_ARGS+=" profiler=xplane skip_first_n_steps_for_profiler=10 profiler_steps=2"

# Launch the single-controller GRPO job against the local Pathways proxy
# (JAX dispatches through the grpc backend at 127.0.0.1:29000).
# Argument order appears positional: grpo.yml followed by its overrides
# configures the trainer, then grpo_inference.yml followed by its overrides
# configures the inference/sampler side — TODO confirm against grpo_trainer.py.
# ${COMMON_ARGS}/${TRAINING_ARGS}/${INFERENCE_ARGS} are intentionally unquoted
# so each space-separated key=value override word-splits into its own argv entry.
JAX_PLATFORMS=proxy JAX_BACKEND_TARGET=grpc://127.0.0.1:29000 ENABLE_PATHWAYS_PERSISTENCE='1' \
    python3 src/MaxText/experimental/rl/grpo_trainer.py src/MaxText/experimental/rl/grpo.yml  \
    ${COMMON_ARGS} ${TRAINING_ARGS} src/MaxText/experimental/rl/grpo_inference.yml \
    ${COMMON_ARGS} ${INFERENCE_ARGS}
