{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "ed2bedb8",
   "metadata": {},
   "outputs": [],
   "source": [
    "%load_ext autoreload\n",
    "%autoreload 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "8d0300f3",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<style>\n",
       ".cell-output-ipywidget-background {\n",
       "    background-color: transparent !important;\n",
       "}\n",
       ":root {\n",
       "    --jp-widgets-color: var(--vscode-editor-foreground);\n",
       "    --jp-widgets-font-size: var(--vscode-editor-font-size);\n",
        "}\n",
       "</style>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "%%html\n",
    "<style>\n",
    ".cell-output-ipywidget-background {\n",
    "    background-color: transparent !important;\n",
    "}\n",
    ":root {\n",
    "    --jp-widgets-color: var(--vscode-editor-foreground);\n",
    "    --jp-widgets-font-size: var(--vscode-editor-font-size);\n",
    "}\n",
    "</style>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "11f14c1b",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mbradhilton\u001b[0m to \u001b[32mhttps://api.wandb.ai\u001b[0m. Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "Tracking run with wandb version 0.20.1"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "Run data is saved locally in <code>/home/ubuntu/sky_workdir/dev/tau-bench/wandb/run-20250709_134754-003</code>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "Resuming run <strong><a href='https://wandb.ai/bradhilton/tau-bench/runs/003' target=\"_blank\">003</a></strong> to <a href='https://wandb.ai/bradhilton/tau-bench' target=\"_blank\">Weights & Biases</a> (<a href='https://wandb.me/developer-guide' target=\"_blank\">docs</a>)<br>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       " View project at <a href='https://wandb.ai/bradhilton/tau-bench' target=\"_blank\">https://wandb.ai/bradhilton/tau-bench</a>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       " View run at <a href='https://wandb.ai/bradhilton/tau-bench/runs/003' target=\"_blank\">https://wandb.ai/bradhilton/tau-bench/runs/003</a>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO 07-09 13:48:02 [__init__.py:244] Automatically detected platform cuda.\n",
      "WARNING 07-09 13:48:10 [env_override.py:17] NCCL_CUMEM_ENABLE is set to 0, skipping override. This may increase memory overhead with cudagraph+allreduce: https://github.com/NVIDIA/nccl/issues/1234\n",
      "INFO 07-09 13:48:10 [__init__.py:244] Automatically detected platform cuda.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Downloading '.gitattributes' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/a6344aac8c09253b3b630fb776ae94478aa0275b.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/a6344aac8c09253b3b630fb776ae94478aa0275b\n",
      "Downloading 'LICENSE' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/6634c8cc3133b3848ec74b9f275acaaa1ea618ab.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/6634c8cc3133b3848ec74b9f275acaaa1ea618ab\n",
      "Downloading 'README.md' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/761da35f2465ff5ecaae7902edf5549ea97bd7fa.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/761da35f2465ff5ecaae7902edf5549ea97bd7fa\n",
      "Downloading 'config.json' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/989289c4009026e35063c943c2a228e2c0873d31.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/989289c4009026e35063c943c2a228e2c0873d31\n",
      "Downloading 'generation_config.json' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/bf077f03dc569cfb8a90b3ec1ad20365a620bad6.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/bf077f03dc569cfb8a90b3ec1ad20365a620bad6\n",
      "Downloading 'merges.txt' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/20024bfe7c83998e9aeaf98a0cd6a2ce6306c2f0.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/20024bfe7c83998e9aeaf98a0cd6a2ce6306c2f0\n",
      "Downloading 'model-00001-of-00017.safetensors' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/fa006a0382517a4e9a8781d8e7d3405cd6bdd1b9100fb344d73f925b7da9341c.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/fa006a0382517a4e9a8781d8e7d3405cd6bdd1b9100fb344d73f925b7da9341c\n",
      "Downloading 'model-00002-of-00017.safetensors' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/0d7f28a4ba7135c2c8365016170cfa25723f65ec41e32ec9a3c81746e4395615.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/0d7f28a4ba7135c2c8365016170cfa25723f65ec41e32ec9a3c81746e4395615\n",
      "Downloading 'model-00003-of-00017.safetensors' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/8f4b7b811a7ecf117c179ac9413b7a746f2fc0707a41ae4b2efd0282e73bed85.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/8f4b7b811a7ecf117c179ac9413b7a746f2fc0707a41ae4b2efd0282e73bed85\n",
      "Downloading 'model-00004-of-00017.safetensors' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/de8ab9cb81e6798031d2cf9ef6d15a0034b77fceeba3d4e011647943c8b99afb.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/de8ab9cb81e6798031d2cf9ef6d15a0034b77fceeba3d4e011647943c8b99afb\n",
      "Downloading 'model-00005-of-00017.safetensors' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/fa86494f0074cc36c610aa54e7bde03edefc57a9be7636216277bd5ed8b8bfda.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/fa86494f0074cc36c610aa54e7bde03edefc57a9be7636216277bd5ed8b8bfda\n",
      "Downloading 'model-00006-of-00017.safetensors' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/c8fa152bd8a23bab2c0cfca43ff9f586bccd82b1cb67f2f84231f31d0f3cab47.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/c8fa152bd8a23bab2c0cfca43ff9f586bccd82b1cb67f2f84231f31d0f3cab47\n",
      "Downloading 'model-00007-of-00017.safetensors' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/5e33e16319d18d1e180187d9f858deddbbaf06d950abc88941bf027d1439c7bc.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/5e33e16319d18d1e180187d9f858deddbbaf06d950abc88941bf027d1439c7bc\n",
      "Downloading 'model-00008-of-00017.safetensors' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/8561b86737884397772bb990b38c0efe896f66ed27815ca207ddc8ec7965ffe4.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/8561b86737884397772bb990b38c0efe896f66ed27815ca207ddc8ec7965ffe4\n",
      "Downloading 'model-00009-of-00017.safetensors' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/47f562bf77d986b53c5390b01a30a77576c3eed69191b484cf88b53bb64bce19.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/47f562bf77d986b53c5390b01a30a77576c3eed69191b484cf88b53bb64bce19\n",
      "Downloading 'model-00010-of-00017.safetensors' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/359450e2c9e261751b5803fcdd1d0a040029a6119a271dd8592d7bb37fa722b5.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/359450e2c9e261751b5803fcdd1d0a040029a6119a271dd8592d7bb37fa722b5\n",
      "Downloading 'model-00011-of-00017.safetensors' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/836f0570f5694ce1d55f505064190e2688926dad4f033384ae519d96faaad670.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/836f0570f5694ce1d55f505064190e2688926dad4f033384ae519d96faaad670\n",
      "Downloading 'model-00012-of-00017.safetensors' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/5307431e385a177375a5aa887eb1c546f3413dd2e2269c291672f11f752842b4.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/5307431e385a177375a5aa887eb1c546f3413dd2e2269c291672f11f752842b4\n",
      "Downloading 'model-00013-of-00017.safetensors' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/9b2657db02572dd4370186b35c0e0fa70218d7e2ec1190e4dc4a755c5cbd430e.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/9b2657db02572dd4370186b35c0e0fa70218d7e2ec1190e4dc4a755c5cbd430e\n",
      "Downloading 'model-00014-of-00017.safetensors' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/b5f1f8af3b6bbf734790985590752fb815dd69566cf05ee578d70af74ace1d4c.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/b5f1f8af3b6bbf734790985590752fb815dd69566cf05ee578d70af74ace1d4c\n",
      "Downloading 'model-00015-of-00017.safetensors' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/8cdfcbf8913e10056c981db2db817f92c8601998deb9eabcae14fade98d76c14.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/8cdfcbf8913e10056c981db2db817f92c8601998deb9eabcae14fade98d76c14\n",
      "Downloading 'model-00016-of-00017.safetensors' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/977b3d2ca7f97dc3b518ed5087ddc43ccf7afb371190828f2334d7e4c81224ee.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/977b3d2ca7f97dc3b518ed5087ddc43ccf7afb371190828f2334d7e4c81224ee\n",
      "Downloading 'model-00017-of-00017.safetensors' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/2853695483f9c61a8bd8b109025b3c591214a0362992fa844537f09acc81bd03.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/2853695483f9c61a8bd8b109025b3c591214a0362992fa844537f09acc81bd03\n",
      "Downloading 'model.safetensors.index.json' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/4a7971b5e79db2f3ffd239ad57052a1bf607f874.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/4a7971b5e79db2f3ffd239ad57052a1bf607f874\n",
      "Downloading 'tokenizer.json' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/443909a61d429dff23010e5bddd28ff530edda00.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/443909a61d429dff23010e5bddd28ff530edda00\n",
      "Downloading 'tokenizer_config.json' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/07bfe0640cb5a0037f9322287fbfc682806cf672.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/07bfe0640cb5a0037f9322287fbfc682806cf672\n",
      "Downloading 'vocab.json' to '/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/4783fe10ac3adce15ac8f358ef5462739852c569.incomplete'\n",
      "Download complete. Moving file to /home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/blobs/4783fe10ac3adce15ac8f358ef5462739852c569\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-32B-Instruct/snapshots/5ede1c97bbab6ce5cda5812749b4c0bdf79b18dd\n",
      "INFO 07-09 13:49:58 [config.py:823] This model supports multiple tasks: {'score', 'classify', 'embed', 'reward', 'generate'}. Defaulting to 'generate'.\n",
      "INFO 07-09 13:49:58 [config.py:1946] Defaulting to use mp for distributed inference\n",
      "INFO 07-09 13:49:58 [config.py:2195] Chunked prefill is enabled with max_num_batched_tokens=2048.\n",
      "WARNING 07-09 13:49:59 [utils.py:2597] We must use the `spawn` multiprocessing start method. Overriding VLLM_WORKER_MULTIPROC_METHOD to 'spawn'. See https://docs.vllm.ai/en/latest/usage/troubleshooting.html#python-multiprocessing for more information. Reason: CUDA is initialized\n",
      "WARNING 07-09 13:50:00 [env_override.py:17] NCCL_CUMEM_ENABLE is set to 0, skipping override. This may increase memory overhead with cudagraph+allreduce: https://github.com/NVIDIA/nccl/issues/1234\n",
      "INFO 07-09 13:50:01 [__init__.py:244] Automatically detected platform cuda.\n",
      "INFO 07-09 13:50:04 [core.py:455] Waiting for init message from front-end.\n",
      "INFO 07-09 13:50:04 [core.py:70] Initializing a V1 LLM engine (v0.9.1) with config: model='Qwen/Qwen2.5-32B-Instruct', speculative_config=None, tokenizer='Qwen/Qwen2.5-32B-Instruct', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config={}, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=32768, download_dir=None, load_format=LoadFormat.AUTO, tensor_parallel_size=8, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto,  device_config=cuda, decoding_config=DecodingConfig(backend='auto', disable_fallback=False, disable_any_whitespace=False, disable_additional_properties=False, reasoning_backend=''), observability_config=ObservabilityConfig(show_hidden_metrics_for_version=None, otlp_traces_endpoint=None, collect_detailed_traces=None), seed=0, served_model_name=Qwen/Qwen2.5-32B-Instruct, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, pooler_config=None, compilation_config={\"level\":3,\"debug_dump_path\":\"\",\"cache_dir\":\"\",\"backend\":\"\",\"custom_ops\":[\"none\"],\"splitting_ops\":[\"vllm.unified_attention\",\"vllm.unified_attention_with_output\"],\"use_inductor\":true,\"compile_sizes\":[],\"inductor_compile_config\":{\"enable_auto_functionalized_v2\":false},\"inductor_passes\":{},\"use_cudagraph\":true,\"cudagraph_num_of_warmups\":1,\"cudagraph_capture_sizes\":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],\"cudagraph_copy_inputs\":false,\"full_cuda_graph\":false,\"max_capture_size\":512,\"local_cache_dir\":null}\n",
      "WARNING 07-09 13:50:04 [multiproc_worker_utils.py:307] Reducing Torch parallelism from 64 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.\n",
      "INFO 07-09 13:50:04 [shm_broadcast.py:289] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3, 4, 5, 6, 7], buffer_handle=(8, 16777216, 10, 'psm_d0cce0a9'), local_subscribe_addr='ipc:///tmp/3185ff8c-0b5d-4376-baff-f915deb28f75', remote_subscribe_addr=None, remote_addr_ipv6=False)\n",
      "WARNING 07-09 13:50:05 [env_override.py:17] NCCL_CUMEM_ENABLE is set to 0, skipping override. This may increase memory overhead with cudagraph+allreduce: https://github.com/NVIDIA/nccl/issues/1234\n",
      "WARNING 07-09 13:50:05 [env_override.py:17] NCCL_CUMEM_ENABLE is set to 0, skipping override. This may increase memory overhead with cudagraph+allreduce: https://github.com/NVIDIA/nccl/issues/1234\n",
      "WARNING 07-09 13:50:05 [env_override.py:17] NCCL_CUMEM_ENABLE is set to 0, skipping override. This may increase memory overhead with cudagraph+allreduce: https://github.com/NVIDIA/nccl/issues/1234\n",
      "WARNING 07-09 13:50:05 [env_override.py:17] NCCL_CUMEM_ENABLE is set to 0, skipping override. This may increase memory overhead with cudagraph+allreduce: https://github.com/NVIDIA/nccl/issues/1234\n",
      "WARNING 07-09 13:50:05 [env_override.py:17] NCCL_CUMEM_ENABLE is set to 0, skipping override. This may increase memory overhead with cudagraph+allreduce: https://github.com/NVIDIA/nccl/issues/1234\n",
      "WARNING 07-09 13:50:05 [env_override.py:17] NCCL_CUMEM_ENABLE is set to 0, skipping override. This may increase memory overhead with cudagraph+allreduce: https://github.com/NVIDIA/nccl/issues/1234\n",
      "WARNING 07-09 13:50:05 [env_override.py:17] NCCL_CUMEM_ENABLE is set to 0, skipping override. This may increase memory overhead with cudagraph+allreduce: https://github.com/NVIDIA/nccl/issues/1234\n",
      "WARNING 07-09 13:50:05 [env_override.py:17] NCCL_CUMEM_ENABLE is set to 0, skipping override. This may increase memory overhead with cudagraph+allreduce: https://github.com/NVIDIA/nccl/issues/1234\n",
      "INFO 07-09 13:50:06 [__init__.py:244] Automatically detected platform cuda.\n",
      "INFO 07-09 13:50:06 [__init__.py:244] Automatically detected platform cuda.\n",
      "INFO 07-09 13:50:06 [__init__.py:244] Automatically detected platform cuda.\n",
      "INFO 07-09 13:50:06 [__init__.py:244] Automatically detected platform cuda.\n",
      "INFO 07-09 13:50:06 [__init__.py:244] Automatically detected platform cuda.\n",
      "INFO 07-09 13:50:07 [__init__.py:244] Automatically detected platform cuda.\n",
      "INFO 07-09 13:50:07 [__init__.py:244] Automatically detected platform cuda.\n",
      "INFO 07-09 13:50:07 [__init__.py:244] Automatically detected platform cuda.\n",
      "INFO 07-09 13:50:10 [worker_base.py:590] Injected <class 'art.vllm.engine.WorkerExtension'> into <class 'vllm.v1.worker.gpu_worker.Worker'> for extended collective_rpc calls ['run', 'time']\n",
      "WARNING 07-09 13:50:10 [utils.py:2737] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x7f84385c9850>\n",
      "\u001b[1;36m(VllmWorker rank=7 pid=67693)\u001b[0;0m INFO 07-09 13:50:10 [shm_broadcast.py:289] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_46516c1d'), local_subscribe_addr='ipc:///tmp/2c637963-40c3-464e-9dd1-fc85b3976a77', remote_subscribe_addr=None, remote_addr_ipv6=False)\n",
      "INFO 07-09 13:50:10 [worker_base.py:590] Injected <class 'art.vllm.engine.WorkerExtension'> into <class 'vllm.v1.worker.gpu_worker.Worker'> for extended collective_rpc calls ['run', 'time']\n",
      "WARNING 07-09 13:50:10 [utils.py:2737] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x7f14a4bc3010>\n",
      "\u001b[1;36m(VllmWorker rank=2 pid=67688)\u001b[0;0m INFO 07-09 13:50:10 [shm_broadcast.py:289] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a01c8ad3'), local_subscribe_addr='ipc:///tmp/9259a220-8963-4bb2-8485-7f4b8809cfce', remote_subscribe_addr=None, remote_addr_ipv6=False)\n",
      "INFO 07-09 13:50:10 [worker_base.py:590] Injected <class 'art.vllm.engine.WorkerExtension'> into <class 'vllm.v1.worker.gpu_worker.Worker'> for extended collective_rpc calls ['run', 'time']\n",
      "WARNING 07-09 13:50:10 [utils.py:2737] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x7f5dd3c01d10>\n",
      "\u001b[1;36m(VllmWorker rank=3 pid=67689)\u001b[0;0m INFO 07-09 13:50:10 [shm_broadcast.py:289] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_62d89edb'), local_subscribe_addr='ipc:///tmp/49e83e0f-33de-42ce-826e-f613aafce578', remote_subscribe_addr=None, remote_addr_ipv6=False)\n",
      "INFO 07-09 13:50:10 [worker_base.py:590] Injected <class 'art.vllm.engine.WorkerExtension'> into <class 'vllm.v1.worker.gpu_worker.Worker'> for extended collective_rpc calls ['run', 'time']\n",
      "WARNING 07-09 13:50:10 [utils.py:2737] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x7f328ad7f750>\n",
      "\u001b[1;36m(VllmWorker rank=1 pid=67685)\u001b[0;0m INFO 07-09 13:50:10 [shm_broadcast.py:289] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_0e395130'), local_subscribe_addr='ipc:///tmp/3f1c27d1-ce1f-488c-aa45-b8d6f9e70047', remote_subscribe_addr=None, remote_addr_ipv6=False)\n",
      "INFO 07-09 13:50:10 [worker_base.py:590] Injected <class 'art.vllm.engine.WorkerExtension'> into <class 'vllm.v1.worker.gpu_worker.Worker'> for extended collective_rpc calls ['run', 'time']\n",
      "WARNING 07-09 13:50:10 [utils.py:2737] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x7fd4a839dfd0>\n",
      "\u001b[1;36m(VllmWorker rank=4 pid=67690)\u001b[0;0m INFO 07-09 13:50:10 [shm_broadcast.py:289] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_f729459b'), local_subscribe_addr='ipc:///tmp/15b9d75f-72f2-4e54-a54d-889ccf19a20d', remote_subscribe_addr=None, remote_addr_ipv6=False)\n",
      "INFO 07-09 13:50:10 [worker_base.py:590] Injected <class 'art.vllm.engine.WorkerExtension'> into <class 'vllm.v1.worker.gpu_worker.Worker'> for extended collective_rpc calls ['run', 'time']\n",
      "WARNING 07-09 13:50:10 [utils.py:2737] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x7fe381d2bbd0>\n",
      "\u001b[1;36m(VllmWorker rank=5 pid=67691)\u001b[0;0m INFO 07-09 13:50:10 [shm_broadcast.py:289] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_3c3717a6'), local_subscribe_addr='ipc:///tmp/7cbde1d9-43ab-4188-bb10-420bf00e74e3', remote_subscribe_addr=None, remote_addr_ipv6=False)\n",
      "INFO 07-09 13:50:10 [worker_base.py:590] Injected <class 'art.vllm.engine.WorkerExtension'> into <class 'vllm.v1.worker.gpu_worker.Worker'> for extended collective_rpc calls ['run', 'time']\n",
      "WARNING 07-09 13:50:10 [utils.py:2737] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x7fd3d52c3790>\n",
      "\u001b[1;36m(VllmWorker rank=0 pid=67683)\u001b[0;0m INFO 07-09 13:50:10 [shm_broadcast.py:289] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_89050af1'), local_subscribe_addr='ipc:///tmp/9599b03c-a144-41b2-b073-ed67605568a9', remote_subscribe_addr=None, remote_addr_ipv6=False)\n",
      "INFO 07-09 13:50:11 [worker_base.py:590] Injected <class 'art.vllm.engine.WorkerExtension'> into <class 'vllm.v1.worker.gpu_worker.Worker'> for extended collective_rpc calls ['run', 'time']\n",
      "WARNING 07-09 13:50:11 [utils.py:2737] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x7f9f7867b710>\n",
      "\u001b[1;36m(VllmWorker rank=6 pid=67692)\u001b[0;0m INFO 07-09 13:50:11 [shm_broadcast.py:289] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_cee460d6'), local_subscribe_addr='ipc:///tmp/c2fa123a-3a49-4bdd-a27e-3a48dde9d6e8', remote_subscribe_addr=None, remote_addr_ipv6=False)\n",
      "\u001b[1;36m(VllmWorker rank=4 pid=67690)\u001b[0;0m INFO 07-09 13:50:14 [utils.py:1126] Found nccl from library libnccl.so.2\n",
      "\u001b[1;36m(VllmWorker rank=2 pid=67688)\u001b[0;0m INFO 07-09 13:50:14 [utils.py:1126] Found nccl from library libnccl.so.2\n",
      "\u001b[1;36m(VllmWorker rank=4 pid=67690)\u001b[0;0m \u001b[1;36m(VllmWorker rank=2 pid=67688)\u001b[0;0m INFO 07-09 13:50:14 [pynccl.py:70] vLLM is using nccl==2.26.2\n",
      "INFO 07-09 13:50:14 [pynccl.py:70] vLLM is using nccl==2.26.2\n",
      "\u001b[1;36m(VllmWorker rank=0 pid=67683)\u001b[0;0m INFO 07-09 13:50:14 [utils.py:1126] Found nccl from library libnccl.so.2\n",
      "\u001b[1;36m(VllmWorker rank=0 pid=67683)\u001b[0;0m INFO 07-09 13:50:14 [pynccl.py:70] vLLM is using nccl==2.26.2\n",
      "\u001b[1;36m(VllmWorker rank=1 pid=67685)\u001b[0;0m INFO 07-09 13:50:14 [utils.py:1126] Found nccl from library libnccl.so.2\n",
      "\u001b[1;36m(VllmWorker rank=1 pid=67685)\u001b[0;0m INFO 07-09 13:50:14 [pynccl.py:70] vLLM is using nccl==2.26.2\n",
      "\u001b[1;36m(VllmWorker rank=6 pid=67692)\u001b[0;0m INFO 07-09 13:50:14 [utils.py:1126] Found nccl from library libnccl.so.2\n",
      "\u001b[1;36m(VllmWorker rank=6 pid=67692)\u001b[0;0m INFO 07-09 13:50:14 [pynccl.py:70] vLLM is using nccl==2.26.2\n",
      "\u001b[1;36m(VllmWorker rank=7 pid=67693)\u001b[0;0m INFO 07-09 13:50:14 [utils.py:1126] Found nccl from library libnccl.so.2\n",
      "\u001b[1;36m(VllmWorker rank=7 pid=67693)\u001b[0;0m INFO 07-09 13:50:14 [pynccl.py:70] vLLM is using nccl==2.26.2\n",
      "\u001b[1;36m(VllmWorker rank=3 pid=67689)\u001b[0;0m INFO 07-09 13:50:14 [utils.py:1126] Found nccl from library libnccl.so.2\n",
      "\u001b[1;36m(VllmWorker rank=3 pid=67689)\u001b[0;0m INFO 07-09 13:50:14 [pynccl.py:70] vLLM is using nccl==2.26.2\n",
      "\u001b[1;36m(VllmWorker rank=5 pid=67691)\u001b[0;0m INFO 07-09 13:50:14 [utils.py:1126] Found nccl from library libnccl.so.2\n",
      "\u001b[1;36m(VllmWorker rank=5 pid=67691)\u001b[0;0m INFO 07-09 13:50:14 [pynccl.py:70] vLLM is using nccl==2.26.2\n",
      "\u001b[1;36m(VllmWorker rank=5 pid=67691)\u001b[0;0m WARNING 07-09 13:50:14 [custom_all_reduce.py:137] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.\n",
      "\u001b[1;36m(VllmWorker rank=7 pid=67693)\u001b[0;0m WARNING 07-09 13:50:14 [custom_all_reduce.py:137] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.\n",
      "\u001b[1;36m(VllmWorker rank=0 pid=67683)\u001b[0;0m WARNING 07-09 13:50:14 [custom_all_reduce.py:137] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.\n",
      "\u001b[1;36m(VllmWorker rank=6 pid=67692)\u001b[0;0m WARNING 07-09 13:50:14 [custom_all_reduce.py:137] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.\n",
      "\u001b[1;36m(VllmWorker rank=2 pid=67688)\u001b[0;0m WARNING 07-09 13:50:14 [custom_all_reduce.py:137] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.\n",
      "\u001b[1;36m(VllmWorker rank=1 pid=67685)\u001b[0;0m WARNING 07-09 13:50:14 [custom_all_reduce.py:137] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.\n",
      "\u001b[1;36m(VllmWorker rank=4 pid=67690)\u001b[0;0m WARNING 07-09 13:50:14 [custom_all_reduce.py:137] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.\n",
      "\u001b[1;36m(VllmWorker rank=3 pid=67689)\u001b[0;0m WARNING 07-09 13:50:14 [custom_all_reduce.py:137] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.\n",
      "\u001b[1;36m(VllmWorker rank=0 pid=67683)\u001b[0;0m INFO 07-09 13:50:14 [shm_broadcast.py:289] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3, 4, 5, 6, 7], buffer_handle=(7, 4194304, 6, 'psm_cc7a6398'), local_subscribe_addr='ipc:///tmp/18c2caec-ebeb-4d25-839a-27f7c7612bb3', remote_subscribe_addr=None, remote_addr_ipv6=False)\n",
      "\u001b[1;36m(VllmWorker rank=1 pid=67685)\u001b[0;0m INFO 07-09 13:50:14 [parallel_state.py:1065] rank 1 in world size 8 is assigned as DP rank 0, PP rank 0, TP rank 1, EP rank 1\n",
      "\u001b[1;36m(VllmWorker rank=0 pid=67683)\u001b[0;0m INFO 07-09 13:50:14 [parallel_state.py:1065] rank 0 in world size 8 is assigned as DP rank 0, PP rank 0, TP rank 0, EP rank 0\n",
      "\u001b[1;36m(VllmWorker rank=2 pid=67688)\u001b[0;0m INFO 07-09 13:50:14 [parallel_state.py:1065] rank 2 in world size 8 is assigned as DP rank 0, PP rank 0, TP rank 2, EP rank 2\n",
      "\u001b[1;36m(VllmWorker rank=3 pid=67689)\u001b[0;0m INFO 07-09 13:50:14 [parallel_state.py:1065] rank 3 in world size 8 is assigned as DP rank 0, PP rank 0, TP rank 3, EP rank 3\n",
      "\u001b[1;36m(VllmWorker rank=4 pid=67690)\u001b[0;0m INFO 07-09 13:50:14 [parallel_state.py:1065] rank 4 in world size 8 is assigned as DP rank 0, PP rank 0, TP rank 4, EP rank 4\n",
      "\u001b[1;36m(VllmWorker rank=5 pid=67691)\u001b[0;0m INFO 07-09 13:50:14 [parallel_state.py:1065] rank 5 in world size 8 is assigned as DP rank 0, PP rank 0, TP rank 5, EP rank 5\n",
      "\u001b[1;36m(VllmWorker rank=7 pid=67693)\u001b[0;0m INFO 07-09 13:50:14 [parallel_state.py:1065] rank 7 in world size 8 is assigned as DP rank 0, PP rank 0, TP rank 7, EP rank 7\n",
      "\u001b[1;36m(VllmWorker rank=6 pid=67692)\u001b[0;0m INFO 07-09 13:50:14 [parallel_state.py:1065] rank 6 in world size 8 is assigned as DP rank 0, PP rank 0, TP rank 6, EP rank 6\n",
      "\u001b[1;36m(VllmWorker rank=0 pid=67683)\u001b[0;0m WARNING 07-09 13:50:14 [topk_topp_sampler.py:59] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.\n",
      "\u001b[1;36m(VllmWorker rank=1 pid=67685)\u001b[0;0m WARNING 07-09 13:50:14 [topk_topp_sampler.py:59] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.\n",
      "\u001b[1;36m(VllmWorker rank=2 pid=67688)\u001b[0;0m \u001b[1;36m(VllmWorker rank=3 pid=67689)\u001b[0;0m WARNING 07-09 13:50:14 [topk_topp_sampler.py:59] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.\n",
      "\u001b[1;36m(VllmWorker rank=4 pid=67690)\u001b[0;0m WARNING 07-09 13:50:14 [topk_topp_sampler.py:59] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.\n",
      "\u001b[1;36m(VllmWorker rank=7 pid=67693)\u001b[0;0m WARNING 07-09 13:50:14 [topk_topp_sampler.py:59] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.\n",
      "\u001b[1;36m(VllmWorker rank=6 pid=67692)\u001b[0;0m WARNING 07-09 13:50:14 [topk_topp_sampler.py:59] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.\n",
      "WARNING 07-09 13:50:14 [topk_topp_sampler.py:59] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.\n",
      "\u001b[1;36m(VllmWorker rank=5 pid=67691)\u001b[0;0m WARNING 07-09 13:50:14 [topk_topp_sampler.py:59] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.\n",
      "\u001b[1;36m(VllmWorker rank=7 pid=67693)\u001b[0;0m INFO 07-09 13:50:14 [gpu_model_runner.py:1595] Starting to load model Qwen/Qwen2.5-32B-Instruct...\n",
      "\u001b[1;36m(VllmWorker rank=2 pid=67688)\u001b[0;0m INFO 07-09 13:50:14 [gpu_model_runner.py:1595] Starting to load model Qwen/Qwen2.5-32B-Instruct...\n",
      "\u001b[1;36m(VllmWorker rank=4 pid=67690)\u001b[0;0m INFO 07-09 13:50:14 [gpu_model_runner.py:1595] Starting to load model Qwen/Qwen2.5-32B-Instruct...\n",
      "\u001b[1;36m(VllmWorker rank=3 pid=67689)\u001b[0;0m INFO 07-09 13:50:14 [gpu_model_runner.py:1595] Starting to load model Qwen/Qwen2.5-32B-Instruct...\n",
      "\u001b[1;36m(VllmWorker rank=1 pid=67685)\u001b[0;0m INFO 07-09 13:50:14 [gpu_model_runner.py:1595] Starting to load model Qwen/Qwen2.5-32B-Instruct...\n",
      "\u001b[1;36m(VllmWorker rank=6 pid=67692)\u001b[0;0m INFO 07-09 13:50:14 [gpu_model_runner.py:1595] Starting to load model Qwen/Qwen2.5-32B-Instruct...\n",
      "\u001b[1;36m(VllmWorker rank=0 pid=67683)\u001b[0;0m INFO 07-09 13:50:14 [gpu_model_runner.py:1595] Starting to load model Qwen/Qwen2.5-32B-Instruct...\n",
      "\u001b[1;36m(VllmWorker rank=5 pid=67691)\u001b[0;0m INFO 07-09 13:50:14 [gpu_model_runner.py:1595] Starting to load model Qwen/Qwen2.5-32B-Instruct...\n",
      "\u001b[1;36m(VllmWorker rank=4 pid=67690)\u001b[0;0m INFO 07-09 13:50:15 [gpu_model_runner.py:1600] Loading model from scratch...\n",
      "\u001b[1;36m(VllmWorker rank=7 pid=67693)\u001b[0;0m INFO 07-09 13:50:15 [gpu_model_runner.py:1600] Loading model from scratch...\n",
      "\u001b[1;36m(VllmWorker rank=2 pid=67688)\u001b[0;0m INFO 07-09 13:50:15 [gpu_model_runner.py:1600] Loading model from scratch...\n",
      "\u001b[1;36m(VllmWorker rank=5 pid=67691)\u001b[0;0m INFO 07-09 13:50:15 [gpu_model_runner.py:1600] Loading model from scratch...\n",
      "\u001b[1;36m(VllmWorker rank=3 pid=67689)\u001b[0;0m INFO 07-09 13:50:15 [gpu_model_runner.py:1600] Loading model from scratch...\n",
      "\u001b[1;36m(VllmWorker rank=4 pid=67690)\u001b[0;0m INFO 07-09 13:50:15 [cuda.py:252] Using Flash Attention backend on V1 engine.\n",
      "\u001b[1;36m(VllmWorker rank=7 pid=67693)\u001b[0;0m INFO 07-09 13:50:15 [cuda.py:252] Using Flash Attention backend on V1 engine.\n",
      "\u001b[1;36m(VllmWorker rank=2 pid=67688)\u001b[0;0m INFO 07-09 13:50:15 [cuda.py:252] Using Flash Attention backend on V1 engine.\n",
      "\u001b[1;36m(VllmWorker rank=5 pid=67691)\u001b[0;0m INFO 07-09 13:50:15 [cuda.py:252] Using Flash Attention backend on V1 engine.\n",
      "\u001b[1;36m(VllmWorker rank=0 pid=67683)\u001b[0;0m INFO 07-09 13:50:15 [gpu_model_runner.py:1600] Loading model from scratch...\n",
      "\u001b[1;36m(VllmWorker rank=3 pid=67689)\u001b[0;0m INFO 07-09 13:50:15 [cuda.py:252] Using Flash Attention backend on V1 engine.\n",
      "\u001b[1;36m(VllmWorker rank=6 pid=67692)\u001b[0;0m INFO 07-09 13:50:15 [gpu_model_runner.py:1600] Loading model from scratch...\n",
      "\u001b[1;36m(VllmWorker rank=1 pid=67685)\u001b[0;0m INFO 07-09 13:50:15 [gpu_model_runner.py:1600] Loading model from scratch...\n",
      "\u001b[1;36m(VllmWorker rank=0 pid=67683)\u001b[0;0m INFO 07-09 13:50:15 [cuda.py:252] Using Flash Attention backend on V1 engine.\n",
      "\u001b[1;36m(VllmWorker rank=6 pid=67692)\u001b[0;0m INFO 07-09 13:50:15 [cuda.py:252] Using Flash Attention backend on V1 engine.\n",
      "\u001b[1;36m(VllmWorker rank=1 pid=67685)\u001b[0;0m INFO 07-09 13:50:15 [cuda.py:252] Using Flash Attention backend on V1 engine.\n",
      "\u001b[1;36m(VllmWorker rank=7 pid=67693)\u001b[0;0m INFO 07-09 13:50:15 [weight_utils.py:292] Using model weights format ['*.safetensors']\n",
      "\u001b[1;36m(VllmWorker rank=4 pid=67690)\u001b[0;0m INFO 07-09 13:50:15 [weight_utils.py:292] Using model weights format ['*.safetensors']\n",
      "\u001b[1;36m(VllmWorker rank=5 pid=67691)\u001b[0;0m INFO 07-09 13:50:15 [weight_utils.py:292] Using model weights format ['*.safetensors']\n",
      "\u001b[1;36m(VllmWorker rank=2 pid=67688)\u001b[0;0m INFO 07-09 13:50:15 [weight_utils.py:292] Using model weights format ['*.safetensors']\n",
      "\u001b[1;36m(VllmWorker rank=3 pid=67689)\u001b[0;0m INFO 07-09 13:50:15 [weight_utils.py:292] Using model weights format ['*.safetensors']\n",
      "\u001b[1;36m(VllmWorker rank=0 pid=67683)\u001b[0;0m INFO 07-09 13:50:15 [weight_utils.py:292] Using model weights format ['*.safetensors']\n",
      "\u001b[1;36m(VllmWorker rank=6 pid=67692)\u001b[0;0m INFO 07-09 13:50:15 [weight_utils.py:292] Using model weights format ['*.safetensors']\n",
      "\u001b[1;36m(VllmWorker rank=1 pid=67685)\u001b[0;0m INFO 07-09 13:50:15 [weight_utils.py:292] Using model weights format ['*.safetensors']\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Loading safetensors checkpoint shards:   0% Completed | 0/17 [00:00<?, ?it/s]\n",
      "Loading safetensors checkpoint shards:   6% Completed | 1/17 [00:00<00:03,  4.92it/s]\n",
      "Loading safetensors checkpoint shards:  12% Completed | 2/17 [00:00<00:03,  3.79it/s]\n",
      "Loading safetensors checkpoint shards:  18% Completed | 3/17 [00:00<00:03,  3.82it/s]\n",
      "Loading safetensors checkpoint shards:  24% Completed | 4/17 [00:01<00:03,  3.57it/s]\n",
      "Loading safetensors checkpoint shards:  29% Completed | 5/17 [00:01<00:03,  3.25it/s]\n",
      "Loading safetensors checkpoint shards:  35% Completed | 6/17 [00:01<00:03,  3.20it/s]\n",
      "Loading safetensors checkpoint shards:  41% Completed | 7/17 [00:02<00:03,  3.21it/s]\n",
      "Loading safetensors checkpoint shards:  47% Completed | 8/17 [00:02<00:02,  3.21it/s]\n",
      "Loading safetensors checkpoint shards:  53% Completed | 9/17 [00:02<00:02,  2.83it/s]\n",
      "Loading safetensors checkpoint shards:  59% Completed | 10/17 [00:03<00:02,  2.56it/s]\n",
      "Loading safetensors checkpoint shards:  65% Completed | 11/17 [00:03<00:02,  2.44it/s]\n",
      "Loading safetensors checkpoint shards:  71% Completed | 12/17 [00:04<00:02,  2.38it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[1;36m(VllmWorker rank=7 pid=67693)\u001b[0;0m INFO 07-09 13:50:20 [default_loader.py:272] Loading weights took 4.39 seconds\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Loading safetensors checkpoint shards:  76% Completed | 13/17 [00:04<00:01,  2.21it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[1;36m(VllmWorker rank=7 pid=67693)\u001b[0;0m INFO 07-09 13:50:20 [gpu_model_runner.py:1624] Model loading took 7.6861 GiB and 4.866946 seconds\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Loading safetensors checkpoint shards:  82% Completed | 14/17 [00:05<00:01,  2.23it/s]\n",
      "Loading safetensors checkpoint shards:  88% Completed | 15/17 [00:05<00:00,  2.19it/s]\n",
      "Loading safetensors checkpoint shards:  94% Completed | 16/17 [00:06<00:00,  2.18it/s]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 17/17 [00:06<00:00,  2.25it/s]\n",
      "Loading safetensors checkpoint shards: 100% Completed | 17/17 [00:06<00:00,  2.61it/s]\n",
      "\u001b[1;36m(VllmWorker rank=0 pid=67683)\u001b[0;0m \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[1;36m(VllmWorker rank=0 pid=67683)\u001b[0;0m INFO 07-09 13:50:22 [default_loader.py:272] Loading weights took 6.79 seconds\n",
      "\u001b[1;36m(VllmWorker rank=6 pid=67692)\u001b[0;0m INFO 07-09 13:50:22 [default_loader.py:272] Loading weights took 6.16 seconds\n",
      "\u001b[1;36m(VllmWorker rank=4 pid=67690)\u001b[0;0m INFO 07-09 13:50:22 [default_loader.py:272] Loading weights took 6.69 seconds\n",
      "\u001b[1;36m(VllmWorker rank=5 pid=67691)\u001b[0;0m INFO 07-09 13:50:22 [default_loader.py:272] Loading weights took 6.58 seconds\n",
      "\u001b[1;36m(VllmWorker rank=3 pid=67689)\u001b[0;0m INFO 07-09 13:50:22 [default_loader.py:272] Loading weights took 6.29 seconds\n",
      "\u001b[1;36m(VllmWorker rank=2 pid=67688)\u001b[0;0m INFO 07-09 13:50:22 [default_loader.py:272] Loading weights took 6.47 seconds\n",
      "\u001b[1;36m(VllmWorker rank=1 pid=67685)\u001b[0;0m INFO 07-09 13:50:22 [default_loader.py:272] Loading weights took 6.38 seconds\n",
      "\u001b[1;36m(VllmWorker rank=0 pid=67683)\u001b[0;0m INFO 07-09 13:50:23 [gpu_model_runner.py:1624] Model loading took 7.6861 GiB and 7.274037 seconds\n",
      "\u001b[1;36m(VllmWorker rank=4 pid=67690)\u001b[0;0m INFO 07-09 13:50:23 [gpu_model_runner.py:1624] Model loading took 7.6861 GiB and 7.369671 seconds\n",
      "\u001b[1;36m(VllmWorker rank=3 pid=67689)\u001b[0;0m INFO 07-09 13:50:23 [gpu_model_runner.py:1624] Model loading took 7.6861 GiB and 7.344410 seconds\n",
      "\u001b[1;36m(VllmWorker rank=6 pid=67692)\u001b[0;0m INFO 07-09 13:50:23 [gpu_model_runner.py:1624] Model loading took 7.6861 GiB and 7.266983 seconds\n",
      "\u001b[1;36m(VllmWorker rank=5 pid=67691)\u001b[0;0m INFO 07-09 13:50:23 [gpu_model_runner.py:1624] Model loading took 7.6861 GiB and 7.357371 seconds\n",
      "\u001b[1;36m(VllmWorker rank=1 pid=67685)\u001b[0;0m INFO 07-09 13:50:23 [gpu_model_runner.py:1624] Model loading took 7.6861 GiB and 7.249333 seconds\n",
      "\u001b[1;36m(VllmWorker rank=2 pid=67688)\u001b[0;0m INFO 07-09 13:50:23 [gpu_model_runner.py:1624] Model loading took 7.6861 GiB and 7.365420 seconds\n",
      "\u001b[1;36m(VllmWorker rank=4 pid=67690)\u001b[0;0m INFO 07-09 13:50:32 [backends.py:462] Using cache directory: /home/ubuntu/.cache/vllm/torch_compile_cache/4ef326d1aa/rank_4_0 for vLLM's torch.compile\n",
      "\u001b[1;36m(VllmWorker rank=4 pid=67690)\u001b[0;0m INFO 07-09 13:50:32 [backends.py:472] Dynamo bytecode transform time: 8.66 s\n",
      "\u001b[1;36m(VllmWorker rank=2 pid=67688)\u001b[0;0m INFO 07-09 13:50:32 [backends.py:462] Using cache directory: /home/ubuntu/.cache/vllm/torch_compile_cache/4ef326d1aa/rank_2_0 for vLLM's torch.compile\n",
      "\u001b[1;36m(VllmWorker rank=2 pid=67688)\u001b[0;0m INFO 07-09 13:50:32 [backends.py:472] Dynamo bytecode transform time: 8.71 s\n",
      "\u001b[1;36m(VllmWorker rank=7 pid=67693)\u001b[0;0m INFO 07-09 13:50:32 [backends.py:462] Using cache directory: /home/ubuntu/.cache/vllm/torch_compile_cache/4ef326d1aa/rank_7_0 for vLLM's torch.compile\n",
      "\u001b[1;36m(VllmWorker rank=7 pid=67693)\u001b[0;0m INFO 07-09 13:50:32 [backends.py:472] Dynamo bytecode transform time: 8.71 s\n",
      "\u001b[1;36m(VllmWorker rank=0 pid=67683)\u001b[0;0m INFO 07-09 13:50:32 [backends.py:462] Using cache directory: /home/ubuntu/.cache/vllm/torch_compile_cache/4ef326d1aa/rank_0_0 for vLLM's torch.compile\n",
      "\u001b[1;36m(VllmWorker rank=0 pid=67683)\u001b[0;0m INFO 07-09 13:50:32 [backends.py:472] Dynamo bytecode transform time: 8.74 s\n",
      "\u001b[1;36m(VllmWorker rank=1 pid=67685)\u001b[0;0m INFO 07-09 13:50:32 [backends.py:462] Using cache directory: /home/ubuntu/.cache/vllm/torch_compile_cache/4ef326d1aa/rank_1_0 for vLLM's torch.compile\n",
      "\u001b[1;36m(VllmWorker rank=1 pid=67685)\u001b[0;0m INFO 07-09 13:50:32 [backends.py:472] Dynamo bytecode transform time: 8.74 s\n",
      "\u001b[1;36m(VllmWorker rank=5 pid=67691)\u001b[0;0m INFO 07-09 13:50:32 [backends.py:462] Using cache directory: /home/ubuntu/.cache/vllm/torch_compile_cache/4ef326d1aa/rank_5_0 for vLLM's torch.compile\n",
      "\u001b[1;36m(VllmWorker rank=5 pid=67691)\u001b[0;0m INFO 07-09 13:50:32 [backends.py:472] Dynamo bytecode transform time: 8.76 s\n",
      "\u001b[1;36m(VllmWorker rank=3 pid=67689)\u001b[0;0m INFO 07-09 13:50:32 [backends.py:462] Using cache directory: /home/ubuntu/.cache/vllm/torch_compile_cache/4ef326d1aa/rank_3_0 for vLLM's torch.compile\n",
      "\u001b[1;36m(VllmWorker rank=3 pid=67689)\u001b[0;0m INFO 07-09 13:50:32 [backends.py:472] Dynamo bytecode transform time: 8.78 s\n",
      "\u001b[1;36m(VllmWorker rank=6 pid=67692)\u001b[0;0m INFO 07-09 13:50:32 [backends.py:462] Using cache directory: /home/ubuntu/.cache/vllm/torch_compile_cache/4ef326d1aa/rank_6_0 for vLLM's torch.compile\n",
      "\u001b[1;36m(VllmWorker rank=6 pid=67692)\u001b[0;0m INFO 07-09 13:50:32 [backends.py:472] Dynamo bytecode transform time: 8.79 s\n",
      "\u001b[1;36m(VllmWorker rank=0 pid=67683)\u001b[0;0m INFO 07-09 13:50:35 [backends.py:161] Cache the graph of shape None for later use\n",
      "\u001b[1;36m(VllmWorker rank=7 pid=67693)\u001b[0;0m INFO 07-09 13:50:35 [backends.py:161] Cache the graph of shape None for later use\n",
      "\u001b[1;36m(VllmWorker rank=6 pid=67692)\u001b[0;0m INFO 07-09 13:50:35 [backends.py:161] Cache the graph of shape None for later use\n",
      "\u001b[1;36m(VllmWorker rank=4 pid=67690)\u001b[0;0m INFO 07-09 13:50:35 [backends.py:161] Cache the graph of shape None for later use\n",
      "\u001b[1;36m(VllmWorker rank=2 pid=67688)\u001b[0;0m INFO 07-09 13:50:35 [backends.py:161] Cache the graph of shape None for later use\n",
      "\u001b[1;36m(VllmWorker rank=5 pid=67691)\u001b[0;0m INFO 07-09 13:50:35 [backends.py:161] Cache the graph of shape None for later use\n",
      "\u001b[1;36m(VllmWorker rank=3 pid=67689)\u001b[0;0m INFO 07-09 13:50:35 [backends.py:161] Cache the graph of shape None for later use\n",
      "\u001b[1;36m(VllmWorker rank=1 pid=67685)\u001b[0;0m INFO 07-09 13:50:35 [backends.py:161] Cache the graph of shape None for later use\n",
      "\u001b[1;36m(VllmWorker rank=0 pid=67683)\u001b[0;0m INFO 07-09 13:51:06 [backends.py:173] Compiling a graph for general shape takes 34.15 s\n",
      "\u001b[1;36m(VllmWorker rank=4 pid=67690)\u001b[0;0m INFO 07-09 13:51:06 [backends.py:173] Compiling a graph for general shape takes 34.27 s\n",
      "\u001b[1;36m(VllmWorker rank=6 pid=67692)\u001b[0;0m INFO 07-09 13:51:06 [backends.py:173] Compiling a graph for general shape takes 34.20 s\n",
      "\u001b[1;36m(VllmWorker rank=5 pid=67691)\u001b[0;0m INFO 07-09 13:51:07 [backends.py:173] Compiling a graph for general shape takes 34.32 s\n",
      "\u001b[1;36m(VllmWorker rank=1 pid=67685)\u001b[0;0m INFO 07-09 13:51:07 [backends.py:173] Compiling a graph for general shape takes 34.44 s\n",
      "\u001b[1;36m(VllmWorker rank=7 pid=67693)\u001b[0;0m INFO 07-09 13:51:07 [backends.py:173] Compiling a graph for general shape takes 34.62 s\n",
      "\u001b[1;36m(VllmWorker rank=2 pid=67688)\u001b[0;0m INFO 07-09 13:51:07 [backends.py:173] Compiling a graph for general shape takes 34.67 s\n",
      "\u001b[1;36m(VllmWorker rank=3 pid=67689)\u001b[0;0m INFO 07-09 13:51:07 [backends.py:173] Compiling a graph for general shape takes 34.62 s\n",
      "\u001b[1;36m(VllmWorker rank=7 pid=67693)\u001b[0;0m INFO 07-09 13:51:24 [monitor.py:34] torch.compile takes 43.34 s in total\n",
      "\u001b[1;36m(VllmWorker rank=0 pid=67683)\u001b[0;0m INFO 07-09 13:51:24 [monitor.py:34] torch.compile takes 42.89 s in total\n",
      "\u001b[1;36m(VllmWorker rank=3 pid=67689)\u001b[0;0m INFO 07-09 13:51:24 [monitor.py:34] torch.compile takes 43.40 s in total\n",
      "\u001b[1;36m(VllmWorker rank=4 pid=67690)\u001b[0;0m INFO 07-09 13:51:24 [monitor.py:34] torch.compile takes 42.93 s in total\n",
      "\u001b[1;36m(VllmWorker rank=6 pid=67692)\u001b[0;0m INFO 07-09 13:51:24 [monitor.py:34] torch.compile takes 42.99 s in total\n",
      "\u001b[1;36m(VllmWorker rank=5 pid=67691)\u001b[0;0m INFO 07-09 13:51:24 [monitor.py:34] torch.compile takes 43.08 s in total\n",
      "\u001b[1;36m(VllmWorker rank=2 pid=67688)\u001b[0;0m INFO 07-09 13:51:24 [monitor.py:34] torch.compile takes 43.38 s in total\n",
      "\u001b[1;36m(VllmWorker rank=1 pid=67685)\u001b[0;0m INFO 07-09 13:51:24 [monitor.py:34] torch.compile takes 43.18 s in total\n",
      "\u001b[1;36m(VllmWorker rank=4 pid=67690)\u001b[0;0m INFO 07-09 13:51:25 [gpu_worker.py:227] Available KV cache memory: 37.75 GiB\n",
      "\u001b[1;36m(VllmWorker rank=1 pid=67685)\u001b[0;0m INFO 07-09 13:51:25 [gpu_worker.py:227] Available KV cache memory: 37.75 GiB\n",
      "\u001b[1;36m(VllmWorker rank=0 pid=67683)\u001b[0;0m INFO 07-09 13:51:25 [gpu_worker.py:227] Available KV cache memory: 37.75 GiB\n",
      "\u001b[1;36m(VllmWorker rank=3 pid=67689)\u001b[0;0m INFO 07-09 13:51:25 [gpu_worker.py:227] Available KV cache memory: 37.75 GiB\n",
      "\u001b[1;36m(VllmWorker rank=7 pid=67693)\u001b[0;0m INFO 07-09 13:51:25 [gpu_worker.py:227] Available KV cache memory: 37.75 GiB\n",
      "\u001b[1;36m(VllmWorker rank=6 pid=67692)\u001b[0;0m INFO 07-09 13:51:25 [gpu_worker.py:227] Available KV cache memory: 37.75 GiB\n",
      "\u001b[1;36m(VllmWorker rank=2 pid=67688)\u001b[0;0m INFO 07-09 13:51:25 [gpu_worker.py:227] Available KV cache memory: 37.75 GiB\n",
      "\u001b[1;36m(VllmWorker rank=5 pid=67691)\u001b[0;0m INFO 07-09 13:51:25 [gpu_worker.py:227] Available KV cache memory: 37.75 GiB\n",
      "INFO 07-09 13:51:26 [kv_cache_utils.py:715] GPU KV cache size: 1,237,104 tokens\n",
      "INFO 07-09 13:51:26 [kv_cache_utils.py:719] Maximum concurrency for 32,768 tokens per request: 37.75x\n",
      "INFO 07-09 13:51:26 [kv_cache_utils.py:715] GPU KV cache size: 1,237,104 tokens\n",
      "INFO 07-09 13:51:26 [kv_cache_utils.py:719] Maximum concurrency for 32,768 tokens per request: 37.75x\n",
      "INFO 07-09 13:51:26 [kv_cache_utils.py:715] GPU KV cache size: 1,237,104 tokens\n",
      "INFO 07-09 13:51:26 [kv_cache_utils.py:719] Maximum concurrency for 32,768 tokens per request: 37.75x\n",
      "INFO 07-09 13:51:26 [kv_cache_utils.py:715] GPU KV cache size: 1,237,104 tokens\n",
      "INFO 07-09 13:51:26 [kv_cache_utils.py:719] Maximum concurrency for 32,768 tokens per request: 37.75x\n",
      "INFO 07-09 13:51:26 [kv_cache_utils.py:715] GPU KV cache size: 1,237,104 tokens\n",
      "INFO 07-09 13:51:26 [kv_cache_utils.py:719] Maximum concurrency for 32,768 tokens per request: 37.75x\n",
      "INFO 07-09 13:51:26 [kv_cache_utils.py:715] GPU KV cache size: 1,237,104 tokens\n",
      "INFO 07-09 13:51:26 [kv_cache_utils.py:719] Maximum concurrency for 32,768 tokens per request: 37.75x\n",
      "INFO 07-09 13:51:26 [kv_cache_utils.py:715] GPU KV cache size: 1,237,104 tokens\n",
      "INFO 07-09 13:51:26 [kv_cache_utils.py:719] Maximum concurrency for 32,768 tokens per request: 37.75x\n",
      "INFO 07-09 13:51:26 [kv_cache_utils.py:715] GPU KV cache size: 1,237,104 tokens\n",
      "INFO 07-09 13:51:26 [kv_cache_utils.py:719] Maximum concurrency for 32,768 tokens per request: 37.75x\n",
      "\u001b[1;36m(VllmWorker rank=5 pid=67691)\u001b[0;0m INFO 07-09 13:52:02 [gpu_model_runner.py:2048] Graph capturing finished in 36 secs, took 1.51 GiB\n",
      "\u001b[1;36m(VllmWorker rank=1 pid=67685)\u001b[0;0m INFO 07-09 13:52:02 [gpu_model_runner.py:2048] Graph capturing finished in 36 secs, took 1.51 GiB\n",
      "\u001b[1;36m(VllmWorker rank=6 pid=67692)\u001b[0;0m INFO 07-09 13:52:02 [gpu_model_runner.py:2048] Graph capturing finished in 36 secs, took 1.51 GiB\n",
      "\u001b[1;36m(VllmWorker rank=4 pid=67690)\u001b[0;0m INFO 07-09 13:52:02 [gpu_model_runner.py:2048] Graph capturing finished in 36 secs, took 1.51 GiB\n",
      "\u001b[1;36m(VllmWorker rank=7 pid=67693)\u001b[0;0m INFO 07-09 13:52:02 [gpu_model_runner.py:2048] Graph capturing finished in 36 secs, took 1.51 GiB\n",
      "\u001b[1;36m(VllmWorker rank=2 pid=67688)\u001b[0;0m INFO 07-09 13:52:02 [gpu_model_runner.py:2048] Graph capturing finished in 36 secs, took 1.51 GiB\n",
      "\u001b[1;36m(VllmWorker rank=0 pid=67683)\u001b[0;0m INFO 07-09 13:52:02 [gpu_model_runner.py:2048] Graph capturing finished in 36 secs, took 1.51 GiB\n",
      "\u001b[1;36m(VllmWorker rank=3 pid=67689)\u001b[0;0m INFO 07-09 13:52:02 [gpu_model_runner.py:2048] Graph capturing finished in 36 secs, took 1.51 GiB\n",
      "INFO 07-09 13:52:02 [core.py:171] init engine (profile, create kv cache, warmup model) took 98.96 seconds\n",
      "INFO 07-09 13:52:02 [loggers.py:137] Engine 000: vllm cache_config_info with initialization after num_gpu_blocks is: 77319\n",
      "Loading training tasks...\n",
      "Training on 32 tasks\n",
      "Validation on 60 tasks\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "561463945382478694966857496b5eed",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Iterating dataset:   0%|          | 0/4000 [00:00<?, ?batch/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "--- Training Step 0 (Epoch 0, Step 0) ---\n",
      "\n",
      "--- Evaluating at Step 0 ---\n",
      "Evaluating model on 60 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "88beb6a3c7694868a975e4a154b85977",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/60 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Eval task 32: reward=1.0\n",
      "Eval task 33: reward=1.0\n",
      "Eval task 34: reward=0.0\n",
      "Eval task 35: reward=1.0\n",
      "Eval task 36: reward=0.0\n",
      "Eval task 37: reward=0.0\n",
      "Eval task 38: reward=0.0\n",
      "Eval task 39: reward=0.0\n",
      "Eval task 40: reward=1.0\n",
      "Eval task 41: reward=0.0\n",
      "Eval task 42: reward=0.0\n",
      "Eval task 43: reward=1.0\n",
      "Eval task 44: reward=0.0\n",
      "Eval task 45: reward=0.0\n",
      "Eval task 46: reward=0.0\n",
      "Eval task 47: reward=1.0\n",
      "Eval task 48: reward=1.0\n",
      "Eval task 49: reward=0.0\n",
      "Eval task 50: reward=1.0\n",
      "Eval task 51: reward=1.0\n",
      "Eval task 52: reward=1.0\n",
      "Eval task 53: reward=1.0\n",
      "Eval task 54: reward=1.0\n",
      "Eval task 55: reward=1.0\n",
      "Eval task 56: reward=1.0\n",
      "Eval task 57: reward=0.0\n",
      "Eval task 58: reward=1.0\n",
      "Eval task 59: reward=0.0\n",
      "Eval task 60: reward=1.0\n",
      "Eval task 61: reward=1.0\n",
      "Eval task 62: reward=0.0\n",
      "Eval task 63: reward=0.0\n",
      "Eval task 64: reward=0.0\n",
      "Eval task 65: reward=1.0\n",
      "Eval task 66: reward=1.0\n",
      "Eval task 67: reward=0.0\n",
      "Eval task 68: reward=1.0\n",
      "Eval task 69: reward=0.0\n",
      "Eval task 70: reward=1.0\n",
      "Eval task 71: reward=0.0\n",
      "Eval task 72: reward=0.0\n",
      "Eval task 73: reward=1.0\n",
      "Eval task 74: reward=0.0\n",
      "Eval task 75: reward=1.0\n",
      "Eval task 76: reward=1.0\n",
      "Eval task 77: reward=1.0\n",
      "Eval task 78: reward=1.0\n",
      "Eval task 79: reward=0.0\n",
      "Eval task 80: reward=1.0\n",
      "Eval task 81: reward=0.0\n",
      "Eval task 82: reward=1.0\n",
      "Eval task 83: reward=0.0\n",
      "Eval task 84: reward=0.0\n",
      "Eval task 85: reward=1.0\n",
      "Eval task 86: reward=1.0\n",
      "Eval task 87: reward=0.0\n",
      "Eval task 88: reward=1.0\n",
      "Eval task 89: reward=1.0\n",
      "Eval task 90: reward=1.0\n",
      "Eval task 91: reward=1.0\n",
      "Average evaluation reward: 0.5666666666666667\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "f192272c2c4b4934ab9a847f2d85ff99",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 112 trajectories into 62 sequences of length 16384\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "7627c353148a4658a4637f5a733e05c3",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/8 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 0: Average training reward = 0.4296875\n",
      "\n",
      "--- Training Step 1 (Epoch 0, Step 1) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "b6c3653e6e5644a7b6f4c0a1f7960f7b",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 96 trajectories into 45 sequences of length 16384\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "fbc37bf9acdb43b0b89cc98bbd4df21b",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/6 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 1: Average training reward = 0.5\n",
      "\n",
      "--- Training Step 2 (Epoch 0, Step 2) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "7d118968ce104ed6b65a02ce0c47f648",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 128 trajectories into 68 sequences of length 14336\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "87e425a0fa6d4c34934609240bdea904",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/9 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 2: Average training reward = 0.703125\n",
      "\n",
      "--- Training Step 3 (Epoch 0, Step 3) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "bf05e3da0c31480a9229534fe886f12e",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 128 trajectories into 85 sequences of length 14336\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "e9832d215b064d0c899640783a52bc16",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/11 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 3: Average training reward = 0.5\n",
      "\n",
      "--- Training Step 4 (Epoch 1, Step 0) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "6365030bc59440d192995bbdb404589e",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 112 trajectories into 51 sequences of length 16384\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "b302c4027cea40cc81afef4f655fcf00",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/7 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 4: Average training reward = 0.640625\n",
      "\n",
      "--- Training Step 5 (Epoch 1, Step 1) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "df5de7c7c11f444aad3e380d3415f0fc",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 128 trajectories into 103 sequences of length 12288\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "742546822be3402cbf2e1e65c4cf1273",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/13 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 5: Average training reward = 0.59375\n",
      "\n",
      "--- Training Step 6 (Epoch 1, Step 2) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "d829f8f5784e41cb88a000364d3803ab",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 128 trajectories into 48 sequences of length 22528\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "b2959d1851c648e9be383196b4c29135",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/6 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 6: Average training reward = 0.484375\n",
      "\n",
      "--- Training Step 7 (Epoch 1, Step 3) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "191904a4905243dfa7def8f606e95f33",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "ERROR:asyncio:Unclosed client session\n",
      "client_session: <aiohttp.client.ClientSession object at 0x7f4885debfd0>\n",
      "ERROR:asyncio:Unclosed client session\n",
      "client_session: <aiohttp.client.ClientSession object at 0x7f4893046990>\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 112 trajectories into 77 sequences of length 14336\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "7f7cc42ebfc14a82bdd480101c57763e",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/10 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 7: Average training reward = 0.484375\n",
      "\n",
      "--- Training Step 8 (Epoch 2, Step 0) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "312d5861ebd248d5bf78d15d6d60df39",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 112 trajectories into 57 sequences of length 16384\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "2634058014a142bbaab0baff48e5e3f8",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/8 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 8: Average training reward = 0.3984375\n",
      "\n",
      "--- Training Step 9 (Epoch 2, Step 1) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "f203294c66a74a6d97a5e50c44e02877",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 112 trajectories into 78 sequences of length 10240\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "f4cf409f19234370992ec70a916fcc61",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/10 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 9: Average training reward = 0.5859375\n",
      "\n",
      "--- Training Step 10 (Epoch 2, Step 2) ---\n",
      "\n",
      "--- Evaluating at Step 10 ---\n",
      "Evaluating model on 60 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "d7f9000a460d46cb936fc49c2a3f451d",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/60 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Eval task 32: reward=1.0\n",
      "Eval task 33: reward=1.0\n",
      "Eval task 34: reward=0.0\n",
      "Eval task 35: reward=1.0\n",
      "Eval task 36: reward=0.0\n",
      "Eval task 37: reward=0.0\n",
      "Eval task 38: reward=0.0\n",
      "Eval task 39: reward=0.0\n",
      "Eval task 40: reward=1.0\n",
      "Eval task 41: reward=0.0\n",
      "Eval task 42: reward=0.0\n",
      "Eval task 43: reward=1.0\n",
      "Eval task 44: reward=1.0\n",
      "Eval task 45: reward=0.0\n",
      "Eval task 46: reward=0.0\n",
      "Eval task 47: reward=1.0\n",
      "Eval task 48: reward=1.0\n",
      "Eval task 49: reward=1.0\n",
      "Eval task 50: reward=1.0\n",
      "Eval task 51: reward=1.0\n",
      "Eval task 52: reward=1.0\n",
      "Eval task 53: reward=1.0\n",
      "Eval task 54: reward=0.0\n",
      "Eval task 55: reward=1.0\n",
      "Eval task 56: reward=1.0\n",
      "Eval task 57: reward=0.0\n",
      "Eval task 58: reward=0.0\n",
      "Eval task 59: reward=1.0\n",
      "Eval task 60: reward=0.0\n",
      "Eval task 61: reward=1.0\n",
      "Eval task 62: reward=1.0\n",
      "Eval task 63: reward=0.0\n",
      "Eval task 64: reward=1.0\n",
      "Eval task 65: reward=1.0\n",
      "Eval task 66: reward=1.0\n",
      "Eval task 67: reward=0.0\n",
      "Eval task 68: reward=1.0\n",
      "Eval task 69: reward=0.0\n",
      "Eval task 70: reward=1.0\n",
      "Eval task 71: reward=1.0\n",
      "Eval task 72: reward=0.0\n",
      "Eval task 73: reward=1.0\n",
      "Eval task 74: reward=0.0\n",
      "Eval task 75: reward=1.0\n",
      "Eval task 76: reward=0.0\n",
      "Eval task 77: reward=1.0\n",
      "Eval task 78: reward=1.0\n",
      "Eval task 79: reward=0.0\n",
      "Eval task 80: reward=1.0\n",
      "Eval task 81: reward=1.0\n",
      "Eval task 82: reward=0.0\n",
      "Eval task 83: reward=1.0\n",
      "Eval task 84: reward=0.0\n",
      "Eval task 85: reward=1.0\n",
      "Eval task 86: reward=1.0\n",
      "Eval task 87: reward=0.0\n",
      "Eval task 88: reward=1.0\n",
      "Eval task 89: reward=1.0\n",
      "Eval task 90: reward=1.0\n",
      "Eval task 91: reward=0.0\n",
      "Average evaluation reward: 0.6\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0009\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0002\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0008\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0006\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0004\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0005\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0003\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0001\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0007\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "f3047e6fdac04067aae3676a66c38c6a",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\u001b[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new\u001b[0m\n",
      "LiteLLM.Info: If you need to debug this error, use `litellm._turn_on_debug()'.\n",
      "\n",
      "Training on 8 trajectory groups...\n",
      "Packed 128 trajectories into 100 sequences of length 12288\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "080211143f994137aea81b1204029607",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/13 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 10: Average training reward = 0.7421875\n",
      "\n",
      "--- Training Step 11 (Epoch 2, Step 3) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "7277d20c09914b098fac2b893789e54f",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 96 trajectories into 60 sequences of length 16384\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "d92fa98642b84efe94a4646b0b24bf57",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/8 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 11: Average training reward = 0.4609375\n",
      "\n",
      "--- Training Step 12 (Epoch 3, Step 0) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "5aa278429aea4d98a2791cc5aad7ddc7",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 96 trajectories into 42 sequences of length 16384\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "43a70fc69ae046cdb67413364a826957",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/6 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 12: Average training reward = 0.6015625\n",
      "\n",
      "--- Training Step 13 (Epoch 3, Step 1) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "36ae6e3d02934b8c97a829914c7bc419",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 128 trajectories into 63 sequences of length 16384\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "800207225c54420e8b155ce52db9b6ec",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/8 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 13: Average training reward = 0.578125\n",
      "\n",
      "--- Training Step 14 (Epoch 3, Step 2) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "1bc783d1975d498abaec088168d222fb",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "ERROR:asyncio:Unclosed client session\n",
      "client_session: <aiohttp.client.ClientSession object at 0x7f47dfed2490>\n",
      "ERROR:asyncio:Unclosed client session\n",
      "client_session: <aiohttp.client.ClientSession object at 0x7f44daeb41d0>\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 96 trajectories into 70 sequences of length 14336\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "7532d403c1bc4ac7a84c03e9657b0d17",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/9 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 14: Average training reward = 0.46875\n",
      "\n",
      "--- Training Step 15 (Epoch 3, Step 3) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "6be9d4388590421a9028e513f4a4b871",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 96 trajectories into 77 sequences of length 12288\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "8bd9669b5fda4e41ab25c45fa3a2262b",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/10 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 15: Average training reward = 0.484375\n",
      "\n",
      "--- Training Step 16 (Epoch 4, Step 0) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "56ab00cbe99442b38b817dbfeca035b1",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\u001b[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new\u001b[0m\n",
      "LiteLLM.Info: If you need to debug this error, use `litellm._turn_on_debug()'.\n",
      "\n",
      "Training on 8 trajectory groups...\n",
      "Packed 96 trajectories into 53 sequences of length 14336\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "d6fd4575bf084ab1ac2f453803dfd2b8",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/7 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 16: Average training reward = 0.5703125\n",
      "\n",
      "--- Training Step 17 (Epoch 4, Step 1) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "64078146d6ba4a51a0bc48460dcfb1df",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 114 trajectories into 51 sequences of length 16384\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "dd13747bcced46879d6730d671766045",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/7 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 17: Average training reward = 0.5\n",
      "\n",
      "--- Training Step 18 (Epoch 4, Step 2) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "7b6bab1aca4544aa8d89b0cdb96b266b",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 128 trajectories into 102 sequences of length 12288\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "637d23c842eb44d5bee5b77dd7d026c4",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/13 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 18: Average training reward = 0.6640625\n",
      "\n",
      "--- Training Step 19 (Epoch 4, Step 3) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "836c1e70b3e34d3fb39cea9dabee0d52",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 110 trajectories into 59 sequences of length 16384\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "c2ba44393d374249a160c5a3f2e7e4a7",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/8 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 19: Average training reward = 0.328125\n",
      "\n",
      "--- Training Step 20 (Epoch 5, Step 0) ---\n",
      "\n",
      "--- Evaluating at Step 20 ---\n",
      "Evaluating model on 60 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "e1f2c0da10804224a5c481ca310a4e9b",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/60 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Eval task 32: reward=1.0\n",
      "Eval task 33: reward=0.0\n",
      "Eval task 34: reward=0.0\n",
      "Eval task 35: reward=1.0\n",
      "Eval task 36: reward=0.0\n",
      "Eval task 37: reward=0.0\n",
      "Eval task 38: reward=0.0\n",
      "Eval task 39: reward=0.0\n",
      "Eval task 40: reward=1.0\n",
      "Eval task 41: reward=0.0\n",
      "Eval task 42: reward=1.0\n",
      "Eval task 43: reward=1.0\n",
      "Eval task 44: reward=1.0\n",
      "Eval task 45: reward=1.0\n",
      "Eval task 46: reward=1.0\n",
      "Eval task 47: reward=1.0\n",
      "Eval task 48: reward=1.0\n",
      "Eval task 49: reward=1.0\n",
      "Eval task 50: reward=1.0\n",
      "Eval task 51: reward=1.0\n",
      "Eval task 52: reward=1.0\n",
      "Eval task 53: reward=0.0\n",
      "Eval task 54: reward=0.0\n",
      "Eval task 55: reward=0.0\n",
      "Eval task 56: reward=0.0\n",
      "Eval task 57: reward=0.0\n",
      "Eval task 58: reward=0.0\n",
      "Eval task 59: reward=1.0\n",
      "Eval task 60: reward=1.0\n",
      "Eval task 61: reward=1.0\n",
      "Eval task 62: reward=0.0\n",
      "Eval task 63: reward=0.0\n",
      "Eval task 64: reward=0.0\n",
      "Eval task 65: reward=1.0\n",
      "Eval task 66: reward=1.0\n",
      "Eval task 67: reward=0.0\n",
      "Eval task 68: reward=1.0\n",
      "Eval task 69: reward=0.0\n",
      "Eval task 70: reward=0.0\n",
      "Eval task 71: reward=0.0\n",
      "Eval task 72: reward=0.0\n",
      "Eval task 73: reward=1.0\n",
      "Eval task 74: reward=0.0\n",
      "Eval task 75: reward=1.0\n",
      "Eval task 76: reward=0.0\n",
      "Eval task 77: reward=1.0\n",
      "Eval task 78: reward=1.0\n",
      "Eval task 79: reward=0.0\n",
      "Eval task 80: reward=0.0\n",
      "Eval task 81: reward=0.0\n",
      "Eval task 82: reward=0.0\n",
      "Eval task 83: reward=1.0\n",
      "Eval task 84: reward=1.0\n",
      "Eval task 85: reward=0.0\n",
      "Eval task 86: reward=0.0\n",
      "Eval task 87: reward=1.0\n",
      "Eval task 88: reward=1.0\n",
      "Eval task 89: reward=1.0\n",
      "Eval task 90: reward=1.0\n",
      "Eval task 91: reward=0.0\n",
      "Average evaluation reward: 0.5\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0018\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0012\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0019\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0017\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0010\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0013\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0014\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0015\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0016\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0011\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "da9e8c73e90a4939803c5581f6f1f812",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 112 trajectories into 72 sequences of length 12288\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "226d93c79cfb4b1b960240dfdf912f45",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/9 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 20: Average training reward = 0.5390625\n",
      "\n",
      "--- Training Step 21 (Epoch 5, Step 1) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "b0eb9d0701dc4679bcbd2ba01da88510",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "ERROR:asyncio:Unclosed client session\n",
      "client_session: <aiohttp.client.ClientSession object at 0x7f489329b650>\n",
      "ERROR:asyncio:Unclosed client session\n",
      "client_session: <aiohttp.client.ClientSession object at 0x7f44daf50cd0>\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 128 trajectories into 57 sequences of length 16384\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "884d0d3edf344000b414e0efdf3e7e43",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/8 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 21: Average training reward = 0.46875\n",
      "\n",
      "--- Training Step 22 (Epoch 5, Step 2) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "a2f6af1436f944ae88ff41271671a7a9",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 128 trajectories into 90 sequences of length 14336\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "843140cb3ada40ccaba7e6bb701ac0e6",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/12 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 22: Average training reward = 0.46875\n",
      "\n",
      "--- Training Step 23 (Epoch 5, Step 3) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "fa77fdcf5e044f5b9528d5d40416db5d",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 112 trajectories into 56 sequences of length 16384\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "63b30fa7f7944f3685acf18218044f81",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/7 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 23: Average training reward = 0.484375\n",
      "\n",
      "--- Training Step 24 (Epoch 6, Step 0) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "9c41a4ed42404c51b7a369f634ffaa5d",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 128 trajectories into 86 sequences of length 14336\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "774d16189b074bfd9e419d00e36fd373",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/11 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 24: Average training reward = 0.3046875\n",
      "\n",
      "--- Training Step 25 (Epoch 6, Step 1) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "f4fb7e1228844c3d8a8124499ecd231f",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 128 trajectories into 64 sequences of length 16384\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "38304818042c4a1b8b6aa129ad114757",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/8 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 25: Average training reward = 0.3984375\n",
      "\n",
      "--- Training Step 26 (Epoch 6, Step 2) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "67e37105ddfd4feba3520e5f16a5c2d9",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 128 trajectories into 77 sequences of length 12288\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "28b6133ae3be4654969f242740d62218",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/10 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 26: Average training reward = 0.6484375\n",
      "\n",
      "--- Training Step 27 (Epoch 6, Step 3) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "400f59e27b8a447f97c98e26a6538f07",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 112 trajectories into 80 sequences of length 14336\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "8ec60d48cb264c5fa0d3298c206bc09c",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/10 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 27: Average training reward = 0.5703125\n",
      "\n",
      "--- Training Step 28 (Epoch 7, Step 0) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "01a82a7c51e64e27957b2afc6ecddc8a",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "ERROR:asyncio:Unclosed client session\n",
      "client_session: <aiohttp.client.ClientSession object at 0x7f4875860c50>\n",
      "ERROR:asyncio:Unclosed client session\n",
      "client_session: <aiohttp.client.ClientSession object at 0x7f4876637950>\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 128 trajectories into 56 sequences of length 18432\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "74a4e2cece814118987fa6731a17262d",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/7 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 28: Average training reward = 0.46875\n",
      "\n",
      "--- Training Step 29 (Epoch 7, Step 1) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "7db9d9bc3bbf46fb9c8a53248c9b096c",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\u001b[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new\u001b[0m\n",
      "LiteLLM.Info: If you need to debug this error, use `litellm._turn_on_debug()'.\n",
      "\n",
      "\n",
      "\u001b[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new\u001b[0m\n",
      "LiteLLM.Info: If you need to debug this error, use `litellm._turn_on_debug()'.\n",
      "\n",
      "\n",
      "\u001b[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new\u001b[0m\n",
      "LiteLLM.Info: If you need to debug this error, use `litellm._turn_on_debug()'.\n",
      "\n",
      "Training on 8 trajectory groups...\n",
      "Packed 94 trajectories into 46 sequences of length 16384\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "5f4d9bf3f3e944789ed5faef92cfe2c3",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/6 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 29: Average training reward = 0.5625\n",
      "\n",
      "--- Training Step 30 (Epoch 7, Step 2) ---\n",
      "\n",
      "--- Evaluating at Step 30 ---\n",
      "Evaluating model on 60 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "6840833bec124c44b1f8186bba695f12",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/60 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Eval task 32: reward=0.0\n",
      "Eval task 33: reward=-1\n",
      "Eval task 34: reward=0.0\n",
      "Eval task 35: reward=1.0\n",
      "Eval task 36: reward=0.0\n",
      "Eval task 37: reward=0.0\n",
      "Eval task 38: reward=1.0\n",
      "Eval task 39: reward=0.0\n",
      "Eval task 40: reward=1.0\n",
      "Eval task 41: reward=0.0\n",
      "Eval task 42: reward=0.0\n",
      "Eval task 43: reward=1.0\n",
      "Eval task 44: reward=0.0\n",
      "Eval task 45: reward=1.0\n",
      "Eval task 46: reward=0.0\n",
      "Eval task 47: reward=1.0\n",
      "Eval task 48: reward=0.0\n",
      "Eval task 49: reward=1.0\n",
      "Eval task 50: reward=1.0\n",
      "Eval task 51: reward=1.0\n",
      "Eval task 52: reward=1.0\n",
      "Eval task 53: reward=1.0\n",
      "Eval task 54: reward=0.0\n",
      "Eval task 55: reward=0.0\n",
      "Eval task 56: reward=1.0\n",
      "Eval task 57: reward=0.0\n",
      "Eval task 58: reward=0.0\n",
      "Eval task 59: reward=0.0\n",
      "Eval task 60: reward=1.0\n",
      "Eval task 61: reward=1.0\n",
      "Eval task 62: reward=0.0\n",
      "Eval task 63: reward=0.0\n",
      "Eval task 64: reward=1.0\n",
      "Eval task 65: reward=1.0\n",
      "Eval task 66: reward=1.0\n",
      "Eval task 67: reward=0.0\n",
      "Eval task 68: reward=1.0\n",
      "Eval task 69: reward=1.0\n",
      "Eval task 70: reward=1.0\n",
      "Eval task 71: reward=0.0\n",
      "Eval task 72: reward=0.0\n",
      "Eval task 73: reward=1.0\n",
      "Eval task 74: reward=0.0\n",
      "Eval task 75: reward=1.0\n",
      "Eval task 76: reward=-1\n",
      "Eval task 77: reward=0.0\n",
      "Eval task 78: reward=1.0\n",
      "Eval task 79: reward=0.0\n",
      "Eval task 80: reward=1.0\n",
      "Eval task 81: reward=0.0\n",
      "Eval task 82: reward=1.0\n",
      "Eval task 83: reward=0.0\n",
      "Eval task 84: reward=1.0\n",
      "Eval task 85: reward=1.0\n",
      "Eval task 86: reward=1.0\n",
      "Eval task 87: reward=1.0\n",
      "Eval task 88: reward=0.0\n",
      "Eval task 89: reward=1.0\n",
      "Eval task 90: reward=1.0\n",
      "Eval task 91: reward=0.0\n",
      "Average evaluation reward: 0.48333333333333334\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0020\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0023\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0024\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0029\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0025\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0022\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0026\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0028\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0027\n",
      "Deleted checkpoint /home/ubuntu/sky_workdir/.art/tau-bench/models/003/checkpoints/0021\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "179992b05bfc4ec6bd26fd4b273ae1fa",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 128 trajectories into 88 sequences of length 12288\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "6b736d97a6b748dbb5e1a1d71871928b",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/11 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 30: Average training reward = 0.6875\n",
      "\n",
      "--- Training Step 31 (Epoch 7, Step 3) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "6db061e786a54ba0aa03f34370bba457",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 96 trajectories into 71 sequences of length 12288\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "e6c3ad41a36b49be8d596969c0786662",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/9 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 31: Average training reward = 0.3984375\n",
      "\n",
      "--- Training Step 32 (Epoch 8, Step 0) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "675f7a0c2bda4c4f9c0a25a192fcb27b",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 128 trajectories into 62 sequences of length 16384\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "09cda97f03a346f1baf2e9be3abb9fbe",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/8 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 32: Average training reward = 0.5625\n",
      "\n",
      "--- Training Step 33 (Epoch 8, Step 1) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "69a83c75331542d0ab89be3648fc7ac0",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training on 8 trajectory groups...\n",
      "Packed 96 trajectories into 57 sequences of length 12288\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "0247ecd6a1d8450792272f3e34fb2940",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "train:   0%|          | 0/8 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 33: Average training reward = 0.5625\n",
      "\n",
      "--- Training Step 34 (Epoch 8, Step 2) ---\n",
      "Generating trajectories for 8 tasks...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "c3cc440e04d44172b0ad5d70a7b12900",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "gather:   0%|          | 0/128 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "import art\n",
    "from dotenv import load_dotenv\n",
    "from tau_bench.types import TauBenchPolicyConfig, TauBenchTrainingConfig\n",
    "from run_rl import train\n",
    "from run import RunConfig\n",
    "import torch\n",
    "\n",
    "load_dotenv()\n",
    "\n",
    "MODEL_NAME = \"003\"\n",
    "model = art.TrainableModel(\n",
    "    name=MODEL_NAME,\n",
    "    project=\"tau-bench\",\n",
    "    base_model=\"Qwen/Qwen2.5-32B-Instruct\",\n",
    "    config=TauBenchPolicyConfig(\n",
    "        training_config=TauBenchTrainingConfig(\n",
    "            trajectories_per_group=16,\n",
    "            groups_per_step=8,\n",
    "            learning_rate=2e-6,\n",
    "            eval_steps=10,\n",
    "            val_set_size=60,\n",
    "            training_dataset_size=32,\n",
    "            num_epochs=1000,\n",
    "            train_mode=\"sync_rl\",\n",
    "        ),\n",
    "        run_config=RunConfig(\n",
    "            model_provider=\"hosted_vllm\",\n",
    "            user_model_provider=\"openai\",\n",
    "            model=MODEL_NAME,\n",
    "            user_model=\"gpt-4o\",\n",
    "            agent_strategy=\"tool-calling-rl\",\n",
    "            temperature=1.0,\n",
    "            task_split=\"test\",\n",
    "            log_dir=\"rl_results\",\n",
    "            skip_eval=False,\n",
    "        ),\n",
    "    ),\n",
    "    _internal_config=art.dev.InternalModelConfig(\n",
    "        engine_args=art.dev.EngineArgs(\n",
    "            tensor_parallel_size=torch.cuda.device_count(), gpu_memory_utilization=0.65\n",
    "        ),\n",
    "        torchtune_args=art.dev.TorchtuneArgs(\n",
    "            model=\"qwen2_5_32b_instruct\", model_type=\"QWEN2\", async_weight_syncing=True\n",
    "        ),\n",
    "    ),\n",
    ")\n",
    "await train(model)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
