Spaces:
Running
Running
Mock vllm to bypass TRL's missing-module error
Browse files
ultimate_sota_training.py
CHANGED
|
@@ -104,6 +104,13 @@ import transformers.utils.hub
|
|
| 104 |
if not hasattr(transformers.utils.hub, "TRANSFORMERS_CACHE"):
|
| 105 |
transformers.utils.hub.TRANSFORMERS_CACHE = "/tmp"
|
| 106 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 107 |
from trl import GRPOConfig, GRPOTrainer
|
| 108 |
from unsloth import FastLanguageModel
|
| 109 |
|
|
|
|
| 104 |
if not hasattr(transformers.utils.hub, "TRANSFORMERS_CACHE"):
|
| 105 |
transformers.utils.hub.TRANSFORMERS_CACHE = "/tmp"
|
| 106 |
|
| 107 |
+
# CRITICAL FIX for vllm crash:
|
| 108 |
+
# TRL's latest versions unconditionally import vllm in their extras module.
|
| 109 |
+
# Since Unsloth uses its own fast inference backend, we mock vllm to prevent the crash.
|
| 110 |
+
import sys
|
| 111 |
+
from unittest.mock import MagicMock
|
| 112 |
+
sys.modules['vllm'] = MagicMock()
|
| 113 |
+
|
| 114 |
from trl import GRPOConfig, GRPOTrainer
|
| 115 |
from unsloth import FastLanguageModel
|
| 116 |
|