Spaces:
Running
Running
Add TRANSFORMERS_CACHE mock to fix TRL/llm_blender crash
Browse files
ultimate_sota_training.py
CHANGED
@@ -95,6 +95,14 @@ bootstrap_deps()
 import httpx
 import torch
 from datasets import Dataset
+
+# CRITICAL FIX for llm_blender crash:
+# llm_blender unconditionally tries to import TRANSFORMERS_CACHE which was removed from transformers 4.40+.
+# Since we don't even use llm_blender, we just mock it here so TRL doesn't crash on import.
+import transformers.utils.hub
+if not hasattr(transformers.utils.hub, "TRANSFORMERS_CACHE"):
+    transformers.utils.hub.TRANSFORMERS_CACHE = "/tmp"
+
 from trl import GRPOConfig, GRPOTrainer
 from unsloth import FastLanguageModel