Spaces: Runtime error
""" | |
Pre-download the model to ensure it's available during app startup. | |
This script should be run before launching the app. | |
""" | |
import os | |
import torch | |
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor | |
# Set cache directories | |
os.environ["TRANSFORMERS_CACHE"] = "/tmp/transformers_cache" | |
os.environ["HF_HOME"] = "/tmp/hf_home" | |
os.environ["HUGGINGFACE_HUB_CACHE"] = "/tmp/hf_hub_cache" | |
# Create cache directories | |
for cache_dir in ["/tmp/transformers_cache", "/tmp/hf_home", "/tmp/hf_hub_cache"]: | |
os.makedirs(cache_dir, exist_ok=True) | |
# Model configuration | |
MODEL_NAME = "openai/whisper-large-v3-turbo" | |
device = "cuda:0" if torch.cuda.is_available() else "cpu" | |
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32 | |
print("Pre-downloading model to cache...") | |
print(f"Using device: {device}") | |
print(f"Using dtype: {torch_dtype}") | |
try: | |
# Download model | |
model = AutoModelForSpeechSeq2Seq.from_pretrained( | |
MODEL_NAME, | |
torch_dtype=torch_dtype, | |
low_cpu_mem_usage=True, | |
use_safetensors=True, | |
cache_dir="/tmp/transformers_cache", | |
) | |
print("Model downloaded successfully!") | |
# Download processor | |
processor = AutoProcessor.from_pretrained( | |
MODEL_NAME, | |
cache_dir="/tmp/transformers_cache", | |
) | |
print("Processor downloaded successfully!") | |
print("Pre-download complete! Model is ready for use.") | |
except Exception as e: | |
print(f"Error during pre-download: {e}") | |
raise |
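For the pre-download to pay off, the app has to load the model from the same cache directory at startup. Below is a minimal sketch of that startup code, assuming the Space's entry point builds a standard transformers automatic-speech-recognition pipeline; the file name and overall structure are assumptions, not taken from the actual Space.

# app startup (sketch) -- reuses the cache populated by the pre-download script,
# so from_pretrained resolves files locally instead of downloading them again.
import torch
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline

MODEL_NAME = "openai/whisper-large-v3-turbo"
device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

# Must match the cache_dir used in the pre-download script
CACHE_DIR = "/tmp/transformers_cache"

model = AutoModelForSpeechSeq2Seq.from_pretrained(
    MODEL_NAME,
    torch_dtype=torch_dtype,
    low_cpu_mem_usage=True,
    use_safetensors=True,
    cache_dir=CACHE_DIR,
)
processor = AutoProcessor.from_pretrained(MODEL_NAME, cache_dir=CACHE_DIR)

asr = pipeline(
    "automatic-speech-recognition",
    model=model,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    torch_dtype=torch_dtype,
    device=device,
)

If the cache_dir (or the environment variables) differ between the pre-download script and the app, the checkpoint will simply be fetched again at startup, which defeats the purpose of pre-downloading.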