Upload serve_ministral_fixed.py with huggingface_hub
serve_ministral_fixed.py (+6 -1)
```diff
@@ -140,10 +140,15 @@ async def load_model():
     print("Setting Mistral chat template...")
     processor.tokenizer.chat_template = MISTRAL_CHAT_TEMPLATE
 
+    # Explicitly use CUDA
+    device = "cuda:0" if torch.cuda.is_available() else "cpu"
+    print(f"Using device: {device}")
+    print(f"CUDA available: {torch.cuda.is_available()}")
+
     model = AutoModelForImageTextToText.from_pretrained(
         model_id,
         torch_dtype=torch.bfloat16,
-        device_map=
+        device_map=device,
         trust_remote_code=True,
     )
     model.eval()
```
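For context, here is the load path this commit produces, written out as a standalone sketch. The `model_id` value and the `MISTRAL_CHAT_TEMPLATE` string are hypothetical placeholders for the ones defined elsewhere in serve_ministral_fixed.py; the rest mirrors the committed code.

```python
# Sketch of the fixed load path. Assumptions: model_id and
# MISTRAL_CHAT_TEMPLATE stand in for values defined elsewhere in
# serve_ministral_fixed.py; they are not from this commit.
import torch
from transformers import AutoProcessor, AutoModelForImageTextToText

model_id = "org/ministral-vision-model"  # hypothetical placeholder
MISTRAL_CHAT_TEMPLATE = "..."            # placeholder for the real template

processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
processor.tokenizer.chat_template = MISTRAL_CHAT_TEMPLATE

# Explicit device selection, as in the diff: prefer the first GPU and
# fall back to CPU so the script can still start without CUDA.
device = "cuda:0" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")
print(f"CUDA available: {torch.cuda.is_available()}")

model = AutoModelForImageTextToText.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map=device,  # a concrete device string pins all weights to one device
    trust_remote_code=True,
)
model.eval()
```

Passing a concrete device string rather than `device_map="auto"` keeps the entire model on a single GPU; with `"auto"`, accelerate may shard weights across devices or offload them to CPU, which can silently slow down single-GPU serving.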