Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Minimal demo: ask the DeepSeek-R1 ReDistill Qwen-1.5B model one question on GPU."""
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Runtime configuration: Hub model id, target device, and compute precision.
model_id = "mobiuslabsgmbh/DeepSeek-R1-ReDistill-Qwen-1.5B-v1.0"
device = 'cuda'
compute_dtype = torch.bfloat16

# Download (or reuse cached) weights and place them directly on the GPU,
# using PyTorch's scaled-dot-product-attention kernel.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=compute_dtype,
    attn_implementation="sdpa",
    device_map=device,
)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Render a single-turn conversation through the model's chat template and
# tokenize it in one step (returns a [1, seq_len] tensor of input ids).
messages = [{"role": "user", "content": "What is 1.5+102.2?"}]
chat = tokenizer.apply_chat_template(
    messages,
    tokenize=True,
    add_generation_prompt=True,
    return_tensors="pt",
)

# Sampled (non-greedy) generation; the returned sequence includes the prompt,
# so the decoded text below contains both question and answer.
outputs = model.generate(chat.to(device), max_new_tokens=1024, do_sample=True)
print(tokenizer.decode(outputs[0]))