DeepMount00
committed on
Commit
•
284719e
1
Parent(s):
c9091ac
Update README.md
Browse files
README.md
CHANGED
@@ -35,29 +35,20 @@ import transformers
|
|
35 |
from transformers import TextStreamer
|
36 |
import torch
|
37 |
|
38 |
-
|
39 |
|
40 |
-
|
41 |
-
|
42 |
|
43 |
-
def
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
inputs = tokenizer([prompt], return_tensors="pt").to(runtimeFlag)
|
49 |
-
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
|
50 |
-
_ = model.generate(**inputs, streamer=streamer, max_new_tokens=300, temperature=0.0001,
|
51 |
-
repetition_penalty=1.2, eos_token_id=2, do_sample=True, num_return_sequences=1)
|
52 |
|
53 |
-
|
54 |
-
|
55 |
-
[-5, 10, 15, 20, 25, 30, 35]
|
56 |
-
"""
|
57 |
|
58 |
-
prompt = domanda + "\n" + contesto
|
59 |
-
|
60 |
-
stream(prompt)
|
61 |
```
|
62 |
---
|
63 |
## Developer
|
|
|
35 |
from transformers import TextStreamer
|
36 |
import torch
|
37 |
|
38 |
+
# Hub id of the Italian Mistral fine-tune demonstrated by this README.
MODEL_NAME = "DeepMount00/Mistral-Ita-7b"

# Load the weights in bfloat16 (halves memory vs fp32) and switch the
# module to inference mode; the matching tokenizer comes from the same repo.
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME, torch_dtype=torch.bfloat16
)
model = model.eval()
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
|
42 |
|
43 |
+
def generate_answer(prompt):
    """Generate the model's reply to a single user message.

    Parameters
    ----------
    prompt : str
        The user message, wrapped in the model's chat template.

    Returns
    -------
    list[str]
        Decoded output, one string per returned sequence (a single
        sequence here, and it includes the prompt text since the full
        generated ids are decoded).
    """
    # apply_chat_template(..., return_tensors="pt") returns a token-id
    # tensor, NOT a dict, so it must be passed positionally to generate()
    # (the original `**encoded_input` unpacking raises a TypeError).
    # Use model.device instead of the undefined name `device`.
    encoded_input = tokenizer.apply_chat_template(
        [{"role": "user", "content": prompt}], return_tensors="pt"
    ).to(model.device)
    # NOTE(review): temperature=0.001 with do_sample=True is effectively
    # greedy decoding; kept as-is to preserve the documented behavior.
    generated_ids = model.generate(
        encoded_input,
        max_new_tokens=200,
        do_sample=True,
        temperature=0.001,
        eos_token_id=tokenizer.eos_token_id,
    )
    answer = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    return answer
|
|
|
|
|
|
|
|
|
48 |
|
49 |
+
# Demo query (Italian): a math riddle the model should answer by setting
# up the equation.
prompt = "Se un mattone pesa 1kg più metà di se stesso, quanto pesa il mattone? Rispondi impostando l'equazione matematica"
answer = generate_answer(prompt)
print(answer)
|
|
|
|
|
51 |
|
|
|
|
|
|
|
52 |
```
|
53 |
---
|
54 |
## Developer
|