Update logic/generator.py
logic/generator.py (CHANGED, +16 -12)
@@ -1,3 +1,4 @@
+# logic/generator.py
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
@@ -10,15 +11,18 @@ model = AutoModelForCausalLM.from_pretrained(
 )
 
 def generate_code(prompt):
-    ... (previous implementation, 12 lines, not shown in this view)
+    try:
+        formatted_prompt = f"# Write Python code that does the following:\n# {prompt}\n"
+        inputs = tokenizer(formatted_prompt, return_tensors="pt").to(model.device)
+        outputs = model.generate(
+            **inputs,
+            max_new_tokens=256,
+            do_sample=True,
+            temperature=0.3,
+            top_k=50,
+            top_p=0.95
+        )
+        result = tokenizer.decode(outputs[0], skip_special_tokens=True)
+        return result.strip()
+    except Exception as e:
+        return f"Error generating code: {str(e)}"
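For reference, a minimal usage sketch of the updated function (an illustration, not part of the commit). It assumes the module is importable as logic.generator and that, as the diff context indicates, tokenizer and model are loaded at module import time; the example prompt is hypothetical.

# Usage sketch: call generate_code() from outside the module.
# Assumes logic/generator.py loads tokenizer and model at import time, as in the diff above.
from logic.generator import generate_code

snippet = generate_code("read a CSV file and print the mean of each numeric column")  # hypothetical prompt
print(snippet)

With do_sample=True, the low temperature (0.3) combined with top_k/top_p filtering keeps the sampled code fairly focused while still allowing some variation between runs.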