cryptocalypse committed on
Commit
f0edf49
1 Parent(s): 16c8c95

Update gen.py

Files changed (1)
gen.py +5 -5
gen.py CHANGED
@@ -3,10 +3,10 @@ import sys
 import sys
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-tokenizer = AutoTokenizer.from_pretrained('google/gemma-2-2b-it')
+tokenizer = AutoTokenizer.from_pretrained('stabilityai/stablelm-2-zephyr-1_6b')
 model = AutoModelForCausalLM.from_pretrained(
-    'google/gemma-2-2b-it',
-    device_map="auto"
+    'stabilityai/stablelm-2-zephyr-1_6b',
+    device_map="auto",
 )
 
 
@@ -153,7 +153,7 @@ prompt = (
 
 def generate(event):
     # Generate the text using the model
-    prompt_msg = [{'role': 'user', 'content': prompt+"\n\n"+event}]
+    prompt_msg = [{"role":"system","content":prompt},{'role': 'user', 'content': event}]
     inputs = tokenizer.apply_chat_template(
         prompt_msg,
         add_generation_prompt=False,
@@ -169,4 +169,4 @@ def generate(event):
 
 
     # Print the generated output
-    return tokenizer.decode(tokens[0], skip_special_tokens=True)
+    return "{".join(tokenizer.decode(tokens[0], skip_special_tokens=True).split("<|user|>")[1].split("{")[1:-1])