Guxtavv committed on
Commit dc8c02d
1 Parent(s): f81e1bb

Update app.py

Files changed (1)
  1. app.py +4 -2
app.py CHANGED
@@ -1,6 +1,7 @@
 import gradio as gr
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from hf_olmo import AutoModelForCausalLM, AutoTokenizer  # Importing from hf_olmo
 
+# Load the model and tokenizer
 model_name = "allenai/OLMo-7B"
 tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
 model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
@@ -10,5 +11,6 @@ def generate_text(prompt):
     outputs = model.generate(**inputs)
     return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-iface = gr.Interface(fn=generate_text, inputs="text", outputs="text", title="OLMo-7B Text Generator", description="Enter a prompt to generate text using the OLMo-7B model.")
+iface = gr.Interface(fn=generate_text, inputs="text", outputs="text",
+                     title="OLMo-7B Text Generator", description="Enter a prompt to generate text.")
 iface.launch()
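
For reference, a sketch of how app.py plausibly reads after this commit. The diff omits the body of generate_text between the two hunks, so the tokenization line (inputs = tokenizer(prompt, return_tensors="pt")) and the surrounding blank lines are assumptions; everything else is taken directly from the diff above.

import gradio as gr
from hf_olmo import AutoModelForCausalLM, AutoTokenizer  # Importing from hf_olmo

# Load the model and tokenizer
model_name = "allenai/OLMo-7B"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)


def generate_text(prompt):
    # Assumed step (hidden between the diff hunks): tokenize the prompt into PyTorch tensors
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


iface = gr.Interface(fn=generate_text, inputs="text", outputs="text",
                     title="OLMo-7B Text Generator", description="Enter a prompt to generate text.")
iface.launch()

Note that the hf_olmo module is distributed in the ai2-olmo package (pip install ai2-olmo), which the Space presumably pulls in via its requirements.txt.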