rovi27 committed on
Commit
f630ef4
1 Parent(s): b933c54

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -13,7 +13,7 @@ sft_model = "somosnlp/ComeBien_gemma-2b-it-bnb-4bit"
 #base_model_name = "unsloth/gemma-2b-bnb-4bit"
 base_model_name = "unsloth/gemma-2b-it-bnb-4bit"
 
-max_seq_length=1000
+max_seq_length=400
 base_model = AutoModelForCausalLM.from_pretrained(base_model_name,return_dict=True,device_map="auto", torch_dtype=torch.float16,)
 tokenizer = AutoTokenizer.from_pretrained(base_model_name, max_length = max_seq_length)
 ft_model = PeftModel.from_pretrained(base_model, sft_model)
@@ -82,7 +82,7 @@ mis_ejemplos = [
 
 iface = gr.Interface(
     fn=mostrar_respuesta,
-    inputs=[gr.Textbox(label="Pregunta"), gr.Textbox(label="Contexto", value="Eres un experto cocinero hispanoamericano."),],
+    inputs=[gr.Textbox(label="Pregunta"), gr.Textbox(label="Contexto", value="You are a helpful AI assistant. Eres un experto cocinero hispanoamericano."),],
     outputs=[gr.Textbox(label="Respuesta", lines=2),],
     title="ComeBien",
     description="Introduce tu pregunta sobre recetas de cocina.",