Teddy-Project committed
Commit badbc70 · verified · 1 parent: fd97d86

Update app.py

Files changed (1):
  app.py (+16, -18)
app.py CHANGED
@@ -1,28 +1,26 @@
-import gradio as gr
-import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+import torch
+import gradio as gr
 
-model_id = "HuggingFaceH4/zephyr-7b-beta"
+model_id = "NousResearch/zephyr-1.3b"
 
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(
     model_id,
-    torch_dtype=torch.bfloat16,
-    device_map="auto"
+    torch_dtype=torch.float32
 )
 
-pipe = pipeline(
-    "text-generation",
-    model=model,
-    tokenizer=tokenizer,
-    max_new_tokens=200,
-    do_sample=True,
-    temperature=0.7,
-    top_p=0.9,
-)
+pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
-def responder(prompt):
-    respuesta = pipe(prompt)[0]["generated_text"]
-    return respuesta.split(prompt)[-1].strip()
+def generate_text(prompt):
+    output = pipe(prompt, max_new_tokens=200, do_sample=True, temperature=0.7)
+    return output[0]["generated_text"]
 
-gr.Interface(fn=responder, inputs="text", outputs="text", title="Bot de Texto").launch()
+demo = gr.Interface(
+    fn=generate_text,
+    inputs=gr.Textbox(lines=4, placeholder="Escribe algo..."),
+    outputs="text",
+    title="Generador de texto - Zephyr 1.3B (CPU compatible)"
+)
+
+demo.launch()
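
The dtype switch is what makes this commit CPU-compatible: the old torch_dtype=torch.bfloat16 plus device_map="auto" path expects an accelerator (and the accelerate package installed), while plain torch.float32 loads on any host. If the Space should still use a GPU when one happens to be available, a minimal device-agnostic loading sketch (not part of this commit; it reuses the diff's model_id and assumes stock torch/transformers):

import torch
from transformers import AutoModelForCausalLM

model_id = "NousResearch/zephyr-1.3b"  # same checkpoint as in the diff
# Use bfloat16 only where CUDA reports native support; otherwise float32, as in this commit.
use_cuda = torch.cuda.is_available()
dtype = torch.bfloat16 if use_cuda and torch.cuda.is_bf16_supported() else torch.float32
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=dtype)
if use_cuda:
    model = model.to("cuda")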
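
One behavioral change reviewers may want to flag: the old responder stripped the prompt from the model output (respuesta.split(prompt)[-1].strip()), whereas the new generate_text returns the full generated string, prompt included. If the old behavior is still wanted, the text-generation pipeline's return_full_text flag handles it without string surgery; a sketch of the adjusted function (same names as the diff):

def generate_text(prompt):
    # return_full_text=False tells the pipeline to return only the completion,
    # restoring the old prompt-stripping behavior.
    output = pipe(prompt, max_new_tokens=200, do_sample=True,
                  temperature=0.7, return_full_text=False)
    return output[0]["generated_text"].strip()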