Teddy-Project committed
Commit fd97d86 · verified · 1 Parent(s): a254524

Update app.py

Files changed (1):
  app.py  +22 −17
app.py CHANGED
@@ -1,23 +1,28 @@
  import gradio as gr
  import torch
- from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
- from diffusers import StableDiffusionPipeline
- from huggingface_hub import login
- import os
- from PIL import Image
- import io
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
- login(token=os.environ["HF_TOKEN"])
+ model_id = "HuggingFaceH4/zephyr-7b-beta"
 
- model_id = "mistralai/Mistral-7B-Instruct-v0.1"  # Requires access
- tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=True)
- model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.float16, use_auth_token=True)
- pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id,
+     torch_dtype=torch.bfloat16,
+     device_map="auto"
+ )
+
+ pipe = pipeline(
+     "text-generation",
+     model=model,
+     tokenizer=tokenizer,
+     max_new_tokens=200,
+     do_sample=True,
+     temperature=0.7,
+     top_p=0.9,
+ )
 
- def chat(user_input):
-     prompt = f"""<s>[INST] {user_input.strip()} [/INST]"""
-     output = pipe(prompt, max_new_tokens=200, temperature=0.7, do_sample=True)[0]["generated_text"]
-     response = output.split("[/INST]")[-1].strip()
-     return response
+ def responder(prompt):
+     respuesta = pipe(prompt)[0]["generated_text"]
+     return respuesta.split(prompt)[-1].strip()
 
- gr.Interface(fn=chat, inputs="text", outputs="text", title="MyBot - Texto").launch()
+ gr.Interface(fn=responder, inputs="text", outputs="text", title="Bot de Texto").launch()
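
Note on the new responder(): zephyr-7b-beta is a chat-tuned model, and the committed version passes the raw string to the pipeline, then strips the echoed prompt with respuesta.split(prompt), which is brittle when the model repeats or rephrases the input. A minimal sketch of an alternative, assuming the tokenizer and pipe objects defined above (tokenizer.apply_chat_template and the pipeline's return_full_text flag are standard transformers APIs; the body below is illustrative, not part of this commit):

def responder(prompt):
    # Illustrative alternative: wrap the user turn in zephyr's chat format
    # instead of passing the raw string to the pipeline.
    messages = [{"role": "user", "content": prompt}]
    texto = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    # return_full_text=False makes the pipeline return only the newly
    # generated text, so there is no prompt echo to split off.
    salida = pipe(texto, return_full_text=False)[0]["generated_text"]
    return salida.strip()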