Upload 4 files
- README (1).md +12 -0
- app.py +86 -0
- gitattributes +35 -0
- requirements (1).txt +4 -0
README (1).md
ADDED
@@ -0,0 +1,12 @@
+---
+title: Chaterapia Prueba
+emoji: 😻
+colorFrom: blue
+colorTo: indigo
+sdk: gradio
+sdk_version: 4.25.0
+app_file: app.py
+pinned: false
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,86 @@
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from peft import PeftModel
+import gradio as gr
+import os
+import torch
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+# Make sure your Hugging Face token is available as an environment variable
+hf_token = os.environ.get("token")
+if hf_token is not None:
+    from huggingface_hub import HfFolder
+    HfFolder.save_token(hf_token)
+
+# Initial setup: load the tokenizer, the base model, and the LoRA adapter
+tokenizer = AutoTokenizer.from_pretrained("somosnlp/chaterapia_model")
+model_base = AutoModelForCausalLM.from_pretrained("google/gemma-2b-it").to(device)
+model_base.resize_token_embeddings(len(tokenizer))
+model_with_adapter = PeftModel.from_pretrained(model_base, "somosnlp/chaterapia_model").to(device)
+
+CHAT_TEMPLATE = """{% for message in messages %}
+{% if message['role'] == 'user' %}
+{{'<user> ' + message['content'].strip() + ' </user>' }}
+{% elif message['role'] == 'system' %}
+{{'<system>\\n' + message['content'].strip() + '\\n</system>\\n\\n' }}
+{% elif message['role'] == 'assistant' %}
+{{ message['content'].strip() + ' </assistant>' + eos_token }}
+{% elif message['role'] == 'input' %}
+{{'<input> ' + message['content'] + ' </input>' }}
+{% endif %}
+{% endfor %}"""  # Make sure to use your own CHAT_TEMPLATE here
+tokenizer.chat_template = CHAT_TEMPLATE
+
+chat_history = []  # Global chat history
+chatbot_text = []
+def generate_response(user_input):
+    global chat_history
+    # Add the user's input to the history
+    chat_history.append({"content": user_input, "role": "user"})
+
+    # Prepare the input for the model
+    user_input = tokenizer.apply_chat_template(chat_history, tokenize=False)
+    input_tokens = tokenizer(user_input, return_tensors='pt', padding=True, truncation=True, max_length=1024).to(device)
+
+    # Generate the model's response
+    output_tokens = model_with_adapter.generate(**input_tokens, max_length=1024, pad_token_id=tokenizer.eos_token_id, top_k=50, do_sample=True, top_p=0.95, temperature=0.7)
+    generated_text = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
+
+    # Extract the generated response (text between the last </user> and </assistant> tags)
+    last_us = generated_text.rfind("</user>") + len("</user>")
+    last_as = generated_text.rfind("</assistant>")
+    generated_text = generated_text[last_us:last_as].strip()
+
+    # Add the bot's response to the history
+    chat_history.append({"content": generated_text, "role": "assistant"})
+    return generated_text
+
+def respond(message):
+    global chatbot_text
+    if message:  # Check that the message is not empty
+        bot_response = generate_response(message)
+        chatbot_text.append((message, bot_response))
+        return chatbot_text
+    return [("", "")]
+
+
+def clear_chat_and_history():
+    global chat_history
+    global chatbot_text
+    chat_history.clear()  # Empty the chat history
+    chatbot_text.clear()
+    return "", []
+
+with gr.Blocks() as demo:
+    chatbot = gr.Chatbot()
+    # Use a regular Button instead of ClearButton to control which function runs
+    with gr.Row():
+        msg = gr.Textbox(label="Tu mensaje", placeholder="Escribe aquí...", lines=1)
+        send_btn = gr.Button("Enviar")
+        clear_btn = gr.Button("Limpiar Chat")
+
+    # Action when the Send button is pressed
+    send_btn.click(fn=respond, inputs=msg, outputs=chatbot)
+    clear_btn.click(fn=clear_chat_and_history, inputs=None, outputs=[msg, chatbot])
+
+demo.launch()
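Once the app is running (locally via `python app.py`, or on the Space), the chat can also be exercised programmatically. Below is a minimal sketch using `gradio_client`; the local URL and the `/respond` endpoint name (Gradio's default naming for the `respond` click handler) are assumptions for illustration, not part of this commit.

# Sketch: drive the running app programmatically (assumed URL and endpoint name).
from gradio_client import Client

client = Client("http://127.0.0.1:7860")  # hypothetical local address of the app

# respond() takes the user's message and returns the updated chat history
history = client.predict(
    "Hola, ¿cómo estás?",  # message for the Textbox input
    api_name="/respond",   # assumed default endpoint name for the respond handler
)
print(history)  # list of (user message, bot response) pairs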
gitattributes
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
requirements (1).txt
ADDED
@@ -0,0 +1,4 @@
+transformers==4.38.2
+torch
+gradio
+peft==0.10.0
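As a quick sanity check after installing these requirements, the pinned versions can be verified at runtime; this snippet is illustrative only and not part of the commit.

# Verify that the pinned dependencies resolved as expected (illustrative).
import gradio
import peft
import torch
import transformers

print("transformers", transformers.__version__)  # pinned to 4.38.2
print("peft", peft.__version__)                  # pinned to 0.10.0
print("torch", torch.__version__)                # unpinned
print("gradio", gradio.__version__)              # unpinned; README declares sdk_version 4.25.0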