import gradio as gr
from transformers import pipeline
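
# Context prompts prepended to the user input. They stay in Spanish because the
# model below generates Spanish text: the first allows explicit language in an
# informal conversation, the second forbids it.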
contexto_explicito = "Esta es una conversación informal en la que es aceptable el uso de lenguaje explícito si el tema lo amerita."
contexto_formal = "Esta es una conversación formal y no se debe usar lenguaje explícito."
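
# Text-generation pipeline backed by the Spanish GPT-2 model published by
# PlanTL-GOB-ES, loaded from the Hugging Face Hub.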
generator = pipeline('text-generation', model='PlanTL-GOB-ES/gpt2-large-bne')


def chatbot_response(input_text):
    print(f"Input received: {input_text}")

    try:
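        # Choose the prompt context via a simple keyword check on the (Spanish) user input.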
        if any(palabra in input_text.lower() for palabra in ["sexo", "relaciones", "adultos"]):
            prompt = f"{contexto_explicito} {input_text}"
        else:
            prompt = f"{contexto_formal} {input_text}"
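
        # Sample one continuation. max_new_tokens (rather than max_length) keeps
        # the output budget independent of how long the context prompt is.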
        outputs = generator(prompt,
                            max_new_tokens=50,
                            num_return_sequences=1,
                            do_sample=True,
                            top_k=50,
                            top_p=0.95,
                            temperature=0.7)
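
        # The pipeline returns the prompt followed by the continuation; slice off
        # the prompt so only the newly generated text is shown to the user.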
        response = outputs[0]['generated_text'][len(prompt):].strip()
        print(f"Generated response: {response}")
        return response

    except Exception as e:
        print(f"Error raised: {str(e)}")
        return f"Error: {str(e)}"
iface = gr.Interface(fn=chatbot_response, inputs="text", outputs="text", title="Chatbot Contextual")

iface.launch()