# llm_lab / app.py
import gradio as gr
import requests
import os
import json
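# Hosted Inference API endpoint for the BLOOM text-generation model.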
API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
def translate(user, seed=42):
    # Build the BLOOM prompt; 'secret_thing' is an environment variable
    # (presumably a Space secret) holding extra instruction details.
    prompt = f"Instruction: Details: {os.environ['secret_thing']}. Given the following English input sentence translate it into a Spanish sentence. \ninput: {user}"
    data = {
        "inputs": prompt,
        "parameters": {
            "top_p": 0.9,
            "temperature": 0.1,
            "max_new_tokens": 250,
            "return_full_text": False,
            "do_sample": False,
            "seed": seed,
            "early_stopping": False,
            "length_penalty": 0.0,
            "eos_token_id": None,
        },
        "options": {
            "use_cache": False,
            "wait_for_model": True,
        },
    }
    # Query the hosted Inference API and split the generated text into lines.
    response = requests.post(API_URL, json=data)
    output = response.json()
    output_tmp = output[0]["generated_text"]
    answer = output_tmp.splitlines()
    # Return the line containing the model's "output" answer; fall back to a
    # default string if no such line was generated.
    try:
        return list(filter(lambda x: "output" in x, answer))[0]
    except IndexError:
        return "no entiendo"
demo = gr.Blocks()
with demo:
    input_prompt = gr.Textbox(label="Enter the sentence:",
                              value="",
                              lines=6)
    generated_txt = gr.Textbox(lines=3)
    b1 = gr.Button("translate")
    b1.click(translate, inputs=[input_prompt], outputs=generated_txt)
demo.launch(enable_queue=True, debug=False)
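
# Note: the hosted Inference API normally expects an Authorization header, e.g.
# headers={"Authorization": f"Bearer {os.environ['HF_TOKEN']}"} on the POST above;
# this app sends the request unauthenticated, so it presumably relies on anonymous
# access. The HF_TOKEN name is only an illustrative placeholder, not defined here.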