import gradio as gr
import requests
# Define the Hugging Face API endpoint and your API token
API_URL = "https://z94ka3s1dsuof4va.us-east-1.aws.endpoints.huggingface.cloud"
API_TOKEN = "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"  # Replace with your actual API token
# Function to query the Hugging Face model
def query_huggingface_model(input_text):
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
    payload = {"inputs": input_text}
    response = requests.post(API_URL, headers=headers, json=payload)
    if response.status_code == 200:
        return response.json()
    else:
        return {"error": f"Request failed with status code {response.status_code}"}
# Define a function to process the input and return the model's output
def generate_response(input_text):
    response = query_huggingface_model(input_text)
    if "error" in response:
        return response["error"]
    else:
        return response[0]['generated_text']
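# Note: the indexing above assumes the endpoint returns the usual text-generation
# payload shape, e.g. [{"generated_text": "..."}]; adjust if your endpoint differs.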
# Create a Gradio interface
iface = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=2, placeholder="Enter your text here..."),
    outputs="text",
    title="LLaMA-2-7B Guanaco Dolly Mini Model",
    description="Generate responses using the LLaMA-2-7B Guanaco Dolly Mini model from Hugging Face."
)
# Launch the interface
iface.launch()
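
# A minimal sketch for smoke-testing the endpoint without the Gradio UI,
# assuming the same URL and token as above (substitute your real values):
#
#   curl "$API_URL" \
#     -X POST \
#     -H "Authorization: Bearer $API_TOKEN" \
#     -H "Content-Type: application/json" \
#     -d '{"inputs": "Hello, world"}'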