import streamlit as st
import requests
import os
# Define the endpoint and API key
api_url = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
api_key = os.getenv('HFSecret')
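# HFSecret must be set in the environment (for example as a Space secret);
# if it is missing, the Authorization header is invalid and the API calls below fail.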
headers = {
    "Authorization": f"Bearer {api_key}"
}
# API call function
def call_huggingface_api(prompt):
    data = {"inputs": prompt, "parameters": {"max_length": 500, "temperature": 0.5}}
    response = requests.post(api_url, headers=headers, json=data)
    if response.status_code != 200:
        st.error(f"Error: {response.status_code} - {response.text}")
        return None
    # Text-generation responses come back as a JSON list of dicts with a 'generated_text' field
    return response.json()
# Streamlit layout
st.title("Sentiment Analysis, Summarization, and Keyword Extraction")
text_input = st.text_area("Enter text for analysis")
if st.button("Analyze"):
if text_input:
# Sentiment Analysis
sentiment_prompt = f"Perform sentiment analysis on the following text: {text_input}"
sentiment_result = call_huggingface_api(sentiment_prompt)
if sentiment_result:
st.write("Sentiment Analysis Result:", sentiment_result[0]['generated_text'])
# Summarization
summarization_prompt = f"Summarize the following text: {text_input}"
summarization_result = call_huggingface_api(summarization_prompt)
if summarization_result:
st.write("Summarization Result:", summarization_result[0]['generated_text'])
# Keyword Extraction (Using LLM, not RAKE)
keyword_prompt = f"Extract important keywords from the following text: {text_input}"
keyword_result = call_huggingface_api(keyword_prompt)
if keyword_result:
st.write("Keyword Extraction Result:", keyword_result[0]['generated_text'])
else:
st.warning("Please enter some text for analysis.")
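
# To try the app locally (a sketch; assumes the file is saved as app.py and that
# streamlit and requests are installed):
#   export HFSecret=hf_your_token_here
#   streamlit run app.py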