import streamlit as st
import requests
import os

# Define the endpoint and API key
api_url = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
api_key = os.getenv('HFSecret')

headers = {
    "Authorization": f"Bearer {api_key}"
}

# API call function
def call_huggingface_api(prompt):
    data = {"inputs": prompt, "parameters": {"max_length": 500, "temperature": 0.5}}
    response = requests.post(api_url, headers=headers, json=data)
    if response.status_code != 200:
        st.error(f"Error: {response.status_code} - {response.text}")
        return None
    return response.json()

# Streamlit layout
st.title("Sentiment Analysis, Summarization, and Keyword Extraction")

text_input = st.text_area("Enter text for analysis")

if st.button("Analyze"):
    if text_input:
        # Sentiment Analysis
        sentiment_prompt = f"Perform sentiment analysis on the following text: {text_input}"
        sentiment_result = call_huggingface_api(sentiment_prompt)
        if sentiment_result:
            st.write("Sentiment Analysis Result:", sentiment_result[0]['generated_text'])

        # Summarization
        summarization_prompt = f"Summarize the following text: {text_input}"
        summarization_result = call_huggingface_api(summarization_prompt)
        if summarization_result:
            st.write("Summarization Result:", summarization_result[0]['generated_text'])

        # Keyword Extraction (using the LLM, not RAKE)
        keyword_prompt = f"Extract important keywords from the following text: {text_input}"
        keyword_result = call_huggingface_api(keyword_prompt)
        if keyword_result:
            st.write("Keyword Extraction Result:", keyword_result[0]['generated_text'])
    else:
        st.warning("Please enter some text for analysis.")
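
# A minimal sketch of how to run this app locally, assuming the script is saved
# as app.py (the filename is a hypothetical choice, not specified above). The
# Hugging Face access token must be exposed via the HFSecret environment
# variable read by os.getenv above:
#
#   export HFSecret=<your Hugging Face access token>
#   streamlit run app.py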