# torch2 / app.py
# peterciank — Update app.py (commit 39377c0, verified) — 3.49 kB
# NOTE(review): the lines above were Hugging Face file-viewer page residue
# ("raw / history / blame" chrome), not program text; kept here as a comment
# so the file parses.
import streamlit as st
from openai import OpenAI
import os
import requests
from dotenv import load_dotenv
# Load environment variables from a local .env file (if present) into os.environ.
load_dotenv()
# Initialize the client with HuggingFace
# The OpenAI SDK is pointed at HuggingFace's OpenAI-compatible Inference API;
# authentication uses the token stored in the HFSecret environment variable.
# NOTE(review): os.environ.get returns None when HFSecret is unset — the app
# will then fail on the first API call rather than at startup; verify the
# Space has this secret configured.
client = OpenAI(
base_url="https://api-inference.huggingface.co/v1",
api_key=os.environ.get('HFSecret') # Replace with your HuggingFace token
)
# Define the Llama 3 8B model
# Hub repo id passed as the `model` argument on every completion request.
repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"
# Title of the App
st.title("Text Analysis with Llama 3: Sentiment, Summarization, and Keyword Extraction")
# Dropdown options to choose a text file
# 'None' is a sentinel meaning "no preset" — it has no entry in url_dict below,
# so fetch_text_content returns "" for it.
options = ['None', 'Appreciation Letter', 'Regret Letter', 'Kindness Tale', 'Lost Melody Tale', 'Twitter Example 1', 'Twitter Example 2']
# Create a dropdown menu to select options
selected_option = st.selectbox("Select a preset option", options)
# Define URLs for different text options
# Maps each preset label to a raw-GitHub URL holding the sample text.
# Keys must match the entries in `options` exactly (except 'None').
url_dict = {
'Appreciation Letter': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Appreciation_Letter.txt",
'Regret Letter': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Regret_Letter.txt",
'Kindness Tale': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Kindness_Tale.txt",
'Lost Melody Tale': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Lost_Melody_Tale.txt",
'Twitter Example 1': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Twitter_Example_1.txt",
'Twitter Example 2': "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Twitter_Example_2.txt"
}
# Function to fetch text content
def fetch_text_content(option):
    """Download the preset sample text for *option*.

    Parameters
    ----------
    option : str
        A key of ``url_dict`` (preset label). Anything else — including
        the 'None' sentinel — yields "".

    Returns
    -------
    str
        The file contents on HTTP 200, the literal string
        "Error fetching the text" on any non-200 status or network
        failure, and "" for unknown options.
    """
    if option not in url_dict:
        return ""
    try:
        # timeout bounds the request so a stalled connection cannot hang
        # the Streamlit script run indefinitely (requests has no default).
        response = requests.get(url_dict[option], timeout=10)
    except requests.RequestException:
        # Network/DNS/timeout errors previously crashed the app; fold them
        # into the same user-facing error message as a bad status code.
        return "Error fetching the text"
    return response.text if response.status_code == 200 else "Error fetching the text"
# Fetch the selected text
# jd is "" when 'None' (or an unknown option) is selected, so the text area
# starts empty in that case.
jd = fetch_text_content(selected_option)
# Display fetched text
# The preset is only a default — the user can freely edit it before analysis.
text = st.text_area('Enter the text to analyze', jd)
# Function to call Llama 3 for analysis
def call_llama_analysis(task, text):
    """Ask Llama 3 to perform *task* on *text* and return the raw output.

    Parameters
    ----------
    task : str
        Human-readable task name interpolated into the prompt
        (e.g. "sentiment analysis", "summarization").
    text : str
        The text to analyze.

    Returns
    -------
    str
        The model's generated completion text.

    Raises
    ------
    openai.OpenAIError
        Propagated from the API call; callers wrap this in try/except.
    """
    prompt = f"Perform {task} on the following text:\n\n{text}"
    # Call Llama 3 for the task
    response = client.completions.create(
        model=repo_id,
        prompt=prompt,
        max_tokens=3000,
        temperature=0.5
    )
    # Bug fix: openai>=1.0 (the `OpenAI` client used here) returns typed
    # pydantic objects, not dicts — response['choices'] raises TypeError.
    # Use attribute access instead.
    return response.choices[0].text
# Start analysis on button click
if st.button("Start Analysis"):
    # The three analyses were copy-pasted stanzas differing only in labels;
    # drive them from one table instead. Tuple fields:
    #   (spinner message, task string sent to the model,
    #    expander section label, error-message label)
    # NOTE: the expander says "Keywords Extraction" while the error says
    # "Keyword Extraction" — both preserved exactly as in the original UI.
    analysis_specs = [
        ("Analyzing Sentiment...", "sentiment analysis",
         "Sentiment Analysis", "Sentiment Analysis"),
        ("Summarizing...", "summarization",
         "Summarization", "Summarization"),
        ("Extracting Keywords...", "keyword extraction",
         "Keywords Extraction", "Keyword Extraction"),
    ]
    for spinner_msg, task, section_label, error_label in analysis_specs:
        with st.spinner(spinner_msg):
            try:
                result = call_llama_analysis(task, text)
                with st.expander(f"{section_label} - ✅ Completed", expanded=True):
                    st.write(result)
            except Exception as e:
                # Surface the failure for this task but keep going so the
                # remaining analyses still run (same as the original flow).
                st.error(f"Error in {error_label}: {str(e)}")