# NewsSearch / app.py
# (Hugging Face Space residue: uploaded by KatGaw, commit "adding files", 1a4d79b)
from openai import OpenAI
import streamlit as st
from langchain_openai import ChatOpenAI
from tools import sentiment_analysis_util  # project-local news-fetching / sentiment helpers
import numpy as np
import pandas as pd
from dotenv import load_dotenv
import os
# set_page_config must be the first Streamlit command executed in the script.
st.set_page_config(page_title="LangChain Agent", layout="wide")
load_dotenv()  # load OPENAI_API_KEY (and any other secrets) from a local .env
# Fail fast with KeyError if the key is missing from the environment.
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
from langchain_core.runnables import RunnableConfig
# --- Page header -------------------------------------------------------------
st.title("💬 News Search")
st.image('el_pic.png')

# Seed the conversation exactly once per session with a system greeting.
if "messages" not in st.session_state:
    st.session_state["messages"] = [{"role":"system", "content":"""💬 How can I help you?"""}]

# Replay the full conversation so far on every rerun.
for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg["content"])

# --- Sidebar controls --------------------------------------------------------
sideb = st.sidebar
with st.sidebar:
    prompt = st.text_input("Enter topic for sentiment analysis: ")
    check1 = sideb.button(f"analyze {prompt}")
if check1:
    # Record and echo the user's query.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # ===================== Sentiment analysis pipeline =======================
    # Short queries (fewer than 3 words) additionally search Google News;
    # every query searches Reddit and Tavily.
    short_query = len(prompt.split(' ')) < 3

    if short_query:
        st.write('I am analyzing Google News ...')
        news_articles = sentiment_analysis_util.fetch_news(prompt)
    st.write('Now, I am analyzing Reddit ...')
    reddit_news_articles = sentiment_analysis_util.fetch_reddit_news(prompt)

    tavily_news_articles = sentiment_analysis_util.fetch_tavily_news(prompt)
    if not tavily_news_articles:
        # Surface the problem in the UI instead of printing to server stdout.
        st.warning("No news articles found. Try adjusting your search terms.")
    else:
        for url in tavily_news_articles:
            try:
                st.write(f"Article URL: {url}")
            except Exception as e:
                st.error(f"Error processing article {url}: {e}")
                continue

    analysis_results = []
    # Crude relevance filter: the first 6 characters of the query must appear
    # (case-insensitively) in the article text. Previously Tavily used a
    # 5-character prefix and skipped case-folding, so its matching silently
    # diverged from the other sources — now all three use the same rule.
    prefix = prompt.lower()[0:6]

    # --- Google News: dict records with label + score + URL ------------------
    if short_query:
        for article in news_articles:
            if prefix in article['News_Article'].lower():
                sentiment_analysis_result = sentiment_analysis_util.analyze_sentiment(article['News_Article'])
                analysis_results.append({
                    'News_Article': sentiment_analysis_result["News_Article"],
                    'Sentiment': sentiment_analysis_result["Sentiment"][0]['label'],
                    'Index': sentiment_analysis_result["Sentiment"][0]['score'],
                    'URL': article['URL'],
                })

    # --- Reddit: articles arrive as flat strings containing 'URL:'/'Date:' ---
    # NOTE(review): np.append over a dict yields an object ndarray of
    # [record, url, date(10 chars)]; kept as-is because
    # generate_summary_of_sentiment consumes this shape downstream.
    for article in reddit_news_articles:
        if prefix in article.lower():
            sentiment_analysis_result_reddit = sentiment_analysis_util.analyze_sentiment(article)
            result = {
                'News_Article': sentiment_analysis_result_reddit["News_Article"],
                'Index': np.round(sentiment_analysis_result_reddit["Sentiment"][0]['score'], 2),
            }
            analysis_results.append(np.append(result, np.append(article.split('URL:')[-1:], ((article.split('Date: ')[-1:])[0][0:10]))))

    # --- Tavily: same flat-string handling as Reddit -------------------------
    for article in tavily_news_articles:
        if prefix in article.lower():
            sentiment_analysis_result_tavily = sentiment_analysis_util.analyze_sentiment(article)
            result = {
                'News_Article': sentiment_analysis_result_tavily["News_Article"],
                'Index': np.round(sentiment_analysis_result_tavily["Sentiment"][0]['score'], 2),
            }
            analysis_results.append(np.append(result, np.append(article.split('URL:')[-1:], ((article.split('Date: ')[-1:])[0][0:10]))))

    # Summarize the dominant sentiment across all collected results and
    # append the answer to the conversation.
    summary = sentiment_analysis_util.generate_summary_of_sentiment(analysis_results)
    st.chat_message("assistant").write(summary)
    st.session_state.messages.append({"role": "assistant", "content": summary})
#answers=np.append(res["messages"][-1].content,summary)
# Follow-up Q&A loop. Uses the raw OpenAI SDK client: the original code built a
# LangChain ChatOpenAI here, which has no `.chat.completions.create` attribute,
# so every follow-up question raised AttributeError. `OpenAI` is already
# imported at the top of the file.
client = OpenAI(api_key=OPENAI_API_KEY)
if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "gpt-4o"

if prompt := st.chat_input("Any other questions? "):
    # Record and echo the user's follow-up question.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    # Stream the assistant's reply token-by-token into the chat container.
    with st.chat_message("assistant"):
        stream = client.chat.completions.create(
            model=st.session_state["openai_model"],
            messages=[
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            stream=True,
        )
        # st.write_stream renders the stream and returns the full text.
        response = st.write_stream(stream)
    st.session_state.messages.append({"role": "assistant", "content": response})