# chagu-demo / app.py
# NOTE: Hugging Face Spaces page chrome removed from this header
# (author "talexm", commit 1b7191e, "raw / history / blame", 6.56 kB) —
# it was viewer residue, not part of the program.
import streamlit as st
import os
from pathlib import Path
from PIL import Image
from rag_sec.document_search_system import DocumentSearchSystem
from chainguard.blockchain_logger import BlockchainLogger
import requests
import pandas as pd
# SerpAPI key: prefer the environment variable so the real key is never
# committed to source control; the original placeholder remains the
# fallback, so behavior is unchanged when SERPAPI_KEY is unset.
SERPAPI_KEY = os.getenv("SERPAPI_KEY", "your_serpapi_api_key")

# Blockchain logger used to record uploaded-file metadata immutably.
blockchain_logger = BlockchainLogger()
# Initialize DocumentSearchSystem
@st.cache_resource
def initialize_system():
    """Initialize the DocumentSearchSystem and load its documents.

    Returns the initialized system; @st.cache_resource caches it so the
    expensive Neo4j connection and document load happen once per session,
    not on every Streamlit rerun.

    Connection settings are read from the environment when available
    (NEO4J_URI / NEO4J_USER / NEO4J_PASSWORD), falling back to the
    original values for backward compatibility.
    SECURITY NOTE(review): the fallback Neo4j password was committed to
    source control and should be rotated.
    """
    system = DocumentSearchSystem(
        neo4j_uri=os.getenv("NEO4J_URI", "neo4j+s://0ca71b10.databases.neo4j.io"),
        neo4j_user=os.getenv("NEO4J_USER", "neo4j"),
        neo4j_password=os.getenv("NEO4J_PASSWORD", "HwGDOxyGS1-79nLeTiX5bx5ohoFSpvHCmTv8IRgt-lY"),
    )
    system.retriever.load_documents()
    return system

# Initialize the system (cached across reruns by st.cache_resource).
system = initialize_system()
# Function to Fetch News from SerpAPI
def fetch_news(query, num_results=5):
    """Fetch Google search results for *query* via SerpAPI.

    Parameters
    ----------
    query : str
        The search term.
    num_results : int
        Maximum number of results to request (default 5).

    Returns
    -------
    list[dict]
        ``[{"title": ..., "link": ...}, ...]`` on success (possibly empty),
        or a single-element list ``[{"error": ...}]`` on failure so callers
        can inspect ``results[0]`` uniformly.
    """
    url = "https://serpapi.com/search"
    params = {
        "engine": "google",
        "q": query,
        "api_key": SERPAPI_KEY,
        "num": num_results,
    }
    try:
        # A timeout keeps the Streamlit script from hanging indefinitely
        # when SerpAPI is unreachable (the original call had none).
        response = requests.get(url, params=params, timeout=10)
        response.raise_for_status()
        search_results = response.json().get("organic_results", [])
        # .get(...) guards against results missing a "title" or "link" key,
        # which previously raised an unhandled KeyError mid-comprehension.
        return [
            {"title": result.get("title", ""), "link": result.get("link", "")}
            for result in search_results
        ]
    except (requests.RequestException, ValueError) as e:
        # RequestException covers connection errors, timeouts, and non-2xx
        # statuses; ValueError covers malformed JSON bodies.
        return [{"error": f"An error occurred: {str(e)}"}]
# Mock User Information (Replace with actual Google Login integration if needed)
def get_user_info():
    """Return the current user's details (static placeholder values)."""
    # Hard-coded stand-in until real Google Login is wired up.
    return dict(name="Talex Maxim", email="taimax13@gmail.com")
# Directory for storing uploaded files
UPLOAD_DIR = "uploaded_files"
# Create it up front; exist_ok avoids an error on Streamlit reruns.
os.makedirs(UPLOAD_DIR, exist_ok=True)
# Streamlit Layout
st.title("Memora: Advanced File Upload and News Insights")
st.subheader("Securely upload, organize, and query your files while staying informed.")

# User-Specific Information: show the (mocked) identity in the sidebar.
user_info = get_user_info()
if user_info:
    st.sidebar.write("### Logged in as:")
    st.sidebar.write(f"**Name:** {user_info['name']}")
    st.sidebar.write(f"**Email:** {user_info['email']}")
else:
    st.sidebar.write("### Not Logged In")
    st.sidebar.write("We invite you on the journey! Please log in with your Google account.")

# Google Search: User-Specific News
if user_info:
    st.subheader("1. Latest News About You")
    user_name = user_info["name"]
    st.write(f"Fetching latest news for **{user_name}**...")
    user_news = fetch_news(user_name, num_results=5)
    # fetch_news returns [{"error": ...}] on failure, so inspecting the
    # first element distinguishes success from failure.
    if user_news and "error" not in user_news[0]:
        st.success(f"Top {len(user_news)} results for '{user_name}':")
        user_news_df = pd.DataFrame(user_news)
        st.dataframe(user_news_df)
    else:
        # fetch_news can return an EMPTY list on a successful request with
        # no organic results; the original `user_news[0]` raised IndexError.
        message = user_news[0].get("error", "No news found.") if user_news else "No news found."
        st.error(message)
else:
    st.warning("Please log in with your Google account to fetch personalized news.")
# Google Search: Global News Categories
st.subheader("2. Global News Insights")
categories = ["Technology", "Sports", "Politics", "Entertainment", "Science"]
for category in categories:
    st.write(f"Fetching news for **{category}**...")
    category_results = fetch_news(f"latest {category} news", num_results=3)
    if category_results and "error" not in category_results[0]:
        st.success(f"Top {len(category_results)} results for '{category}':")
        for idx, result in enumerate(category_results, start=1):
            st.write(f"{idx}. [{result['title']}]({result['link']})")
    else:
        # fetch_news can return an EMPTY list on a successful request with
        # no organic results; the original `category_results[0]` raised
        # IndexError in that case.
        message = category_results[0].get("error", "No news found.") if category_results else "No news found."
        st.error(message)
# File Upload Section
st.subheader("3. Upload and Organize Files")
uploaded_files = st.file_uploader(
    "Upload your files",
    accept_multiple_files=True,
    type=['jpg', 'jpeg', 'png', 'mp4', 'avi'],
)
if uploaded_files:
    for uploaded_file in uploaded_files:
        # basename() strips any directory components from the
        # client-supplied file name, preventing path traversal
        # (e.g. "../../etc/passwd") outside UPLOAD_DIR.
        safe_name = os.path.basename(uploaded_file.name)
        file_path = os.path.join(UPLOAD_DIR, safe_name)
        with open(file_path, "wb") as f:
            f.write(uploaded_file.getbuffer())
        st.success(f"File saved locally: {file_path}")

        # Preview images inline.
        if uploaded_file.type.startswith('image'):
            image = Image.open(uploaded_file)
            st.image(image, caption=uploaded_file.name, use_column_width=True)

        # Metadata Input (the file name in each label also keys the
        # widgets uniquely per file).
        album = st.text_input(f"Album for {uploaded_file.name}", "Default Album")
        tags = st.text_input(f"Tags for {uploaded_file.name} (comma-separated)", "")

        # Log Metadata to the blockchain and to Neo4j.
        if st.button(f"Log Metadata for {uploaded_file.name}"):
            metadata = {"file_name": uploaded_file.name, "tags": tags.split(','), "album": album}
            blockchain_details = blockchain_logger.log_data(metadata)
            blockchain_hash = blockchain_details.get("block_hash", "N/A")
            # Use Neo4jHandler from DocumentSearchSystem to log transaction
            system.neo4j_handler.log_relationships(uploaded_file.name, album, blockchain_hash, [])
            st.write(f"Metadata logged successfully! Blockchain Details: {blockchain_details}")

# Blockchain Integrity Validation
# (Status strings repaired from mojibake: the original source contained
# mis-encoded bytes where the check/cross marks belong.)
if st.button("Validate Blockchain Integrity"):
    is_valid = blockchain_logger.is_blockchain_valid()
    st.write("Blockchain Integrity:", "Valid βœ…" if is_valid else "Invalid ❌")
# Query System: free-text search over the loaded documents.
st.subheader("4. Search Documents")
query = st.text_input("Enter your query (e.g., 'sports news', 'machine learning')")
if st.button("Search Documents"):
    if query:
        # process_query returns a dict carrying a "status" discriminator;
        # the branches below mirror the statuses handled here ("success",
        # "no_results", "rejected"). NOTE(review): any other status value
        # renders nothing — confirm against DocumentSearchSystem.
        result = system.process_query(query)
        if result["status"] == "success":
            st.success(f"Query processed successfully!")
            st.write("### Query Response:")
            st.write(result["response"])
            st.write("### Retrieved Documents:")
            for idx, doc in enumerate(result["retrieved_documents"], start=1):
                st.write(f"**Document {idx}:**")
                st.write(doc[:500])  # Display the first 500 characters
            st.write("### Blockchain Details:")
            st.json(result["blockchain_details"])
        elif result["status"] == "no_results":
            st.warning("No relevant documents found for your query.")
        elif result["status"] == "rejected":
            st.error(result["message"])
    else:
        st.warning("Please enter a query to search.")
# Debugging Section: surface the retriever's document count so a user can
# confirm that initialize_system() actually loaded documents.
if st.checkbox("Show Debug Information"):
    st.write(f"Total documents loaded: {len(system.retriever.documents)}")