File size: 6,562 Bytes
7405474
a463e6e
1b7191e
 
3e269ec
7405474
1b7191e
 
 
 
 
7405474
5398274
7405474
 
3e269ec
759c15a
 
 
1b7191e
759c15a
 
 
 
 
 
 
 
8ce7f13
759c15a
7405474
1b7191e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5398274
1b7191e
 
 
 
 
 
 
 
 
 
 
 
7405474
8ce7f13
1b7191e
 
 
 
 
 
 
 
 
 
a463e6e
1b7191e
 
 
8ce7f13
 
 
 
 
1b7191e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8ce7f13
1b7191e
 
6dd2090
 
8ce7f13
6dd2090
759c15a
 
 
 
 
 
 
 
 
 
 
 
6dd2090
759c15a
 
0a4227c
 
6dd2090
 
 
759c15a
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
import streamlit as st
import os
from pathlib import Path
from PIL import Image
from rag_sec.document_search_system import DocumentSearchSystem
from chainguard.blockchain_logger import BlockchainLogger
import requests
import pandas as pd

# SerpAPI key for Google-search requests (used by fetch_news below).
# SECURITY(review): the key was a hard-coded placeholder; prefer the
# SERPAPI_KEY environment variable so real keys never land in source control.
# The old placeholder remains the fallback for backward compatibility.
SERPAPI_KEY = os.getenv("SERPAPI_KEY", "your_serpapi_api_key")

# Single shared blockchain logger, used for metadata logging and the
# integrity-validation button further down.
blockchain_logger = BlockchainLogger()

# Initialize DocumentSearchSystem
@st.cache_resource
def initialize_system():
    """Create the DocumentSearchSystem once per server process and load documents.

    Returns:
        DocumentSearchSystem: connected to Neo4j with the retriever's
        document store pre-populated.
    """
    # SECURITY(review): credentials were hard-coded in source; read them from
    # the environment, keeping the previous values as fallbacks so existing
    # deployments keep working. Rotate the exposed password.
    # (The unused `home_dir` local from the original was removed.)
    system = DocumentSearchSystem(
        neo4j_uri=os.getenv("NEO4J_URI", "neo4j+s://0ca71b10.databases.neo4j.io"),
        neo4j_user=os.getenv("NEO4J_USER", "neo4j"),
        neo4j_password=os.getenv(
            "NEO4J_PASSWORD", "HwGDOxyGS1-79nLeTiX5bx5ohoFSpvHCmTv8IRgt-lY"
        ),
    )
    system.retriever.load_documents()
    return system

# Initialize the system
system = initialize_system()  # cached via @st.cache_resource, so this constructs at most once per process

# Function to Fetch News from SerpAPI
def fetch_news(query, num_results=5):
    """Fetch Google search results for *query* via SerpAPI.

    Args:
        query: Search phrase to send to the API.
        num_results: Number of results requested.

    Returns:
        list[dict]: items with "title" and "link" keys on success, or a
        single-item list containing an "error" key on any failure.
    """
    url = "https://serpapi.com/search"
    params = {
        "engine": "google",
        "q": query,
        "api_key": SERPAPI_KEY,
        "num": num_results,
    }
    try:
        # Without a timeout a stalled request would hang the Streamlit app forever.
        response = requests.get(url, params=params, timeout=10)
        response.raise_for_status()
        search_results = response.json().get("organic_results", [])
        # .get() with defaults: some organic results may lack a title or link,
        # and a KeyError here would discard the whole batch.
        return [
            {"title": result.get("title", ""), "link": result.get("link", "")}
            for result in search_results
        ]
    except Exception as e:  # UI boundary: surface every failure as data, never crash
        return [{"error": f"An error occurred: {str(e)}"}]

# Mock User Information (Replace with actual Google Login integration if needed)
def get_user_info():
    """Return details for the current user (static placeholder for now)."""
    # TODO: replace this stub with real Google Login data.
    user = {
        "name": "Talex Maxim",
        "email": "taimax13@gmail.com",
    }
    return user

# Directory for storing uploaded files
UPLOAD_DIR = "uploaded_files"
Path(UPLOAD_DIR).mkdir(exist_ok=True)  # idempotent: no error if it already exists

# Streamlit Layout
st.title("Memora: Advanced File Upload and News Insights")
st.subheader("Securely upload, organize, and query your files while staying informed.")

# User-Specific Information
# NOTE(review): get_user_info() currently returns a static non-empty dict, so
# the else branch is unreachable until real login is wired in.
user_info = get_user_info()
if user_info:
    st.sidebar.write("### Logged in as:")
    st.sidebar.write(f"**Name:** {user_info['name']}")
    st.sidebar.write(f"**Email:** {user_info['email']}")
else:
    st.sidebar.write("### Not Logged In")
    st.sidebar.write("We invite you on the journey! Please log in with your Google account.")

# Google Search: User-Specific News
if user_info:
    st.subheader("1. Latest News About You")
    user_name = user_info["name"]
    st.write(f"Fetching latest news for **{user_name}**...")
    user_news = fetch_news(user_name, num_results=5)

    if user_news and "error" not in user_news[0]:
        st.success(f"Top {len(user_news)} results for '{user_name}':")
        user_news_df = pd.DataFrame(user_news)
        st.dataframe(user_news_df)
    elif user_news:
        # First item carries the error payload produced by fetch_news.
        st.error(user_news[0].get("error", "No news found."))
    else:
        # fetch_news can return an empty list; indexing user_news[0] in that
        # case raised IndexError in the original code.
        st.error("No news found.")
else:
    st.warning("Please log in with your Google account to fetch personalized news.")

# Google Search: Global News Categories
st.subheader("2. Global News Insights")
categories = ["Technology", "Sports", "Politics", "Entertainment", "Science"]

for category in categories:
    st.write(f"Fetching news for **{category}**...")
    category_results = fetch_news(f"latest {category} news", num_results=3)
    if category_results and "error" not in category_results[0]:
        st.success(f"Top {len(category_results)} results for '{category}':")
        for idx, result in enumerate(category_results, start=1):
            st.write(f"{idx}. [{result['title']}]({result['link']})")
    elif category_results:
        # First item carries the error payload produced by fetch_news.
        st.error(category_results[0].get("error", "No news found."))
    else:
        # Empty result list: indexing category_results[0] here raised
        # IndexError in the original code.
        st.error("No news found.")

# File Upload Section
st.subheader("3. Upload and Organize Files")

uploaded_files = st.file_uploader(
    "Upload your files",
    accept_multiple_files=True,
    type=['jpg', 'jpeg', 'png', 'mp4', 'avi'],
)

if uploaded_files:
    for uploaded_file in uploaded_files:
        # SECURITY: basename strips any path components a client could embed
        # in the file name, preventing writes outside UPLOAD_DIR (path traversal).
        safe_name = os.path.basename(uploaded_file.name)
        file_path = os.path.join(UPLOAD_DIR, safe_name)
        with open(file_path, "wb") as f:
            f.write(uploaded_file.getbuffer())
        st.success(f"File saved locally: {file_path}")

        # Preview images inline; video files are saved but not previewed.
        if uploaded_file.type.startswith('image'):
            image = Image.open(uploaded_file)
            # NOTE(review): use_column_width is deprecated in newer Streamlit
            # (use_container_width replaces it) — confirm the pinned version.
            st.image(image, caption=uploaded_file.name, use_column_width=True)

        # Metadata Input (labels embed the file name, keeping widget keys unique)
        album = st.text_input(f"Album for {uploaded_file.name}", "Default Album")
        tags = st.text_input(f"Tags for {uploaded_file.name} (comma-separated)", "")

        # Log Metadata
        if st.button(f"Log Metadata for {uploaded_file.name}"):
            metadata = {"file_name": uploaded_file.name, "tags": tags.split(','), "album": album}
            blockchain_details = blockchain_logger.log_data(metadata)
            blockchain_hash = blockchain_details.get("block_hash", "N/A")

            # Use Neo4jHandler from DocumentSearchSystem to log transaction
            system.neo4j_handler.log_relationships(uploaded_file.name, album, blockchain_hash, [])
            st.write(f"Metadata logged successfully! Blockchain Details: {blockchain_details}")

# Blockchain Integrity Validation
if st.button("Validate Blockchain Integrity"):
    is_valid = blockchain_logger.is_blockchain_valid()
    # Fixed mojibake in the original user-facing string ("βœ…" -> "✅").
    st.write("Blockchain Integrity:", "Valid ✅" if is_valid else "Invalid ❌")

# Query System
st.subheader("4. Search Documents")
query = st.text_input("Enter your query (e.g., 'sports news', 'machine learning')")

if st.button("Search Documents"):
    if query:
        # process_query returns a dict with a "status" discriminator; the
        # statuses handled here are "success", "no_results", and "rejected".
        # Any other status falls through silently — presumably intentional;
        # TODO(review): confirm no other statuses exist.
        result = system.process_query(query)
        if result["status"] == "success":
            st.success(f"Query processed successfully!")
            st.write("### Query Response:")
            st.write(result["response"])
            st.write("### Retrieved Documents:")
            for idx, doc in enumerate(result["retrieved_documents"], start=1):
                st.write(f"**Document {idx}:**")
                st.write(doc[:500])  # Display the first 500 characters
            st.write("### Blockchain Details:")
            st.json(result["blockchain_details"])
        elif result["status"] == "no_results":
            st.warning("No relevant documents found for your query.")
        elif result["status"] == "rejected":
            # The system refused the query; show its own message verbatim.
            st.error(result["message"])
    else:
        st.warning("Please enter a query to search.")

# Debugging Section
if st.checkbox("Show Debug Information"):
    doc_count = len(system.retriever.documents)
    st.write(f"Total documents loaded: {doc_count}")