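"""rss_processor.py

Fetch articles from a set of RSS feeds, deduplicate them, and store them as
documents in a local Chroma vector database for later retrieval.
"""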
import logging

import feedparser
# These classes moved out of the core `langchain` package; the
# langchain-community / langchain-core paths below are their current homes.
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_core.documents import Document

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Constants
LOCAL_DB_DIR = "chroma_db"
RSS_FEEDS = [
"https://www.nasa.gov/rss/dyn/breaking_news.rss",
"https://www.sciencedaily.com/rss/top/science.xml",
"https://www.wired.com/feed/rss",
# Add more feeds as needed; starting with reliable ones
]

# Initialize embedding model and vector DB
embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vector_db = Chroma(persist_directory=LOCAL_DB_DIR, embedding_function=embedding_model)


def fetch_rss_feeds():
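    """Fetch and deduplicate entries from every feed in RSS_FEEDS.

    Returns a list of article dicts with title, link, description,
    published date, category, and image URL.
    """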
articles = []
seen_keys = set()
for feed_url in RSS_FEEDS:
try:
logger.info(f"Fetching {feed_url}")
feed = feedparser.parse(feed_url)
            if feed.bozo:
                # feedparser sets bozo for any parse irregularity, even when
                # usable entries were still recovered, so only skip the feed
                # if it yielded no entries at all.
                logger.warning(f"Parse error for {feed_url}: {feed.bozo_exception}")
                if not feed.entries:
                    continue
for entry in feed.entries:
title = entry.get("title", "No Title")
link = entry.get("link", "")
description = entry.get("summary", entry.get("description", "No Description"))
key = f"{title}|{link}"
if key not in seen_keys:
seen_keys.add(key)
                    # media_content / media_thumbnail may be absent or an
                    # empty list, so guard each lookup; "svg" preserves the
                    # original placeholder for articles without an image.
                    media = (entry.get("media_content") or
                             entry.get("media_thumbnail") or [{}])
                    image = media[0].get("url") or "svg"
articles.append({
"title": title,
"link": link,
"description": description,
"published": entry.get("published", "Unknown Date"),
"category": categorize_feed(feed_url),
"image": image,
})
except Exception as e:
logger.error(f"Error fetching {feed_url}: {e}")
logger.info(f"Total articles fetched: {len(articles)}")
return articles


def categorize_feed(url):
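    """Map a feed URL to a coarse topic category."""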
if "sciencedaily" in url:
return "Science"
elif "nasa" in url:
return "Space"
elif "wired" in url:
return "Tech"
return "Uncategorized"


def process_and_store_articles(articles):
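    """Wrap articles in langchain Documents and add them to the Chroma DB."""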
documents = []
for article in articles:
try:
metadata = {
"title": article["title"],
"link": article["link"],
"original_description": article["description"],
"published": article["published"],
"category": article["category"],
"image": article["image"],
}
doc = Document(page_content=article["description"], metadata=metadata)
documents.append(doc)
except Exception as e:
logger.error(f"Error processing article {article['title']}: {e}")
if documents:
try:
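            # chromadb >= 0.4 persists the collection to LOCAL_DB_DIR
            # automatically; older versions also required an explicit
            # vector_db.persist() call after adding documents.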
vector_db.add_documents(documents)
logger.info(f"Stored {len(documents)} articles in DB")
except Exception as e:
logger.error(f"Error storing articles: {e}")


if __name__ == "__main__":
articles = fetch_rss_feeds()
process_and_store_articles(articles)