import gradio as gr
from transformers import pipeline
import feedparser
from datetime import datetime, timedelta
import pytz
from bs4 import BeautifulSoup
import hashlib
import threading

# Global settings
SUMMARIZER_MODELS = {
    "Default (facebook/bart-large-cnn)": "facebook/bart-large-cnn",
    "Free Model (distilbart-cnn-6-6)": "sshleifer/distilbart-cnn-6-6"
}
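# Keep up to CACHE_SIZE summaries in memory, only surface articles published
# within RSS_FETCH_INTERVAL, and show at most ARTICLE_LIMIT of them per run.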
CACHE_SIZE = 500
RSS_FETCH_INTERVAL = timedelta(hours=8)
ARTICLE_LIMIT = 5

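# Category -> {source name: RSS feed URL}; add entries here to cover more feeds.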
NEWS_SOURCES = {
    "Technology": {"TheNewYorkTimes": "https://rss.nytimes.com/services/xml/rss/nyt/Technology.xml"},
    "Business": {"TheNewYorkTimes": "https://rss.nytimes.com/services/xml/rss/nyt/Business.xml"},
    "World News": {"BBC": "http://feeds.bbci.co.uk/news/world/rss.xml"}
}

class NewsCache:
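    """Thread-safe FIFO cache; eviction relies on dict insertion order (Python 3.7+)."""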
    def __init__(self, size):
        self.cache = {}
        self.size = size
        self.lock = threading.Lock()

    def get(self, key):
        with self.lock:
            return self.cache.get(key)

    def set(self, key, value):
        with self.lock:
            if len(self.cache) >= self.size:
                oldest_key = next(iter(self.cache))
                del self.cache[oldest_key]
            self.cache[key] = value

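# Module-level cache of generated summaries, shared across requests.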
cache = NewsCache(CACHE_SIZE)

def fetch_rss_news(categories):
    articles = []
    cutoff_time = datetime.now(pytz.UTC) - RSS_FETCH_INTERVAL
    for category in categories:
        for source, url in NEWS_SOURCES.get(category, {}).items():
            try:
                feed = feedparser.parse(url)
                for entry in feed.entries:
                    # Skip entries without a parsed date instead of letting one
                    # malformed entry abort the whole feed.
                    if not getattr(entry, "published_parsed", None):
                        continue
                    published = datetime(*entry.published_parsed[:6], tzinfo=pytz.UTC)
                    if published > cutoff_time:
                        articles.append({
                            "title": entry.title,
                            "description": BeautifulSoup(entry.get("description", ""), "html.parser").get_text(),
                            "link": entry.link,
                            "category": category,
                            "source": source,
                            "published": published
                        })
            except Exception:
                # Ignore feeds that fail to download or parse.
                continue
    # Newest first, capped at ARTICLE_LIMIT across all selected categories.
    articles.sort(key=lambda x: x["published"], reverse=True)
    return articles[:ARTICLE_LIMIT]

# Load each summarization pipeline once and reuse it across requests;
# building a pipeline per call would reload the model weights every time.
_summarizers = {}
_summarizers_lock = threading.Lock()

def _get_summarizer(model_name):
    with _summarizers_lock:
        if model_name not in _summarizers:
            _summarizers[model_name] = pipeline("summarization", model=model_name, device=-1)
        return _summarizers[model_name]

def summarize_text(text, model_name):
    # Key the cache on model + text so one model's summary is never
    # served while the other model is selected.
    content_hash = hashlib.md5(f"{model_name}:{text}".encode()).hexdigest()
    cached_summary = cache.get(content_hash)
    if cached_summary:
        return cached_summary
    try:
        summarizer = _get_summarizer(model_name)
        result = summarizer(text, max_length=120, min_length=40, truncation=True)
        summary = result[0]['summary_text']
        cache.set(content_hash, summary)
        return summary
    except Exception:
        return "Summary unavailable."

def summarize_articles(articles, model_name):
    summaries = []
    for article in articles:
        summary = summarize_text(article["description"], model_name)
        # Build each entry without the leading indentation that a triple-quoted
        # string inside a function would otherwise leak into the output.
        summaries.append(
            f"πŸ“° {article['title']}\n"
            f"- πŸ“ Category: {article['category']}\n"
            f"- πŸ’‘ Source: {article['source']}\n"
            f"- πŸ”— Read More: {article['link']}\n"
            f"πŸ“ƒ Summary: {summary}\n"
        )
    return "\n".join(summaries)

def generate_summary(selected_categories, model_name):
    if not selected_categories:
        return "Please select at least one category."
    articles = fetch_rss_news(selected_categories)
    if not articles:
        return "No recent news found in the selected categories."
    return summarize_articles(articles, model_name)

# Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("# πŸ“° AI News Summarizer")
    with gr.Row():
        categories = gr.CheckboxGroup(
            choices=list(NEWS_SOURCES.keys()),
            label="Select News Categories"
        )
        model_selector = gr.Radio(
            choices=list(SUMMARIZER_MODELS.keys()),
            label="Choose Summarization Model",
            value="Default (facebook/bart-large-cnn)"
        )
    summarize_button = gr.Button("Get News Summary")
    summary_output = gr.Textbox(label="News Summary", lines=20)

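    # Translate the human-readable radio label back into its model id.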
    def get_summary(selected_categories, selected_model):
        model_name = SUMMARIZER_MODELS[selected_model]
        return generate_summary(selected_categories, model_name)

    summarize_button.click(get_summary, inputs=[categories, model_selector], outputs=summary_output)

if __name__ == "__main__":
    demo.launch()