import gradio as gr
import torch
from transformers import pipeline

# 1. Use a lighter, distilled model and the GPU if one is available
summarizer = pipeline(
    "summarization",
    model="sshleifer/distilbart-cnn-12-6",
    device=0 if torch.cuda.is_available() else -1,  # -1 falls back to CPU
)


def chunked_summary(text, chunk_size=800):
    """Summarize long text by splitting it into word-based chunks and joining the partial summaries."""
    tokens = text.split()
    chunks = [
        " ".join(tokens[i : i + chunk_size])
        for i in range(0, len(tokens), chunk_size)
    ]
    summaries = [
        # truncation=True guards against chunks that still exceed the model's token limit
        summarizer(
            c, max_length=80, min_length=20, do_sample=False, truncation=True
        )[0]["summary_text"]
        for c in chunks
    ]
    return " ".join(summaries)


def classify_action(email_text):
    """Suggest a next step using simple keyword rules."""
    email_lower = email_text.lower()
    if "meeting" in email_lower or "schedule" in email_lower:
        return "Schedule a meeting"
    elif "question" in email_lower or "reply" in email_lower or "can you" in email_lower:
        return "Reply"
    elif "unsubscribe" in email_lower or "spam" in email_lower:
        return "Delete or Mark as Spam"
    else:
        return "Read and Archive"


def summarize_and_recommend(email_text):
    if not email_text.strip():
        return "No content provided.", "No action"

    # 2. Decide whether to chunk: long emails are summarized chunk by chunk
    word_count = len(email_text.split())
    if word_count > 800:
        summary = chunked_summary(email_text)
    else:
        summary = summarizer(
            email_text, max_length=80, min_length=20, do_sample=False, truncation=True
        )[0]["summary_text"]

    action = classify_action(email_text)
    return summary, action


iface = gr.Interface(
    fn=summarize_and_recommend,
    inputs=gr.Textbox(lines=15, placeholder="Paste your email here..."),
    outputs=[
        gr.Textbox(label="Summary"),
        gr.Textbox(label="Suggested Action"),
    ],
    title="📩 Smart Email Summarizer & Action Recommender",
    description="Faster summarization with a distilled model and length controls.",
)

iface.launch(server_name="0.0.0.0", server_port=7860)
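
# Minimal sanity check (hypothetical snippet, not part of the app): run it in a
# separate Python session after importing summarize_and_recommend, or comment out
# iface.launch(...) above. The action is deterministic from the keyword rules; the
# summary text depends on the model.
#
# sample = (
#     "Hi team, can we schedule a meeting on Friday to walk through the Q3 numbers? "
#     "Please reply with your availability."
# )
# summary, action = summarize_and_recommend(sample)
# print(action)   # -> "Schedule a meeting" (the "meeting"/"schedule" rule fires first)
# print(summary)  # short model-generated summary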