saradha12 committed on
Commit
32b8e5a
1 Parent(s): 3e33dd5

Update README.md

Files changed (1)
  1. README.md +4 -80
README.md CHANGED
@@ -1,80 +1,4 @@
- !pip install transformers
-
- from transformers import AutoModel, AutoTokenizer
- model_name = "bert-base-uncased"
- model = AutoModel.from_pretrained(model_name)
- tokenizer = AutoTokenizer.from_pretrained(model_name)
-
- from transformers import BertTokenizerFast, EncoderDecoderModel
- import torch
- device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
- tokenizer = BertTokenizerFast.from_pretrained('mrm8488/bert-small2bert-small-finetuned-cnn_daily_mail-summarization')
- model = EncoderDecoderModel.from_pretrained('mrm8488/bert-small2bert-small-finetuned-cnn_daily_mail-summarization').to(device)
-
- def generate_summary(text):
-
-     inputs = tokenizer([text], padding="max_length", truncation=True, max_length=512, return_tensors="pt")
-     input_ids = inputs.input_ids.to(device)
-     attention_mask = inputs.attention_mask.to(device)
-
-     output = model.generate(input_ids, attention_mask=attention_mask)
-
-     return tokenizer.decode(output[0], skip_special_tokens=True)
-
- text = """Thanks for joining XX Day – by far our biggest event ever! The conference was a great success and we’re thrilled you were able to participate. With all the rich technical content across 5 tracks and 80 sessions, be sure to catch the replays of sessions you may have missed.
-
- For those seeking the Data & AI Skills Certificate or the XX Skills Certificate, please ensure you complete all the required sessions and take the knowledge quiz to earn your certificate. Remember the deadline is October 25th! For detailed information, go to:
-
- We have one final request – your feedback is a gift. Please complete this short survey on XX Day to provide your input about the conference. It will only take a few minutes and you will be entered to win a $50 gift card. Help us make next year’s XX Day even bigger and better!
- """
- generate_summary(text)
-
-
- import imaplib
- import email
- from transformers import BartForConditionalGeneration, BartTokenizer
-
- model_name = 'facebook/bart-large-cnn'
- tokenizer = BartTokenizer.from_pretrained(model_name)
- model = BartForConditionalGeneration.from_pretrained(model_name)
-
- mail = imaplib.IMAP4_SSL('imap.gmail.com')
- mail.login('gmail', 'password')
- mail.select('inbox')
-
-
- from datetime import datetime
- today_date = datetime.now().strftime("%d-%b-%Y")
-
- status, email_ids = mail.search(None, 'SINCE', today_date)
- email_ids = email_ids[0].split()
-
- def generate_summary(email_text):
-     inputs = tokenizer(email_text, return_tensors="pt", max_length=1024, truncation=True)
-     summary_ids = model.generate(inputs["input_ids"], max_length=150, min_length=50, length_penalty=2.0, num_beams=4, early_stopping=True)
-     summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
-     return summary
-
- # sentiment classifier for the label and score printed below
- # (assumed: the default transformers sentiment-analysis pipeline)
- from transformers import pipeline
- sentiment_analyzer = pipeline("sentiment-analysis")
-
- for email_id in email_ids[-10:]:
-     status, msg_data = mail.fetch(email_id, "(RFC822)")
-     raw_email = msg_data[0][1]
-     email_message = email.message_from_bytes(raw_email)
-
-     sender = email_message["From"]
-     email_subject = email_message["Subject"]
-     email_body = ""
-
-     if email_message.is_multipart():
-         for part in email_message.walk():
-             if part.get_content_type() == "text/plain":
-                 email_body = part.get_payload(decode=True).decode()
-
-     if email_body:
-         summary = generate_summary(email_body)
-         sentiment = sentiment_analyzer(email_body[:512])[0]  # cap input length for the classifier
-         email_label, score = sentiment["label"], sentiment["score"]
-         print(f"From: {sender}")
-         print(f"Email Subject: {email_subject}")
-         print(f"Generated Summary: {summary}")
-         print(f"Sentiment Label: {email_label}")
-         print(f"Sentiment Score: {score}")
-         print("-" * 50)
-
- mail.logout()
 
+ ---
+ language:
+ - en
+ ---