Shivam29rathore committed on
Commit
a0c33a2
•
1 Parent(s): 24ace04

Create new file

Files changed (1)
  1. app.py +125 -0
app.py ADDED
@@ -0,0 +1,125 @@
+ from transformers import PegasusTokenizer, PegasusForConditionalGeneration
+ import torch
+ import os
+ import nltk
+ from nltk.tokenize import sent_tokenize
+ import gradio as gr
+
+ # The sentence splitter used below relies on NLTK's punkt data
+ nltk.download('punkt')
+
+ # Load the model and the tokenizer once at startup
+ model_name = "human-centered-summarization/financial-summarization-pegasus"
+ tokenizer = PegasusTokenizer.from_pretrained(model_name)
+ model2 = PegasusForConditionalGeneration.from_pretrained(model_name)
+
+
+ def pegasus(text):
+     '''Summarize a document with the Pegasus model. The input is
+     split into chunks of whole sentences, each chunk is summarized,
+     and the chunk summaries are joined into a single document.'''
+
+     # Persist the raw input under /tmp and read it back, leaving an
+     # inspectable copy of each request on disk
+     data_path = "/tmp/"
+     if not os.path.exists(data_path):
+         os.makedirs(data_path)
+     input_ = os.path.join(data_path, "input.txt")
+
+     with open(input_, "w") as file:
+         file.write(text)
+     # Read the written txt back into a variable
+     with open(input_, "r") as f:
+         text_ = f.read()
+     def tokenized_sentences(document):
+         '''Split a document into chunks of whole sentences whose
+         combined length stays under the model's input limit.'''
+         chunks = []
+         sentences = []
+         length = 0
+         for sentence in sent_tokenize(document):
+             length += len(sentence)
+             # 512 is the maximum input length for the Pegasus model;
+             # character count is used here as a rough proxy for tokens
+             if length < 512:
+                 sentences.append(sentence)
+             else:
+                 chunks.append(sentences)
+                 sentences = [sentence]
+                 length = len(sentence)
+
+         sentences = [sentence.strip() for sentence in sentences]
+         # Keep the final, partially filled chunk
+         if sentences:
+             chunks.append(sentences)
+         return chunks
+
+     tokenized = tokenized_sentences(text_)
+     # Use the GPU if one is available
+     device = 'cuda' if torch.cuda.is_available() else 'cpu'
+     model2.to(device)
+     # Collect one summary per chunk
+     summary = []
+     for chunk in tokenized:
+         # Encode the chunk, truncating anything past the model's limit
+         inputs = tokenizer.encode(' '.join(chunk), truncation=True,
+                                   return_tensors='pt').to(device)
+         # Generate an abstractive summary of the chunk
+         all_summary = model2.generate(inputs, do_sample=True,
+                                       max_length=50, top_k=50, top_p=0.95,
+                                       num_beams=5, early_stopping=True)
+         # Decode the generated token ids back into text
+         output = [tokenizer.decode(each_summary, skip_special_tokens=True,
+                                    clean_up_tokenization_spaces=False)
+                   for each_summary in all_summary]
+         summary.append(output)
+     # Flatten the per-chunk summaries and join them with spaces
+     summary = [sentence for each in summary for sentence in each]
+     final = " ".join(summary)
+
+     return final
+
+
+ interface1 = gr.Interface(fn=pegasus,
+                           inputs=gr.inputs.Textbox(lines=15,
+                                                    placeholder="Enter your text!",
+                                                    label='Input - 10-K Sections'),
+                           outputs=gr.outputs.Textbox(label='Output - Pegasus'))
+
+ if __name__ == "__main__":
+     interface1.launch()
+
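For a quick sanity check outside the Gradio UI, `pegasus` can be imported and called directly; with the `if __name__ == "__main__":` guard above, importing the module no longer starts the web app. The snippet below is a minimal sketch: the module name `app` and the sample paragraph are assumptions, and because generation uses `do_sample=True`, the output will vary between runs.

# Hypothetical smoke test; assumes the file above is saved as app.py.
from app import pegasus

# Invented placeholder text standing in for a 10-K section.
sample = (
    "Net revenue for the quarter rose 12% year over year to $4.2 billion. "
    "Operating margin expanded as logistics costs declined. "
    "Management raised full-year guidance, citing strong subscription demand."
)

# Sampling is enabled, so repeated runs produce different summaries.
print(pegasus(sample))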