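"""
Racoon Search: a small Streamlit app that queries the Google Custom Search API,
fetches each result page, keeps the sentences that contain the query keywords
(via spaCy's PhraseMatcher), and summarizes them with a DistilBART pipeline.
Search results, page content, and summaries are cached on disk.
"""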
from os import makedirs, remove
from os.path import exists, dirname
from functools import cache
import json
import streamlit as st
from googleapiclient.discovery import build
from slugify import slugify
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline 
import uuid
import spacy
from spacy.matcher import PhraseMatcher

from beautiful_soup.beautiful_soup import get_url_content
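# get_url_content( url ) fetches a page and returns its text strings (local BeautifulSoup helper).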


@cache
def google_search_api_request( query ):
    """
    Request Google Search API with query and return results.
    """

    service = build(
        "customsearch",
        "v1",
        developerKey=st.secrets["google_search_api_key"],
        cache_discovery=False
    )

    # Exclude PDFs from search results.
    query = query + ' -filetype:pdf'

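    # Request a partial response: only result titles/links and the total result count.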
    return service.cse().list(
        q=query,
        cx=st.secrets["google_search_engine_id"],
        num=5,
        lr='lang_en', # Restrict results to English ('lang_de' would restrict to German).
        fields='items(title,link),searchInformation(totalResults)'
        ).execute()


def search_results( query ):
    """
    Request Google Search API with query and return results. Results are cached in files.
    """
    file_path = 'search-results/' + slugify( query ) + '.json'

    # Create cache directory if it doesn't exist.
    makedirs(dirname(file_path), exist_ok=True)

    results = []
    # Check if cache file exists.
    if exists( file_path ):
        with open( file_path, 'r' ) as results_file:
            results = json.load( results_file )
    else:
        search_result = google_search_api_request( query )
        # Check if search contains results.
        if int( search_result['searchInformation']['totalResults'] ) > 0:
            results = search_result['items']
            # Save results to cache file.
            with open( file_path, 'w' ) as results_file:
                json.dump( results, results_file )

    if len( results ) == 0:
        raise Exception('No results found.')
    
    return results

def get_summary( url, keywords ):
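    """
    Return a summary for the page at url, built from sentences that contain
    the query keywords. Page content and summaries are cached on disk, keyed
    by a UUID derived from the URL.
    """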
    url_id = uuid.uuid5( uuid.NAMESPACE_URL, url ).hex
    file_path = 'summaries/' + url_id + '.json'

    # Create cache directory if it doesn't exist.
    makedirs(dirname(file_path), exist_ok=True)

    # Check if cache file exists.
    if exists( file_path ):
        with open( file_path, 'r' ) as file:
            summary = json.load( file )
    else:
        strings = get_url_content( url )
        content_cache = 'content/' + url_id + '.txt'

        # Create cache directory if it doesn't exist.
        makedirs(dirname(content_cache), exist_ok=True)

        # Check if content cache file exists.
        if exists( content_cache ):
            with open( content_cache, 'r' ) as file:
                content = file.read().rstrip()
        else:
            content = prep_chunks_summary( strings, keywords )
            # Save content to cache file.
            with open( content_cache, 'w' ) as file:
                print(content.strip(), file=file)

        max_length = 200
        # Rudimentary token count: number of space-separated words.
        word_count = len( content.split(' ') )
        # If the content is longer than 200 words, summarize it.
        if word_count > max_length:
            # Generate a summary from the compiled content.
            summary = generate_summary( content, max_length )
        else:
            summary = [ { "summary_text": content } ]

        # Save results to cache file.
        with open( file_path, 'w' ) as file:
            json.dump( summary, file )
    
    return summary

def generate_summary( content, max_length ):
    """
    Generate a summary of content, at most max_length tokens long.
    """
    summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
    # https://huggingface.co/docs/transformers/v4.18.0/en/main_classes/pipelines#transformers.SummarizationPipeline
    # max_length must be passed as a keyword argument; extra positional
    # arguments to a pipeline call are ignored.
    summary = summarizer(content, max_length=max_length, min_length=30, do_sample=False, truncation=True)

    return summary

def exception_notice( exception ):
    """
    Show the full exception in debug mode, otherwise a short warning.
    """
    query_params = st.experimental_get_query_params()
    # If debug mode is enabled, show exception else show warning.
    if 'debug' in query_params.keys() and query_params['debug'][0] == 'true':
        st.exception(exception)
    else:
        st.warning(str(exception))

# Unused function.
def is_keyword_in_string( keywords, string ):
    """
    Check whether the string contains any of the keywords.
    """
    for keyword in keywords:
        if keyword in string:
            return True
    return False

def filter_sentences_by_keywords( strings, keywords ):
    """
    Filter sentences by keywords using spacy.
    """
    nlp = spacy.load("en_core_web_sm")
    matcher = PhraseMatcher(nlp.vocab)

    # Add keywords to matcher.
    patterns = [nlp(keyword) for keyword in keywords]
    matcher.add("QueryList", patterns)

    sentences = []
    for string in strings:
        # Skip strings shorter than 5 words.
        string_length = len( string.split(' ') )
        if string_length < 5:
            continue

        # Loop through sentences and check if any of the keywords are in the sentence.
        doc = nlp(string)
        for sentence in doc.sents:
            matches = matcher(nlp(sentence.text))
            for match_id, start, end in matches:
                # If keyword is in sentence, add sentence to list.
                if nlp.vocab.strings[match_id] in ["QueryList"]:
                    sentences.append(sentence.text) 

    if len(sentences) == 0:
        raise Exception('No sentences with keywords found.')

    return sentences

def split_content_into_chunks( sentences, tokenizer ):
    """
    Greedily pack sentences into chunks that fit the model's maximum input length.
    """
    combined_length = 0
    chunk = ""
    chunks = []
    for sentence in sentences:
        # Number of tokens in the sentence.
        length = len( tokenizer.tokenize( sentence ) )

        # If adding this sentence would exceed the model's max input length, start a new chunk.
        if combined_length + length > tokenizer.max_len_single_sentence:
            chunks.append(chunk)
            chunk = '' # Reset chunk.
            combined_length = 0 # Reset token length.

        # Add sentence to chunk.
        combined_length += length
        chunk += sentence + ' '

    chunks.append(chunk)

    return chunks

def prep_chunks_summary( strings, keywords ):
    """
    Filter the page content by keywords, split it into model-sized chunks,
    and summarize each chunk so the combined text fits the model's input limit.
    """
    checkpoint = "sshleifer/distilbart-cnn-12-6"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

    sentences = filter_sentences_by_keywords( strings, keywords )
    chunks = split_content_into_chunks( sentences, tokenizer )

    content = ''
    number_of_chunks = len( chunks )
    # Summarize chunk by chunk if there is more than one.
    if number_of_chunks > 1:
        # Cap each chunk summary so the combined text stays within the model's max input length.
        max_length = int( tokenizer.max_len_single_sentence / number_of_chunks )

        summarizer = pipeline("summarization", model=model, tokenizer=tokenizer)

        # Loop through chunks and generate a summary for each.
        for chunk in chunks:
            # Number of tokens in the chunk.
            chunk_length = len( tokenizer.tokenize( chunk ) )
            # If the chunk is shorter than the cap, aim for half its own length
            # instead (without shrinking the cap for later chunks).
            chunk_max_length = max_length
            if chunk_length < max_length:
                chunk_max_length = int( chunk_length / 2 )

            # https://huggingface.co/docs/transformers/v4.18.0/en/main_classes/pipelines#transformers.SummarizationPipeline
            chunk_summary = summarizer(chunk, max_length=chunk_max_length, min_length=10, do_sample=False, truncation=True)

            for summary in chunk_summary:
                content += summary['summary_text'] + ' '

    elif number_of_chunks == 1:
        content = chunks[0]

    return content

def main():
    st.title('Racoon Search')
    query = st.text_input('Search query')
    query_params = st.experimental_get_query_params()

    if query:
        with st.spinner('Loading search results...'):
            try:
                results = search_results( query )
            except Exception as exception:
                exception_notice(exception)
                return

        # Count results.
        number_of_results = len( results )
        st.success( 'Found {} results for "{}".'.format( number_of_results, query ) )

        # If debug mode is enabled, show search results in JSON.
        if 'debug' in query_params.keys() and query_params['debug'][0] == 'true':
            with st.expander("Search results JSON"):
                if st.button('Delete search result cache', key=query + 'cache'):
                    remove( 'search-results/' + slugify( query ) + '.json' )
                st.json( results )

        progress_bar = st.progress(0)

        st.header('Search results')
        st.markdown('---')

        for index, result in enumerate(results):
            with st.container():
                st.markdown('### ' + result['title'])
                # Create a unique id for the result.
                url_id = uuid.uuid5( uuid.NAMESPACE_URL, result['link'] ).hex

                # List of query keywords.
                keywords = query.split(' ')
                try:
                    # Summarize the page content.
                    summary = get_summary( result['link'], keywords )
                    st.markdown(summary[0]['summary_text'])
                except Exception as exception:
                    exception_notice(exception)

                progress_bar.progress( ( index + 1 ) / number_of_results )

                # Show links and buttons.
                col1, col2, col3 = st.columns(3)
                with col1:
                    st.markdown('[Website Link]({})'.format(result['link']))

                with col2:
                    if st.button('Delete content from cache', key=url_id + 'content'):
                        # Match the path get_summary writes to ('content/').
                        remove( 'content/' + url_id + '.txt' )

                with col3:
                    if st.button('Delete summary from cache', key=url_id + 'summary'):
                        remove( 'summaries/' + url_id + '.json' )

                st.markdown('---')


if __name__ == '__main__':
    main()