from flask import Flask, request, jsonify
from nltk import sent_tokenize, word_tokenize, FreqDist
from nltk.corpus import stopwords
import nltk

app = Flask(__name__)

# Fetch the tokenizer models and stopword list on first run (no-op afterwards)
nltk.download("punkt", quiet=True)
nltk.download("stopwords", quiet=True)

stop_words = set(stopwords.words("english"))

def summarize_text(text):
    sentences = sent_tokenize(text)
    words = word_tokenize(text)

    # Keep alphanumeric, non-stopword tokens, lowercased for counting
    words = [word.lower() for word in words if word.isalnum() and word.lower() not in stop_words]

    # Calculate word frequencies over the whole text
    freq_dist = FreqDist(words)

    # Score a sentence by the summed frequency of its words; lowercase each
    # token so the lookup matches the lowercased keys in freq_dist
    def sentence_score(sentence):
        return sum(freq_dist[word.lower()] for word in word_tokenize(sentence))

    # Take the 3 highest-scoring sentences, restored to document order
    ranked = sorted(range(len(sentences)), key=lambda i: sentence_score(sentences[i]), reverse=True)
    summary = " ".join(sentences[i] for i in sorted(ranked[:3]))

    return summary
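# Note: summing raw frequencies favors long sentences; if that skews results
# for your inputs, dividing each score by the sentence's token count is a
# simple length normalization.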

def extract_keywords(text):
    words = word_tokenize(text)
    # Keep alphanumeric, non-stopword tokens; deduplicate while preserving order
    keywords = [word.lower() for word in words if word.isalnum() and word.lower() not in stop_words]
    return list(dict.fromkeys(keywords))

@app.route('/text/summarize', methods=['POST'])
def summarize_text_route():
    data = request.get_json(silent=True)
    if not data or 'text' not in data:
        return jsonify({'error': "Request body must be JSON with a 'text' field"}), 400

    summary = summarize_text(data['text'])

    return jsonify({'summary': summary})

@app.route('/text/extract', methods=['POST'])
def extract_keywords_route():
    data = request.get_json(silent=True)
    if not data or 'text' not in data:
        return jsonify({'error': "Request body must be JSON with a 'text' field"}), 400

    keywords = extract_keywords(data['text'])

    return jsonify({'keywords': keywords})

if __name__ == '__main__':
    app.run(debug=True)
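
# Quick smoke test for the two endpoints, assuming the default Flask dev
# server address (http://127.0.0.1:5000); adjust host/port if you run it
# elsewhere:
#
#   curl -X POST http://127.0.0.1:5000/text/summarize \
#        -H "Content-Type: application/json" \
#        -d '{"text": "First sentence. Second sentence. Third one. Fourth."}'
#
#   curl -X POST http://127.0.0.1:5000/text/extract \
#        -H "Content-Type: application/json" \
#        -d '{"text": "Flask makes small JSON services easy to build."}'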