File size: 1,664 Bytes
e959bb8
 
 
 
 
 
 
 
 
1fb75bd
e959bb8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
# --- NLTK setup: fetch tokenizer/tagger corpora required by TextBlob ---
import nltk
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('brown')

# --- spaCy setup: small English model for named-entity recognition ---
# NOTE(review): `displacy` and `Counter` are imported but never used below —
# possibly leftovers from an earlier version; left in place deliberately.
import spacy
from spacy import displacy
from collections import Counter
import en_core_web_sm
nlp = en_core_web_sm.load()

# --- Hugging Face transformers: BART summarization pipeline ---
# Loaded once at import time; `story()` below reuses this module-level object.
from transformers import pipeline
summarization = pipeline("summarization", model = "facebook/bart-large-cnn")

# TextBlob provides per-sentence sentiment polarity used in `story()`.
from textblob import TextBlob

# Gradio builds the web UI at the bottom of the file.
import gradio as gr

def story(txt):
  """Analyze a story: list person names, pick the most frequent one,
  summarize the text, and classify its overall sentiment.

  Parameters
  ----------
  txt : str
      The story text to analyze.

  Returns
  -------
  tuple
      Labels and values: all unique names, the most frequent name,
      a short summary (raw pipeline output), and a sentiment verdict.
  """
  # --- Named-entity pass: collect every PERSON mention via spaCy ---
  doc = nlp(txt)
  labellist = [ent.text for ent in doc.ents if ent.label_ == "PERSON"]

  def most_common(mentions):
    # Most frequent item; ties broken arbitrarily (max over the unique set).
    return max(set(mentions), key=mentions.count)

  # Unique names in first-appearance order (dict preserves insertion order).
  namelist = list(dict.fromkeys(labellist))

  # --- Short summary via the module-level BART summarization pipeline ---
  summary = summarization(txt)

  # --- Sentiment: count positive vs. negative sentences with TextBlob ---
  # FIX: the original looped over sentence.sentiment (a 2-tuple of
  # polarity/subjectivity), double-counting every sentence; one polarity
  # check per sentence is sufficient and preserves the comparison.
  count = 0
  count2 = 0
  blob = TextBlob(txt)
  for sentence in blob.sentences:
    polarity = sentence.sentiment[0]
    if polarity > 0:
      count += 1
    elif polarity < 0:
      count2 += 1
  if count > count2:
    sentiment = ("The story is positive.")
  elif count < count2:
    sentiment = ("The story is negative.")  # FIX: typo "stroy" -> "story"
  else:
    sentiment = ("The story is neutral.")

  # FIX: guard against stories with no detected names — the original
  # crashed with ValueError (max() of an empty set).
  if labellist:
    top_name = most_common(labellist) + "."
  else:
    top_name = "none found."

  output = ("All names appeared:", namelist,
            "The most appeared name is", top_name,
            "Short summary:", summary, sentiment)
  return(output)

# Wire the analyzer into a minimal text-in / text-out Gradio web app.
# `demo` stays at module level so hosting platforms can discover it.
demo = gr.Interface(
    fn=story,
    inputs="text",
    outputs="text",
)

demo.launch()