import nltk
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('brown')
import spacy
from spacy import displacy
from collections import Counter
import en_core_web_sm
nlp = en_core_web_sm.load()
from transformers import pipeline
summarization = pipeline("summarization", model="facebook/bart-large-cnn")
from textblob import TextBlob
import gradio as gr

def story(txt):
    # Find all the names that appear in the story and define a helper
    # for finding the most frequently mentioned name
    doc = nlp(txt)
    labellist = []
    namelist = []
    orglist = [(X.text, X.label_) for X in doc.ents]

    def most_common(List):
        return max(set(List), key=List.count)

    # Collect every PERSON entity, then de-duplicate while keeping order
    for i in orglist:
        if i[1] == "PERSON":
            labellist.append(i[0])
    for i in labellist:
        if i not in namelist:
            namelist.append(i)

    # Generate a short summary for the story
    # (the pipeline returns a list of dicts with a 'summary_text' key)
    summary = summarization(txt)

    # Determine whether the story is positive, negative or neutral by
    # counting sentences with positive vs. negative polarity
    count = 0
    count2 = 0
    blob = TextBlob(txt)
    for sentence in blob.sentences:
        if sentence.sentiment.polarity > 0:
            count += 1
        if sentence.sentiment.polarity < 0:
            count2 += 1
    if count > count2:
        sentiment = "The story is positive."
    elif count