from gensim.parsing.preprocessing import STOPWORDS
import wikipedia
import gradio as gr
from gradio.mix import Parallel
import requests
import nltk
from nltk.tokenize import word_tokenize
from nltk.tokenize import sent_tokenize
import re

nltk.download('punkt')


def opendomain(text):
    """Answer an open-domain question via a remote content-generation service.

    Pipeline:
      1. Lowercase and tokenize ``text``, dropping stopwords and common
         question filler words to form a keyword query.
      2. POST the query to the jaimin-new-content HF Space endpoint.
      3. Strip ``= heading =`` markers from the returned text, split it into
         paragraphs, and keep the first sentence of each long paragraph
         (> 500 chars) among the first six paragraphs.

    Parameters
    ----------
    text : str
        The user's natural-language question.

    Returns
    -------
    str
        Space-joined summary sentences, each terminated with a period.
        May be empty if the service returns no long paragraphs.

    Raises
    ------
    requests.RequestException
        On network failure or timeout contacting the remote endpoint.
    """
    # Filler words to strip from the question, beyond gensim's stopword list.
    question_words = STOPWORDS.union(
        set(['likes', 'play', '.', ',', 'like', "don't", '?', 'use',
             'choose', 'important', 'better'])
    )
    tokens = word_tokenize(text.lower())
    keywords = [tok for tok in tokens if tok not in question_words]
    # BUG FIX: the original joined with "" which mashed all keywords into one
    # unreadable string; join with a space so the service gets a real query.
    new_txt = " ".join(keywords)

    # Timeout added so a hung endpoint cannot block the UI indefinitely.
    r = requests.post(
        url="https://jaimin-new-content.hf.space/run/predict",
        json={"data": [new_txt, "en"]},
        timeout=60,
    )
    response = r.json()
    final_out = response["data"][0]

    # Remove wiki-style "= Section =" headings from the generated text.
    final_out = re.sub(r'\=.+\=', '', final_out)

    # Non-empty paragraphs of the generated article.
    paragraphs = [p for p in final_out.split('\n\n') if p != '']

    # BUG FIX: the original indexed result[0..5] unconditionally and raised
    # IndexError when fewer than six paragraphs came back; slice instead.
    answer = [p.split(".")[0] for p in paragraphs[:6] if len(p) > 500]

    # Re-terminate each leading sentence and flatten into a single string.
    summary = ' '.join(sentence + "." for sentence in answer)
    return summary.replace("\n", "")


iface = gr.Interface(fn=opendomain, inputs=[gr.inputs.Textbox(lines=5)], outputs="text")

if __name__ == "__main__":
    iface.launch()