import re

import gradio as gr
import nltk
import requests
from nltk.tokenize import word_tokenize

# word_tokenize relies on NLTK's Punkt tokenizer models.
nltk.download('punkt')


def opendomain(text):
    # Tokens to strip from the question so only the topic keywords remain.
    question_words = ["what", "why", "when", "where", "name", "is", "how", "do", "does",
                      "which", "are", "could", "would", "should", "has", "have", "whom",
                      "whose", "don't", "a", "an", "?", ".", "the", "i", "you", "he", "she",
                      "it", "that", "this", ",", "am"]
    lower_text = word_tokenize(text.lower())
    new_text = [token for token in lower_text if token not in question_words]
    # Join with spaces so multi-word topics stay intact as a query string.
    new_txt = " ".join(new_text)

    # Query a companion Hugging Face Space that returns Wikipedia-style
    # content for the extracted topic.
    r = requests.post(
        url="https://jaimin-new-content.hf.space/run/predict",
        json={"data": [new_txt, "en"]},
        timeout=60,
    )
    r.raise_for_status()
    response = r.json()
    final_out = response["data"][0]
    # Drop section headings such as "== History ==".
    final_out = re.sub(r'\=.+\=', '', final_out)

    # Split into paragraphs and discard empty chunks.
    result = [p for p in final_out.split('\n\n') if p != '']

    # Keep the first sentence of each long paragraph, looking at no more than
    # six paragraphs and without assuming the response always contains six.
    answer = []
    for paragraph in result[:6]:
        if len(paragraph) > 500:
            answer.append(paragraph.split(".")[0])

    # Re-attach the trailing period to each summary sentence and flatten the
    # list into a single answer string with newlines removed.
    summary = " ".join(sentence + "." for sentence in answer)
    return summary.replace("\n", "")
# Expose the pipeline as a simple Gradio app: free-text question in, summary out.
iface = gr.Interface(fn=opendomain, inputs=gr.Textbox(lines=5), outputs="text")
iface.launch()
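
# A minimal sketch of exercising the pipeline directly, e.g. from a REPL or a
# test script, without launching the UI. It assumes the
# jaimin-new-content.hf.space endpoint above is reachable; the exact text
# returned depends on that remote Space.
#
#   print(opendomain("What is machine learning?"))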