nickmuchi committed on
Commit
972218d
1 Parent(s): 2081499

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -362,28 +362,28 @@ def clean_text(text,doc=False,plain_text=False,url=False):
362
 
363
 
364
 
365
- @st.cache(allow_output_mutation=True,suppress_st_warning=True)
366
  def get_spacy():
367
  nlp = en_core_web_lg.load()
368
  return nlp
369
 
370
- @st.cache(allow_output_mutation=True,suppress_st_warning=True)
371
  def facebook_model():
372
 
373
  summarizer = pipeline('summarization',model='facebook/bart-large-cnn')
374
  return summarizer
375
 
376
- @st.cache(allow_output_mutation=True,suppress_st_warning=True)
377
  def schleifer_model():
378
 
379
  summarizer = pipeline('summarization',model='sshleifer/distilbart-cnn-12-6')
380
  return summarizer
381
 
382
- @st.cache(allow_output_mutation=True,suppress_st_warning=True)
383
  def get_sentence_embedding_model():
384
  return SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
385
 
386
- @st.cache(allow_output_mutation=True,suppress_st_warning=True)
387
  def get_ner_pipeline():
388
  tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-large-finetuned-conll03-english")
389
  model = AutoModelForTokenClassification.from_pretrained("xlm-roberta-large-finetuned-conll03-english")
 
362
 
363
 
364
 
365
+ @st.experimental_singleton(suppress_st_warning=True)
366
  def get_spacy():
367
  nlp = en_core_web_lg.load()
368
  return nlp
369
 
370
+ @st.experimental_singleton(suppress_st_warning=True)
371
  def facebook_model():
372
 
373
  summarizer = pipeline('summarization',model='facebook/bart-large-cnn')
374
  return summarizer
375
 
376
+ @st.experimental_singleton(suppress_st_warning=True)
377
  def schleifer_model():
378
 
379
  summarizer = pipeline('summarization',model='sshleifer/distilbart-cnn-12-6')
380
  return summarizer
381
 
382
+ @st.experimental_singleton(suppress_st_warning=True)
383
  def get_sentence_embedding_model():
384
  return SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
385
 
386
+ @st.experimental_singleton(suppress_st_warning=True)
387
  def get_ner_pipeline():
388
  tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-large-finetuned-conll03-english")
389
  model = AutoModelForTokenClassification.from_pretrained("xlm-roberta-large-finetuned-conll03-english")