Update model caching
models.py CHANGED
@@ -28,7 +28,7 @@ def create_nest_sentences(document:str, token_max_length = 1024):
     return nested
 
 # Reference: https://github.com/MaartenGr/KeyBERT
-@st.
+@st.cache_resource
 def load_keyword_model():
     kw_model = KeyBERT()
     return kw_model
@@ -45,7 +45,7 @@ def keyword_gen(kw_model, sequence:str):
 
 
 # Reference: https://huggingface.co/facebook/bart-large-mnli
-@st.
+@st.cache_resource
 def load_summary_model():
     model_name = "facebook/bart-large-cnn"
     summarizer = pipeline(task='summarization', model=model_name)
@@ -82,7 +82,7 @@ def summarizer_gen(summarizer, sequence:str, maximum_tokens:int, minimum_tokens:
 
 
 # Reference: https://huggingface.co/spaces/team-zero-shot-nli/zero-shot-nli/blob/main/utils.py
-@st.
+@st.cache_resource
 def load_model():
     model_name = "facebook/bart-large-mnli"
     tokenizer = AutoTokenizer.from_pretrained(model_name)
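For context, here is a minimal sketch of the three loader functions after this change, with the decorators replaced by @st.cache_resource as in the hunks above. The function bodies are taken from the diff; the return statements of load_summary_model and load_model, the classifier construction in load_model, and the exact import list are assumptions, since those lines are not visible in the diff.

```python
import streamlit as st
from keybert import KeyBERT
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline

# Reference: https://github.com/MaartenGr/KeyBERT
@st.cache_resource
def load_keyword_model():
    # st.cache_resource keeps one KeyBERT instance per server process,
    # so the embedding model is not reloaded on every Streamlit rerun.
    kw_model = KeyBERT()
    return kw_model

# Reference: https://huggingface.co/facebook/bart-large-mnli
@st.cache_resource
def load_summary_model():
    model_name = "facebook/bart-large-cnn"
    summarizer = pipeline(task='summarization', model=model_name)
    return summarizer  # return statement assumed; not shown in the diff

# Reference: https://huggingface.co/spaces/team-zero-shot-nli/zero-shot-nli/blob/main/utils.py
@st.cache_resource
def load_model():
    model_name = "facebook/bart-large-mnli"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # The diff cuts off here; loading the sequence-classification model and
    # returning both objects is an assumption about the remaining lines.
    classifier = AutoModelForSequenceClassification.from_pretrained(model_name)
    return tokenizer, classifier
```

st.cache_resource is the caching primitive Streamlit recommends for unserializable global resources such as ML models: the decorated function runs once, and the returned object is shared across sessions and reruns instead of being recreated each time the script executes.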