Update functions.py
functions.py  +1 -1
@@ -35,7 +35,7 @@ def load_models():
     q_tokenizer = AutoTokenizer.from_pretrained("nickmuchi/quantized-optimum-finbert-tone")
     ner_tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-large-finetuned-conll03-english")
     sent_pipe = pipeline("text-classification",model=q_model, tokenizer=q_tokenizer)
-    sum_pipe = pipeline("summarization",model="facebook/bart-large-cnn", tokenizer="facebook/bart-large-cnn")
+    sum_pipe = pipeline("summarization",model="facebook/bart-large-cnn", tokenizer="facebook/bart-large-cnn",clean_up_tokenization_spaces=True)
     ner_pipe = pipeline("ner", model=ner_model, tokenizer=ner_tokenizer, grouped_entities=True)
     sbert = SentenceTransformer("all-mpnet-base-v2")
     cross_encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-12-v2')
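
For context, clean_up_tokenization_spaces=True asks the tokenizer to strip stray spaces around punctuation when decoding the generated summary. A minimal sketch of how the updated sum_pipe could be exercised on its own follows; the sample article text and the max_length/min_length values are illustrative and not taken from this repo.

from transformers import pipeline

# Rebuild only the summarization pipeline as in this commit; the other models are omitted here.
sum_pipe = pipeline(
    "summarization",
    model="facebook/bart-large-cnn",
    tokenizer="facebook/bart-large-cnn",
    clean_up_tokenization_spaces=True,  # tidy spaces around punctuation in the decoded summary
)

# Illustrative input; any longish financial paragraph works.
article = (
    "The company reported quarterly revenue of $2.1 billion, up 12% year over year, "
    "driven by strong demand for its cloud services and a rebound in advertising spend. "
    "Management raised full-year guidance and announced a $500 million buyback program."
)

# max_length / min_length are example values, not settings from functions.py.
summary = sum_pipe(article, max_length=60, min_length=10, do_sample=False)
print(summary[0]["summary_text"])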