Commit: ok run huggingface-cli login in terminal en1e
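The commit message refers to authenticating with the Hugging Face Hub: the diff below loads Linggg/t5_summary with use_auth_token=True, which only works if a token from huggingface-cli login (or an equivalent) is available at runtime. A minimal sketch of the programmatic route, assuming the huggingface_hub package is installed (the HF_TOKEN variable name is an illustrative choice, not something this repo defines):

import os

from huggingface_hub import login

# Equivalent to running `huggingface-cli login` in a terminal:
# reads a user access token created under the Hub account settings.
login(token=os.environ["HF_TOKEN"])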
src/inference_t5.py (+4 -6, CHANGED)
@@ -2,7 +2,6 @@
 Allows to predict the summary for a given entry text
 """
 import torch
-import contractions
 import re
 import string
 from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@@ -10,13 +9,12 @@ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
 
 def clean_text(texts: str) -> str:
     texts = texts.lower()
-    texts = contractions.fix(texts)
     texts = texts.translate(str.maketrans("", "", string.punctuation))
     texts = re.sub(r'\n', ' ', texts)
     return texts
 
 
-def inferenceAPI(text: str) -> str:
+def inferenceAPI_T5(text: str) -> str:
     """
     Predict the summary for an input text
     --------
@@ -31,10 +29,10 @@ def inferenceAPI(text: str) -> str:
     # Define the input parameters for the model
     text = clean_text(text)
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-    tokenizer = (AutoTokenizer.from_pretrained("Linggg/t5_summary"))
+    tokenizer = (AutoTokenizer.from_pretrained("Linggg/t5_summary",use_auth_token=True))
     # load local model
     model = (AutoModelForSeq2SeqLM
-             .from_pretrained("Linggg/t5_summary")
+             .from_pretrained("Linggg/t5_summary",use_auth_token=True)
              .to(device))
 
     text_encoding = tokenizer(
@@ -64,4 +62,4 @@
 
 # if __name__ == "__main__":
 #     text = input('Entrez votre phrase à résumer : ')
-#     print('summary:', inferenceAPI(text))
+#     print('summary:', inferenceAPI_T5(text))
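The view above truncates inferenceAPI_T5 after text_encoding = tokenizer(. For context, here is a sketch of a typical T5 encode/generate/decode body, under the assumption that the function follows the standard transformers pattern; the generation parameters are illustrative placeholders, not the repo's actual values:

    # Hypothetical continuation after the truncated tokenizer( call.
    text_encoding = tokenizer(
        text,
        max_length=512,
        truncation=True,
        return_tensors="pt",
    )
    # Generate token ids for the summary and decode them back to text.
    generated_ids = model.generate(
        input_ids=text_encoding["input_ids"].to(device),
        attention_mask=text_encoding["attention_mask"].to(device),
        max_length=128,
        num_beams=4,
        early_stopping=True,
    )
    return tokenizer.decode(generated_ids[0], skip_special_tokens=True)

Note that recent transformers releases deprecate use_auth_token in favor of a token argument to from_pretrained; the use_auth_token=True form added in this commit matches the older versions this Space presumably pins.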