alice-hml committed
Commit
2329469
1 Parent(s): 741d69e

Upload corrector.py

Files changed (1)
  1. corrector.py +35 -0
corrector.py ADDED
@@ -0,0 +1,35 @@
+ import math
+ from transformers import MBartTokenizerFast, MBartForConditionalGeneration
+ import streamlit as st
+ from datasets import load_from_disk
+ from datasets.filesystems import S3FileSystem
+
+ s3 = S3FileSystem(anon=True)
+
+ @st.cache(allow_output_mutation=True)
+ def load_model():
+     print("Load correction model")
+     return MBartForConditionalGeneration.from_pretrained("aligator/mBART_french_correction")
+
+
+ @st.cache(allow_output_mutation=True)
+ def load_tokenizer():
+     print("Load tokenizer for correction model")
+     return MBartTokenizerFast.from_pretrained("aligator/mBART_french_correction")
+
+
+ model = load_model()
+ tokenizer = load_tokenizer()
+
+ def correct(sentence: str):
+     tokenizer.src_lang = "fr_XX"
+     encoded_orig = tokenizer(sentence, return_tensors="pt")
+     generated_tokens = model.generate(**encoded_orig,
+                                       forced_bos_token_id=tokenizer.lang_code_to_id["fr_XX"],
+                                       max_length=math.ceil(len(encoded_orig.input_ids[0])*1.20),
+                                       min_length=math.ceil(len(encoded_orig.input_ids[0])*0.8),
+                                       num_beams=5,
+                                       repetition_penalty=1.1,
+                                       # max_time=5,
+                                       )
+     return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
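
For context, a minimal sketch of how the correct() helper defined above could be called from a Streamlit page. The app.py file name, the page title, and the widget labels are assumptions for illustration only; they are not part of this commit.

# app.py -- hypothetical caller of corrector.correct(), not part of this commit
import streamlit as st

# Importing the module also loads the model and tokenizer at module level (cached by st.cache).
from corrector import correct

st.title("Correction du français")          # assumed page title
sentence = st.text_area("Texte à corriger")  # assumed widget label
if st.button("Corriger") and sentence.strip():
    # correct() returns the beam-searched corrected sentence as a plain string
    st.write(correct(sentence))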