SilvusTV committed
Commit 794777e
1 Parent(s): 121d715

fix translation (in array)

Files changed (2)
  1. app.py +4 -3
  2. translation.py +2 -2
app.py CHANGED
@@ -19,9 +19,10 @@ if st.button('générer'):
     st.write('response is :', responseBase)
 
     st.write('Part 2')
-    completeResponse = englishtofrench(longText(responseBase, question))
-    st.write('en : ',longText(responseBase, question))
-    st.write('fr : ',completeResponse)
+    enResponse = longText(responseBase, question)
+    frResponse = englishtofrench(enResponse)
+    st.write('en : ',enResponse)
+    st.write('fr : ',frResponse)
 
     st.write('Part 3')
     st.write('Next step : TTS')
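In app.py, the intermediate English text is now computed once and stored in enResponse, instead of calling longText() twice (once as input to the translation, once again for display). A minimal sketch of that change, with the app's own helpers stubbed out purely for illustration:

# Stubs standing in for the app's real helpers: longText builds the full
# English answer, englishtofrench runs the translation model.
def longText(base, question):
    return base + ' ' + question

def englishtofrench(text):
    return '<traduction de : ' + text + '>'

# Before the commit, longText(responseBase, question) ran twice, doing the
# work twice. After: build the English text once and reuse the same string.
enResponse = longText('some response', 'some question')
frResponse = englishtofrench(enResponse)
print('en : ', enResponse)
print('fr : ', frResponse)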
translation.py CHANGED
@@ -11,11 +11,11 @@ def frenchtoenglish(input):
     tokenizer.src_lang = "fr"
     encoded_fr = tokenizer(input, return_tensors="pt")
     generated_tokens = model.generate(**encoded_fr, forced_bos_token_id=tokenizer.get_lang_id("en"))
-    return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
+    return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
 
 def englishtofrench(input):
     # translate English to French
     tokenizer.src_lang = "en"
     encoded_en = tokenizer(input, return_tensors="pt")
     generated_tokens = model.generate(**encoded_en, forced_bos_token_id=tokenizer.get_lang_id("fr"))
-    return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
+    return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
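The underlying bug was in translation.py: tokenizer.batch_decode() returns a list with one string per generated sequence, so the app had been displaying a one-element array rather than the translation itself (hence the commit message "fix translation (in array)"). Indexing with [0] unwraps the single result. A minimal sketch of that behaviour, assuming the facebook/m2m100_418M checkpoint (the diff's get_lang_id() call is M2M100-specific, but the exact checkpoint is not shown in this commit):

from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")

# Translate English to French, mirroring englishtofrench().
tokenizer.src_lang = "en"
encoded_en = tokenizer("Hello world", return_tensors="pt")
generated_tokens = model.generate(**encoded_en, forced_bos_token_id=tokenizer.get_lang_id("fr"))

# batch_decode returns one string per sequence in the batch, i.e. a list.
decoded = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
print(decoded)     # e.g. ['Bonjour le monde'] -- a one-element list
print(decoded[0])  # e.g. 'Bonjour le monde'  -- the plain string st.write expects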