Paulie-Aditya committed
Commit 11dcb45 · 1 Parent(s): 7d5a525

only using one model now

Files changed (1)
  1. app.py +28 -29
app.py CHANGED
@@ -36,36 +36,35 @@ def translation(text,dest_lang,dest_lang_code, src_lang_code):
 
     headers = {"Authorization": f"Bearer {api_token_header}"}
 
-    # Bengali Done
-    if(dest_lang == "Bengali" and src_lang_code == "en_XX"):
-        API_URL = "https://api-inference.huggingface.co/models/csebuetnlp/banglat5_nmt_en_bn"
-        def query(payload):
-            response = requests.post(API_URL, headers=headers, json=payload)
-            return response.json()
-        output = query({
-            "inputs": text,
-        })
-        print(output)
-        return output[0]['translation_text']
+    # # Bengali Done
+    # if(dest_lang == "Bengali" and src_lang_code == "en_XX"):
+    #     API_URL = "https://api-inference.huggingface.co/models/csebuetnlp/banglat5_nmt_en_bn"
+    #     def query(payload):
+    #         response = requests.post(API_URL, headers=headers, json=payload)
+    #         return response.json()
+    #     output = query({
+    #         "inputs": text,
+    #     })
+    #     print(output)
+    #     return output[0]['translation_text']
+    # else:
+    global model
+    if model:
+        pass
     else:
-        global model
-        if model:
-            pass
-        else:
-            model = load_model()
-        loaded_model = model
-        tokenizer = MBart50TokenizerFast.from_pretrained("SnypzZz/Llama2-13b-Language-translate", src_lang=src_lang_code)
-        #model_inputs = tokenizer(text, return_tensors="pt")
-        loaded_model_inputs = tokenizer(text, return_tensors="pt")
-
-        # translate
-        generated_tokens = loaded_model.generate(
-            **loaded_model_inputs,
-            forced_bos_token_id=tokenizer.lang_code_to_id[dest_lang_code]
-        )
-        output = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
-        print(output)
-        return output[0]
+        model = load_model()
+    loaded_model = model
+    tokenizer = MBart50TokenizerFast.from_pretrained("SnypzZz/Llama2-13b-Language-translate", src_lang=src_lang_code)
+    #model_inputs = tokenizer(text, return_tensors="pt")
+    loaded_model_inputs = tokenizer(text, return_tensors="pt")
+    # translate
+    generated_tokens = loaded_model.generate(
+        **loaded_model_inputs,
+        forced_bos_token_id=tokenizer.lang_code_to_id[dest_lang_code]
+    )
+    output = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
+    print(output)
+    return output[0]
 
 
 def main_translation(text,dest_lang_code,src_lang_code):
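
For reference, the path this commit keeps is the standard mBART-50 "generate with a forced target-language BOS token" pattern. The sketch below is not part of the commit; it assumes load_model() (not shown in this diff) returns an MBartForConditionalGeneration for the same SnypzZz/Llama2-13b-Language-translate checkpoint, and it uses English to Bengali ("en_XX" to "bn_IN") as an example language pair.

    # Minimal sketch of the single-model path, under the assumptions stated above.
    from transformers import MBartForConditionalGeneration, MBart50TokenizerFast

    MODEL_ID = "SnypzZz/Llama2-13b-Language-translate"  # mBART-50 many-to-many checkpoint

    model = MBartForConditionalGeneration.from_pretrained(MODEL_ID)  # stands in for load_model()
    tokenizer = MBart50TokenizerFast.from_pretrained(MODEL_ID, src_lang="en_XX")

    inputs = tokenizer("Hello, how are you?", return_tensors="pt")

    # forced_bos_token_id makes the decoder start with the target-language token.
    generated = model.generate(
        **inputs,
        forced_bos_token_id=tokenizer.lang_code_to_id["bn_IN"],
    )
    print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])

The global model / load_model() check in the committed code is a lazy-load guard: the checkpoint is loaded once on the first call to translation() and reused afterwards instead of being reloaded on every request.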