dipesh1701 committed on
Commit a9c115a
1 Parent(s): b0ef2d7
Files changed (1)
  1. app.py +12 -8
app.py CHANGED
@@ -1,11 +1,12 @@
 import torch
 import gradio as gr
 import time
+import asyncio
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
 from flores200_codes import flores_codes
 
 # Load models and tokenizers once during initialization
-def load_models():
+async def load_models():
     model_name_dict = {
         "nllb-distilled-600M": "facebook/nllb-200-distilled-600M",
     }
@@ -14,8 +15,8 @@ def load_models():
 
     for call_name, real_name in model_name_dict.items():
         print("\tLoading model:", call_name)
-        model = AutoModelForSeq2SeqLM.from_pretrained(real_name)
-        tokenizer = AutoTokenizer.from_pretrained(real_name)
+        model = await asyncio.to_thread(AutoModelForSeq2SeqLM.from_pretrained, real_name)
+        tokenizer = await asyncio.to_thread(AutoTokenizer.from_pretrained, real_name)
         model_dict[call_name] = {
             "model": model,
             "tokenizer": tokenizer,
@@ -53,11 +54,11 @@ def translate_text(source_lang, target_lang, input_text, model_dict):
     }
     return translated_result
 
-if __name__ == "__main__":
+async def main():
     print("\tInitializing models")
 
     # Load models and tokenizers
-    model_dict = load_models()  # Ensure that this line initializes model_dict correctly
+    model_dict = await load_models()
 
     lang_codes = list(flores_codes.keys())
     inputs = [
@@ -76,11 +77,14 @@ if __name__ == "__main__":
     examples = [["English", "Nepali", "Hello, how are you?"]]
 
     gr.Interface(
-        fn=translate_text,
-        inputs=inputs,
-        outputs=outputs,
+        translate_text,
+        inputs,
+        outputs,
         title=title,
         description=app_description,
         examples=examples,
         examples_per_page=50,
     ).launch()
+
+if __name__ == "__main__":
+    asyncio.run(main())
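
The core of this change is that the blocking from_pretrained calls are pushed onto a worker thread with asyncio.to_thread (Python 3.9+), so the event loop is not blocked while the checkpoint loads. Below is a minimal standalone sketch of that pattern, separate from the app.py above; the helper name load_model_and_tokenizer is hypothetical, and the checkpoint name is the one used in the diff.

import asyncio

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_NAME = "facebook/nllb-200-distilled-600M"  # same checkpoint as in the diff

async def load_model_and_tokenizer(name: str):
    # Each blocking from_pretrained call runs in the default thread pool;
    # awaiting it yields control to the event loop until loading finishes.
    model = await asyncio.to_thread(AutoModelForSeq2SeqLM.from_pretrained, name)
    tokenizer = await asyncio.to_thread(AutoTokenizer.from_pretrained, name)
    return model, tokenizer

async def main():
    model, tokenizer = await load_model_and_tokenizer(MODEL_NAME)
    print(type(model).__name__, type(tokenizer).__name__)

if __name__ == "__main__":
    asyncio.run(main())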
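The last hunk also drops the keyword names on the first three gr.Interface arguments. That part is cosmetic: fn, inputs and outputs are the first three positional parameters of gr.Interface, so the two call styles build the same interface. A small sanity check, assuming Gradio is installed (the echo function is only illustrative):

import gradio as gr

def echo(text):
    return text

# Positional and keyword forms are equivalent here.
demo_positional = gr.Interface(echo, "text", "text", title="echo")
demo_keyword = gr.Interface(fn=echo, inputs="text", outputs="text", title="echo")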