imperialwool committed on
Commit 968018c
1 Parent(s): b8ccea4

Update gradio_app.py

Files changed (1)
  1. gradio_app.py +7 -4
gradio_app.py CHANGED
@@ -4,18 +4,21 @@ from llama_cpp import Llama
 import gradio as gr
 import psutil
 
-# Initing things
-llm = Llama(model_path="./model.bin") # LLaMa model
-llama_model_name = "TheBloke/Llama-2-13B-chat-GGUF"
+# Initing things
+print("! DOWNLOADING TOKENIZER AND SETTING ALL UP !")
 translator_tokenizer = M2M100Tokenizer.from_pretrained( # tokenizer for translator
     "facebook/m2m100_418M", cache_dir="translator/"
 )
+print("! DOWNLOADING MODEL AND SETTING ALL UP !")
 translator_model = M2M100ForConditionalGeneration.from_pretrained( # translator model
     "facebook/m2m100_418M", cache_dir="translator/"
 )
 print("! SETTING MODEL IN EVALUATION MODE !")
 translator_model.eval()
-print("! DONE !")
+print("! INITING LLAMA MODEL !")
+llm = Llama(model_path="./model.bin") # LLaMa model
+llama_model_name = "TheBloke/Llama-2-13B-chat-GGUF"
+print("! INITING DONE !")
 
 # Preparing things to work
 translator_tokenizer.src_lang = "en"
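For context, a minimal sketch (not part of this commit) of how the objects initialized above are typically used later in gradio_app.py. The helper names translate_text and ask_llama are hypothetical; the M2M100 generate/decode pattern and the llama-cpp-python callable interface are the standard APIs of those libraries.

# Hypothetical usage sketch, assuming the rest of gradio_app.py wires the
# objects together roughly like this (function names are illustrative):
def translate_text(text, src_lang, dst_lang):
    # M2M100 needs the source language set on the tokenizer before encoding
    translator_tokenizer.src_lang = src_lang
    encoded = translator_tokenizer(text, return_tensors="pt")
    generated = translator_model.generate(
        **encoded,
        forced_bos_token_id=translator_tokenizer.get_lang_id(dst_lang),
    )
    return translator_tokenizer.batch_decode(generated, skip_special_tokens=True)[0]

def ask_llama(prompt, max_tokens=256):
    # llama-cpp-python exposes the loaded GGUF model as a callable
    result = llm(prompt, max_tokens=max_tokens)
    return result["choices"][0]["text"]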