aka7774 committed on
Commit
21e6f0a
1 Parent(s): 8cefb2e

Update fn.py

Browse files
Files changed (1) hide show
  1. fn.py +5 -8
fn.py CHANGED
@@ -9,14 +9,10 @@ torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
9
  device = "cuda:0" if torch.cuda.is_available() else "cpu"
10
 
11
  model = None
12
- model_size = None
13
  pipe = None
14
 
15
- def load_model(_model_size):
16
- global model_size, model, pipe
17
-
18
- if _model_size and model_size != _model_size:
19
- model_size = _model_size
20
 
21
  # load model
22
  model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True)
@@ -33,9 +29,10 @@ def load_model(_model_size):
33
  )
34
 
35
  def speech_to_text(audio_file, _model_size = None):
36
- global model_size, model, pipe
37
 
38
- load_model(_model_size)
 
39
 
40
  # run inference
41
  result = pipe(audio_file)
 
9
  device = "cuda:0" if torch.cuda.is_available() else "cpu"
10
 
11
  model = None
 
12
  pipe = None
13
 
14
+ def load_model():
15
+ global model, pipe
 
 
 
16
 
17
  # load model
18
  model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True)
 
29
  )
30
 
31
  def speech_to_text(audio_file, _model_size = None):
32
+ global model, pipe
33
 
34
+ if not model:
35
+ load_model()
36
 
37
  # run inference
38
  result = pipe(audio_file)