wgetdd committed on
Commit a21cd2b • 1 Parent(s): 66a80e1

Update app.py

Files changed (1)
  app.py +2 -2
app.py CHANGED
@@ -17,7 +17,7 @@ QA_TOKEN_ID = 50295 # token for qa
 device = "cuda" if torch.cuda.is_available() else "cpu"
 clip_embed = 768
 phi_embed = 2560
-compute_type = "float16"
+#compute_type = "float16"
 audio_batch_size = 16
 
 class SimpleResBlock(nn.Module):
@@ -38,7 +38,7 @@ clip_model = CLIPVisionModel.from_pretrained(clip_model_name).to(device)
 projection = torch.nn.Linear(clip_embed, phi_embed).to(device)
 resblock = SimpleResBlock(phi_embed).to(device)
 phi_model = AutoModelForCausalLM.from_pretrained(phi_model_name,trust_remote_code=True).to(device)
-audio_model = whisperx.load_model("tiny", device, compute_type=compute_type)
+audio_model = whisperx.load_model("tiny", device)
 
 # load weights
 model_to_merge = PeftModel.from_pretrained(phi_model,'./model_chkpt/lora_adaptor')
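For context: whisperx.load_model accepts a CTranslate2 compute_type argument (as the removed line shows), and "float16" generally isn't usable on CPU backends, so dropping it leaves the library default. A minimal sketch, not part of this commit, of picking the compute type per device instead; the conditional and the "int8" CPU choice are assumptions, not the app's code:

# Hypothetical sketch: choose a CTranslate2 compute type based on the available device
# rather than hard-coding "float16", which CPU backends typically cannot run.
import torch
import whisperx

device = "cuda" if torch.cuda.is_available() else "cpu"
compute_type = "float16" if device == "cuda" else "int8"  # "int8" is an assumed CPU-friendly choice

audio_model = whisperx.load_model("tiny", device, compute_type=compute_type)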