shangeth committed
Commit e913b2a
1 parent: c19a8d3

checkpoint update

Files changed (2)
  1. config.json +6 -1
  2. model.py +1 -0
config.json CHANGED
@@ -1,13 +1,18 @@
 {
+  "architectures": [
+    "SpeechLLMModel"
+  ],
   "audio_enc_dim": 1024,
   "audio_encoder_name": "microsoft/wavlm-large",
   "audio_processor_name": "microsoft/wavlm-base",
   "auto_map": {
-    "AutoConfig": "config.SpeechLLMModelConfig"
+    "AutoConfig": "config.SpeechLLMModelConfig",
+    "AutoModel": "model.SpeechLLMModel"
   },
   "llm_dim": 2048,
   "llm_model_checkpoint": "hf_repo/llm_model_checkpoint",
   "llm_model_name": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
   "model_type": "custom_model",
+  "torch_dtype": "float32",
   "transformers_version": "4.41.2"
 }
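
The new "AutoModel" entry in auto_map is what lets the repository's custom SpeechLLMModel class be resolved through the Transformers Auto API when remote code is trusted. A minimal loading sketch follows; the repo id is a placeholder (the actual Hub repo id is not shown in this commit):

    # Sketch of loading a checkpoint that maps AutoConfig/AutoModel to custom classes.
    # "your-namespace/speechllm-checkpoint" is a placeholder repo id, not from this commit.
    from transformers import AutoConfig, AutoModel

    repo_id = "your-namespace/speechllm-checkpoint"

    # trust_remote_code=True is required so the repo's config.py and model.py are
    # executed to resolve SpeechLLMModelConfig and SpeechLLMModel via auto_map.
    config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
    model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)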
model.py CHANGED
@@ -114,6 +114,7 @@ Output:'''
         out = self.llm_model.generate(
             inputs_embeds=combined_embeds,
             max_new_tokens=max_new_tokens,
+            pad_token_id=self.llm_tokenizer.pad_token_id
         ).cpu().tolist()[0]

         output_text = self.llm_tokenizer.decode(out, skip_special_tokens=True)
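
Passing pad_token_id explicitly keeps generate() from warning about an unset pad token and falling back silently. A short sketch of the common fallback pattern, assuming the TinyLlama chat tokenizer ships without a dedicated pad token (an assumption, not something this commit configures):

    # Sketch: resolve a pad token id before calling generate().
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")

    # Common fallback when no pad token is defined (assumption for illustration):
    pad_id = tok.pad_token_id if tok.pad_token_id is not None else tok.eos_token_id

    # Mirrors the change in model.py:
    #     out = self.llm_model.generate(..., pad_token_id=pad_id)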