Tags: Feature Extraction · Transformers · Safetensors · English · custom_model · multi-modal · conversational · speechllm · speech2text · custom_code
shangeth committed
Commit 0d15b92 · 1 Parent(s): f4b700f

Delete MyModel.py

Files changed (1):
MyModel.py +0 -101
MyModel.py DELETED
@@ -1,101 +0,0 @@
-import torch
-from torch import nn
-import torchaudio
-from transformers import PreTrainedModel, AutoModelForCausalLM, AutoTokenizer, HubertModel, AutoProcessor
-from .MyConfig import CustomModelConfig
-from peft import LoraConfig, get_peft_model
-
-class HubertXCNNEnoder(nn.Module):
-    def __init__(self, audio_enc_dim, llm_dim):
-        super().__init__()
-        self.encoder = HubertModel.from_pretrained('facebook/hubert-xlarge-ll60k')
-        for param in self.encoder.parameters():
-            param.requires_grad = False
-
-        self.cnn = nn.Sequential(
-            nn.ReLU(),
-            nn.Conv1d(audio_enc_dim, llm_dim // 2, kernel_size=5, stride=1, padding=0),
-            nn.ReLU(),
-            nn.Conv1d(llm_dim // 2, llm_dim, kernel_size=5, stride=2, padding=0),
-            nn.ReLU(),
-            nn.Conv1d(llm_dim, llm_dim, kernel_size=3, stride=1, padding=0),
-        )
-
-    def forward(self, x):
-        x = self.encoder(x).last_hidden_state
-        x = self.cnn(x.transpose(1, 2)).transpose(1, 2)
-        return x
-
-class CustomModel(PreTrainedModel):
-    config_class = CustomModelConfig
-
-    def __init__(self, config):
-        super().__init__(config)
-        self.audio_processor = AutoProcessor.from_pretrained("facebook/hubert-large-ls960-ft")
-        self.audio_encoder = HubertXCNNEnoder(config.audio_enc_dim, config.llm_dim)
-        self.llm_model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
-        self.llm_tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
-
-        peft_config = LoraConfig(
-            r=4,
-            lora_alpha=8,
-            target_modules=['q_proj', 'k_proj', 'v_proj', 'o_proj', 'up_proj', 'down_proj', 'gate_proj'],
-            lora_dropout=0.05,
-            task_type="CAUSAL_LM",
-        )
-        self.llm_model = get_peft_model(self.llm_model, peft_config)
-
-    def encode(self, mel, pre_tokenized_ids, post_tokenized_ids, output_tokenized_ids):
-        batch_size = mel.shape[0]
-
-        with torch.no_grad():
-            speech_embeds = self.audio_encoder(mel)
-            embedder = self.llm_model.model.model.embed_tokens
-            pre_prompt_embeds = embedder(pre_tokenized_ids)
-            post_prompt_embeds = embedder(post_tokenized_ids)
-            output_prompt_embeds = embedder(output_tokenized_ids)
-
-        combined_embeds = torch.cat([pre_prompt_embeds, speech_embeds, post_prompt_embeds, output_prompt_embeds], dim=1)
-        atts = torch.ones(combined_embeds.size()[:-1], dtype=torch.long).to(combined_embeds.device)
-
-        input_token_length = pre_tokenized_ids.shape[1] + speech_embeds.shape[1] + post_tokenized_ids.shape[1]
-        label_ids = torch.cat([
-            torch.ones([batch_size, input_token_length], device=combined_embeds.device) * -100,
-            output_tokenized_ids
-        ], 1).to(combined_embeds.device).to(torch.int64)
-        return combined_embeds, atts, label_ids
-
-    def forward(self, wav_tensor, pre_tokenized_ids, post_tokenized_ids, output_tokenized_ids, attention_mask=None):
-        combined_embeds, atts, label_ids = self.encode(wav_tensor, pre_tokenized_ids, post_tokenized_ids, output_tokenized_ids)
-        outputs = self.llm_model(inputs_embeds=combined_embeds, attention_mask=attention_mask)
-        return outputs
-
-    def generate_meta(self, audio_path, instruction="Give me the following information about the audio [Transcript]", max_new_tokens=2000):
-        pre_speech_prompt = f'''Instruction:
-{instruction}
-
-Input:
-<speech>'''
-        post_speech_prompt = f'''</speech>
-
-Output:'''
-        output_prompt = '\n<s>'
-
-        with torch.no_grad():
-            wav_tensor, sr = torchaudio.load(audio_path)
-            wav_tensor = self.audio_processor(wav_tensor.squeeze(), return_tensors="pt", sampling_rate=16000).input_values
-
-            pre_tokenized_ids = self.llm_tokenizer(pre_speech_prompt, padding="do_not_pad", return_tensors='pt', truncation=False, add_special_tokens=False)["input_ids"]
-            post_tokenized_ids = self.llm_tokenizer(post_speech_prompt, padding="do_not_pad", return_tensors='pt', truncation=False, add_special_tokens=False)["input_ids"]
-            output_tokenized_ids = self.llm_tokenizer(output_prompt, padding="do_not_pad", return_tensors='pt', truncation=False, add_special_tokens=False)["input_ids"]
-
-            combined_embeds, atts, label_ids = self.encode(wav_tensor, pre_tokenized_ids, post_tokenized_ids, output_tokenized_ids)
-
-            out = self.llm_model.generate(
-                inputs_embeds=combined_embeds,
-                max_new_tokens=max_new_tokens,
-            ).cpu().tolist()[0]
-
-        output_text = self.llm_tokenizer.decode(out, skip_special_tokens=False)
-        return output_text
-
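
Since the repo is tagged custom_code, the deleted class was presumably loaded through Transformers' remote-code path before this commit. Below is a minimal, hypothetical usage sketch, not taken from the diff: the repo id and audio path are placeholders, and only trust_remote_code=True and the generate_meta() entry point are grounded in the file shown above.

# Hypothetical usage sketch; repo id and audio path are placeholders.
from transformers import AutoModel

# custom_code repos execute MyModel.py / MyConfig.py fetched from the Hub,
# which is why trust_remote_code=True is required; "<user>/<repo>" is
# illustrative only, not a repo id confirmed by this diff.
model = AutoModel.from_pretrained("<user>/<repo>", trust_remote_code=True)

# generate_meta() (defined in the deleted file) loads the audio, wraps it in
# the Instruction / <speech>...</speech> / Output prompt, and returns the
# decoded LLM text.
text = model.generate_meta("sample.wav")  # placeholder path; 16 kHz speech expected
print(text)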