import torch
from torch import nn
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import LoraConfig, get_peft_model, PeftModel
import pytorch_lightning as pl

from model import HubertXCNNEnoder
class SpeechLLMLightning(pl.LightningModule):
    def __init__(self, audio_enc_dim=512, llm_dim=2048, llm_name="TinyLlama/TinyLlama-1.1B-Chat-v1.0"):
        super().__init__()
        self.save_hyperparameters()
        self.audio_enc_dim = audio_enc_dim
        self.llm_dim = llm_dim
        self.llm_name = llm_name

        # Speech encoder that maps audio features into the LLM's embedding space.
        self.audio_encoder = HubertXCNNEnoder(self.audio_enc_dim, self.llm_dim)

        self.llm_tokenizer = AutoTokenizer.from_pretrained(self.llm_name)
        self.llm_tokenizer.pad_token = self.llm_tokenizer.eos_token
        # Note: device_map="auto" lets accelerate place the weights, which can
        # conflict with Lightning's own device handling in multi-GPU training.
        self.llm_model = AutoModelForCausalLM.from_pretrained(
            self.llm_name,
            device_map="auto",
        )

        # Wrap the LLM with LoRA adapters on the attention and MLP projections.
        peft_config = LoraConfig(
            r=4,
            lora_alpha=8,
            target_modules=['q_proj', 'k_proj', 'v_proj', 'o_proj', 'up_proj', 'down_proj', 'gate_proj'],
            lora_dropout=0.05,
            task_type="CAUSAL_LM",
        )
        self.llm_model = get_peft_model(self.llm_model, peft_config)
        self.llm_model.print_trainable_parameters()

        # Freeze everything, including the LoRA adapters, and switch to eval mode:
        # this module is set up for inference, with adapter weights expected to be
        # loaded from a trained checkpoint.
        for param in self.llm_model.parameters():
            param.requires_grad = False
        self.audio_encoder.eval()
        self.llm_model.eval()
    def encode(self, mel, pre_tokenized_ids, post_tokenized_ids, output_tokenized_ids):
        batch_size = mel.shape[0]

        # Project the audio into a sequence of LLM-sized embeddings.
        speech_embeds = self.audio_encoder(mel)

        # get_input_embeddings() resolves the LLM's token embedding table through
        # the PEFT wrapper, without hard-coding the nesting of inner .model attributes.
        embedder = self.llm_model.get_input_embeddings()
        pre_prompt_embeds = embedder(pre_tokenized_ids)
        post_prompt_embeds = embedder(post_tokenized_ids)
        output_prompt_embeds = embedder(output_tokenized_ids)

        # Sequence layout: <pre prompt> <speech> <post prompt> <expected output>.
        combined_embeds = torch.cat([pre_prompt_embeds, speech_embeds, post_prompt_embeds, output_prompt_embeds], dim=1)
        atts = torch.ones(combined_embeds.size()[:-1], dtype=torch.long, device=combined_embeds.device)

        # Labels are -100 over the prompt and speech positions so the loss is
        # computed only on the expected output tokens.
        input_token_length = pre_tokenized_ids.shape[1] + speech_embeds.shape[1] + post_tokenized_ids.shape[1]
        label_ids = torch.cat([
            torch.full([batch_size, input_token_length], -100, dtype=torch.int64, device=combined_embeds.device),
            output_tokenized_ids,
        ], dim=1)
        return combined_embeds, atts, label_ids
    def forward(self, embeds, atts, label_ids):
        return self.llm_model(
            inputs_embeds=embeds,
            attention_mask=atts,
            labels=label_ids,
        )
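
# Minimal usage sketch (illustrative assumptions, not part of the module): the
# prompt strings, tokenizer calls, and audio shape below are hypothetical. It
# shows how encode() and forward() combine into a single loss computation.
if __name__ == "__main__":
    model = SpeechLLMLightning()
    tok = model.llm_tokenizer

    # Hypothetical prompt split around the speech segment.
    pre_ids = tok("Transcribe the following audio: ", return_tensors="pt").input_ids
    post_ids = tok(" Transcript: ", return_tensors="pt").input_ids
    out_ids = tok("hello world", return_tensors="pt").input_ids

    # Dummy audio input; the real shape depends on what HubertXCNNEnoder expects
    # (a raw one-second 16 kHz waveform is assumed here).
    mel = torch.randn(1, 16000)

    embeds, atts, labels = model.encode(mel, pre_ids, post_ids, out_ids)
    with torch.no_grad():
        out = model(embeds, atts, labels)
    print("loss:", out.loss.item())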