import torch
import torch.nn as nn
from transformers import PreTrainedModel, HubertModel

from .configuration_emotion_classifier import EmotionClassifierConfig
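
# NOTE: EmotionClassifierConfig lives in the sibling module
# configuration_emotion_classifier.py (not shown here). A minimal sketch of
# what it is assumed to provide, inferred from the attributes read below
# (config.hidden_size, config.num_classes); the defaults are illustrative,
# not the author's:
#
#     class EmotionClassifierConfig(PretrainedConfig):
#         model_type = "emotion_classifier_hubert"
#
#         def __init__(self, hidden_size=256, num_classes=8, **kwargs):
#             super().__init__(**kwargs)
#             self.hidden_size = hidden_size
#             self.num_classes = num_classes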


class EmotionClassifierHuBERT(PreTrainedModel):
    """HuBERT feature extractor followed by a Conv1d -> Transformer -> BiLSTM classifier head."""

    config_class = EmotionClassifierConfig
    def __init__(self, config):
        super().__init__(config)
        # Pretrained HuBERT used as a frozen feature extractor (see forward()).
        # The large checkpoint produces 1024-dim hidden states.
        self.hubert = HubertModel.from_pretrained("facebook/hubert-large-ls960-ft")
        # Two 1D convolutions compress the 1024-dim features down to 256 channels.
        self.conv1 = nn.Conv1d(in_channels=1024, out_channels=512, kernel_size=3, padding=1)
        self.conv2 = nn.Conv1d(in_channels=512, out_channels=256, kernel_size=3, padding=1)
        # batch_first=True so the layer accepts (batch, seq, feature) input,
        # matching the layout produced in forward().
        self.transformer_encoder = nn.TransformerEncoderLayer(d_model=256, nhead=8, batch_first=True)
        self.bilstm = nn.LSTM(input_size=256, hidden_size=config.hidden_size, num_layers=2, batch_first=True, bidirectional=True)
        # The BiLSTM is bidirectional, so its output size is 2 * hidden_size.
        self.fc = nn.Linear(config.hidden_size * 2, config.num_classes)
    def forward(self, x):
        # Run HuBERT without gradient tracking, i.e. as a frozen front end.
        with torch.no_grad():
            features = self.hubert(x).last_hidden_state  # (batch, frames, 1024)
        features = features.transpose(1, 2)  # (batch, 1024, frames) for Conv1d
        x = torch.relu(self.conv1(features))
        x = torch.relu(self.conv2(x))
        x = x.transpose(1, 2)  # back to (batch, frames, 256)
        x = self.transformer_encoder(x)
        x, _ = self.bilstm(x)
        # Classify from the final timestep of the BiLSTM output.
        x = self.fc(x[:, -1, :])
        return x
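

if __name__ == "__main__":
    # Minimal smoke test, a sketch rather than the author's usage. It assumes
    # EmotionClassifierConfig accepts hidden_size and num_classes keyword
    # arguments (values below are illustrative). Because of the relative
    # import above, run this as a module, e.g.:
    #     python -m <package>.modeling_emotion_classifier
    config = EmotionClassifierConfig(hidden_size=256, num_classes=8)
    model = EmotionClassifierHuBERT(config).eval()

    # HuBERT expects raw mono waveforms sampled at 16 kHz; this is two
    # one-second clips of random noise, shape (batch, samples).
    waveform = torch.randn(2, 16000)
    with torch.no_grad():
        logits = model(waveform)
    print(logits.shape)  # expected: torch.Size([2, 8])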