3loi committed
Commit e680b4f
1 Parent(s): 6d4d82e

Upload model

Files changed (3)
  1. config.json +21 -0
  2. pipeline_utils.py +171 -0
  3. pytorch_model.bin +3 -0
config.json ADDED
@@ -0,0 +1,21 @@
+{
+  "architectures": [
+    "SERModel"
+  ],
+  "auto_map": {
+    "AutoConfig": "pipeline_utils.SERConfig",
+    "AutoModelForAudioClassification": "pipeline_utils.SERModel"
+  },
+  "classifier_dropout_prob": 0.5,
+  "classifier_hidden_layers": 1,
+  "hidden_size": 1024,
+  "mean": -8.278621631819787e-05,
+  "model_type": "ser",
+  "num_attention_heads": 16,
+  "num_classes": 8,
+  "num_hidden_layers": 24,
+  "ssl_type": "microsoft/wavlm-large",
+  "std": 0.08485510250851999,
+  "torch_dtype": "float32",
+  "transformers_version": "4.34.0.dev0"
+}
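
Note: because config.json registers the custom classes under auto_map, the checkpoint can be loaded through the transformers Auto classes. A minimal sketch, assuming a placeholder repo id ("<repo_id>" is not part of this commit) and that executing the repo's remote code is acceptable:

from transformers import AutoModelForAudioClassification

# trust_remote_code=True is required so that transformers imports
# pipeline_utils.SERConfig / pipeline_utils.SERModel from this repo
# instead of looking for a built-in "ser" model type.
model = AutoModelForAudioClassification.from_pretrained(
    "<repo_id>",  # placeholder: replace with this repository's Hub id
    trust_remote_code=True,
)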
pipeline_utils.py ADDED
@@ -0,0 +1,171 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from transformers import AutoModel
+from transformers.modeling_utils import PreTrainedModel, PretrainedConfig
+
+
+class Pooling(nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    def compute_length_from_mask(self, mask):
+        """
+        mask: (batch_size, T)
+        Assuming a 16 kHz sampling rate and a 20 ms frame shift, convert
+        waveform lengths (in samples) to feature lengths (in frames).
+        """
+        wav_lens = torch.sum(mask, dim=1)  # (batch_size, )
+        feat_lens = torch.div(wav_lens - 1, 16000 * 0.02, rounding_mode="floor") + 1
+        feat_lens = feat_lens.int().tolist()
+        return feat_lens
+
+    def forward(self, x, mask):
+        raise NotImplementedError
+
+
+class MeanPooling(Pooling):
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, xs, mask):
+        """
+        xs: (batch_size, T, feat_dim)
+        mask: (batch_size, T)
+
+        => output: (batch_size, feat_dim)
+        """
+        feat_lens = self.compute_length_from_mask(mask)
+        pooled_list = []
+        for x, feat_len in zip(xs, feat_lens):
+            # Average only over the valid (non-padded) frames.
+            pooled = torch.mean(x[:feat_len], dim=0)  # (feat_dim, )
+            pooled_list.append(pooled)
+        pooled = torch.stack(pooled_list, dim=0)  # (batch_size, feat_dim)
+        return pooled
+
+
+class AttentiveStatisticsPooling(Pooling):
+    """
+    AttentiveStatisticsPooling
+    Paper: Attentive Statistics Pooling for Deep Speaker Embedding
+    Link: https://arxiv.org/pdf/1803.10963.pdf
+    """
+    def __init__(self, input_size):
+        super().__init__()
+        self._indim = input_size
+        self.sap_linear = nn.Linear(input_size, input_size)
+        self.attention = nn.Parameter(torch.FloatTensor(input_size, 1))
+        torch.nn.init.normal_(self.attention, mean=0, std=1)
+
+    def forward(self, xs, mask):
+        """
+        xs: (batch_size, T, feat_dim)
+        mask: (batch_size, T)
+
+        => output: (batch_size, feat_dim*2)
+        """
+        feat_lens = self.compute_length_from_mask(mask)
+        pooled_list = []
+        for x, feat_len in zip(xs, feat_lens):
+            x = x[:feat_len].unsqueeze(0)
+            h = torch.tanh(self.sap_linear(x))
+            w = torch.matmul(h, self.attention).squeeze(dim=2)
+            w = F.softmax(w, dim=1).view(x.size(0), x.size(1), 1)
+            # Attention-weighted mean and standard deviation over time.
+            mu = torch.sum(x * w, dim=1)
+            rh = torch.sqrt((torch.sum((x ** 2) * w, dim=1) - mu ** 2).clamp(min=1e-5))
+            x = torch.cat((mu, rh), 1).squeeze(0)
+            pooled_list.append(x)
+        return torch.stack(pooled_list)
+
+
+class EmotionRegression(nn.Module):
+    def __init__(self, *args, **kwargs):
+        super().__init__()
+        input_dim = args[0]
+        hidden_dim = args[1]
+        num_layers = args[2]
+        output_dim = args[3]
+        p = kwargs.get("dropout", 0.5)
+
+        self.fc = nn.ModuleList([
+            nn.Sequential(
+                nn.Linear(input_dim, hidden_dim), nn.LayerNorm(hidden_dim), nn.ReLU(), nn.Dropout(p)
+            )
+        ])
+        for _ in range(num_layers - 1):
+            self.fc.append(
+                nn.Sequential(
+                    nn.Linear(hidden_dim, hidden_dim), nn.LayerNorm(hidden_dim), nn.ReLU(), nn.Dropout(p)
+                )
+            )
+        self.out = nn.Sequential(
+            nn.Linear(hidden_dim, output_dim)
+        )
+
+        self.inp_drop = nn.Dropout(p)
+
+    def get_repr(self, x):
+        h = self.inp_drop(x)
+        for fc in self.fc:
+            h = fc(h)
+        return h
+
+    def forward(self, x):
+        h = self.get_repr(x)
+        result = self.out(h)
+        return result
+
+
+class SERConfig(PretrainedConfig):
+    model_type = "ser"
+
+    def __init__(
+        self,
+        num_classes: int = 8,
+        num_attention_heads=16,
+        num_hidden_layers=24,
+        hidden_size=1024,
+        classifier_hidden_layers=1,
+        classifier_dropout_prob=0.5,
+        ssl_type="microsoft/wavlm-large",
+        torch_dtype="float32",
+        mean=-8.278621631819787e-05,
+        std=0.08485510250851999,
+        **kwargs,
+    ):
+        self.num_classes = num_classes
+        self.num_attention_heads = num_attention_heads
+        self.num_hidden_layers = num_hidden_layers
+        self.hidden_size = hidden_size
+        self.classifier_hidden_layers = classifier_hidden_layers
+        self.classifier_dropout_prob = classifier_dropout_prob
+        self.ssl_type = ssl_type
+        self.torch_dtype = torch_dtype
+
+        # Waveform statistics, presumably for normalizing model inputs.
+        self.mean = mean
+        self.std = std
+        super().__init__(**kwargs)
+
+
+class SERModel(PreTrainedModel):
+    config_class = SERConfig
+
+    def __init__(self, config):
+        super().__init__(config)
+        # Self-supervised speech backbone; its CNN feature encoder stays frozen.
+        self.ssl_model = AutoModel.from_pretrained(config.ssl_type)
+        self.ssl_model.freeze_feature_encoder()
+
+        self.pool_model = AttentiveStatisticsPooling(config.hidden_size)
+
+        # The pooled vector concatenates mean and std, hence hidden_size * 2.
+        self.ser_model = EmotionRegression(config.hidden_size * 2,
+                                           config.hidden_size,
+                                           config.classifier_hidden_layers,
+                                           config.num_classes,
+                                           dropout=config.classifier_dropout_prob)
+
+    def forward(self, x, mask):
+        ssl = self.ssl_model(x, attention_mask=mask).last_hidden_state
+        ssl = self.pool_model(ssl, mask)
+        pred = self.ser_model(ssl)
+        return pred
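
Note: SERModel.forward takes a raw waveform batch and an attention mask, so a smoke test needs only dummy tensors. A minimal sketch, assuming the model was loaded as shown above and that config.mean / config.std are meant for waveform z-normalization (the diff stores these statistics but never applies them):

import torch

model.eval()
wav = torch.randn(1, 16000)                    # 1 s of dummy 16 kHz audio
mask = torch.ones_like(wav, dtype=torch.long)  # every sample is valid
# Assumed normalization step, not shown anywhere in this commit.
wav = (wav - model.config.mean) / model.config.std

with torch.no_grad():
    logits = model(wav, mask)                  # shape (1, 8), i.e. (batch, num_classes)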
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:446f71c92a67b69977c50b065a0e418c37fa20aba1d2e44ecb1190d97f9c0cbb
+size 1274614289