Upload EcapaTdnnForSequenceClassification
Files changed:
- config.json: +6 -2
- tdnn_attention.py: +1 -1
config.json CHANGED

@@ -1,11 +1,14 @@
 {
-  "_attn_implementation_autoset": true,
   "angular": true,
   "angular_margin": 0.2,
   "angular_scale": 30,
+  "architectures": [
+    "EcapaTdnnForSequenceClassification"
+  ],
   "attention_channels": 128,
   "auto_map": {
-    "AutoConfig": "configuration_ecapa_tdnn.EcapaTdnnConfig"
+    "AutoConfig": "configuration_ecapa_tdnn.EcapaTdnnConfig",
+    "AutoModelForAudioClassification": "modeling_ecapa_tdnn.EcapaTdnnForSequenceClassification"
   },
   "bos_token_id": 1,
   "decoder_config": {
@@ -2577,6 +2580,7 @@
   },
   "time_masks": 5,
   "time_width": 0.03,
+  "torch_dtype": "float32",
   "transformers_version": "4.48.3",
   "use_torchaudio": true,
   "use_vectorized_spec_augment": true,
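With the "architectures" field and the "AutoModelForAudioClassification" entry added to "auto_map", the checkpoint can be resolved through the transformers Auto classes, with the config and model classes loaded from the repository's own configuration_ecapa_tdnn.py and modeling_ecapa_tdnn.py. A minimal loading sketch, assuming a placeholder repo id (the real one is not given here) and leaving audio preprocessing aside:

```python
# Minimal sketch: loading the checkpoint via the Auto classes after this commit.
# The repo id below is a placeholder, not the actual repository name.
import torch
from transformers import AutoConfig, AutoModelForAudioClassification

repo_id = "your-namespace/ecapa-tdnn"  # placeholder

# trust_remote_code=True is required because both classes in auto_map live
# inside the repo rather than in the transformers library itself.
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForAudioClassification.from_pretrained(
    repo_id,
    trust_remote_code=True,
    torch_dtype=torch.float32,  # matches the "torch_dtype" now recorded in config.json
)
print(type(model).__name__)  # expected: EcapaTdnnForSequenceClassification
```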
tdnn_attention.py CHANGED

@@ -276,7 +276,7 @@ class TdnnSeModule(nn.Module):
     def forward(self, inputs, length=None):
         x = self.group_tdnn_block(inputs)
         x = self.se_layer(x, length)
-        return x +
+        return x + inputs


 class Res2NetBlock(nn.Module):
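The one-line change in TdnnSeModule.forward completes the truncated return statement so the block returns x + inputs, adding the block's input back to the squeeze-and-excitation output as a skip connection. A minimal sketch of that pattern, with simple stand-ins for the actual group_tdnn_block and se_layer rather than the real implementations from tdnn_attention.py:

```python
# Sketch of the residual SE-block pattern restored by the fix.
# The submodules are hypothetical stand-ins, not the repo's actual layers.
import torch
import torch.nn as nn


class SqueezeExciteSketch(nn.Module):
    def __init__(self, channels: int):
        super().__init__()
        self.gate = nn.Sequential(
            nn.AdaptiveAvgPool1d(1),                    # squeeze over time
            nn.Conv1d(channels, channels, kernel_size=1),
            nn.Sigmoid(),                               # channel-wise gate in (0, 1)
        )

    def forward(self, x, length=None):
        # Channel-wise re-weighting; `length` masking is ignored in this sketch.
        return x * self.gate(x)


class TdnnSeSketch(nn.Module):
    def __init__(self, channels: int = 512):
        super().__init__()
        self.group_tdnn_block = nn.Conv1d(channels, channels, kernel_size=1)  # stand-in
        self.se_layer = SqueezeExciteSketch(channels)

    def forward(self, inputs, length=None):
        x = self.group_tdnn_block(inputs)
        x = self.se_layer(x, length)
        return x + inputs  # the skip connection added by the one-line fix


x = torch.randn(2, 512, 100)       # (batch, channels, time)
y = TdnnSeSketch()(x)
assert y.shape == x.shape          # residual addition requires matching shapes
```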