csikasote committed
Commit 50eaedc · verified · 1 Parent(s): 125c021

End of training

README.md CHANGED
@@ -3,6 +3,8 @@ library_name: transformers
 license: apache-2.0
 base_model: facebook/wav2vec2-xls-r-300m
 tags:
+- automatic-speech-recognition
+- nyagen
 - generated_from_trainer
 metrics:
 - wer
@@ -16,10 +18,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # xls-r-300m-nyagen-combined-hp-tuning-test-model
 
-This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on an unknown dataset.
+This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the NYAGEN - NA dataset.
 It achieves the following results on the evaluation set:
 - Loss: 0.2689
-- Wer: 0.2675
+- Wer: 0.2677
 
 ## Model description
 
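The updated card tags this checkpoint for automatic speech recognition. A minimal inference sketch follows; the repo id is an assumption pieced together from the committer name and the card title, and the audio path is a placeholder:

```python
# Sketch: transcribe an audio file with the fine-tuned checkpoint via the ASR pipeline.
# The repo id is assumed from the committer and card title; adjust to the actual Hub path.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="csikasote/xls-r-300m-nyagen-combined-hp-tuning-test-model",
)

# Wav2Vec2 models expect 16 kHz mono input; the pipeline resamples decoded files for you.
result = asr("sample.wav")  # placeholder path
print(result["text"])
```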
all_results.json ADDED
@@ -0,0 +1,15 @@
+{
+ "epoch": 22.225352112676056,
+ "eval_loss": 0.26893913745880127,
+ "eval_runtime": 27.6963,
+ "eval_samples": 334,
+ "eval_samples_per_second": 12.059,
+ "eval_steps_per_second": 1.516,
+ "eval_wer": 0.2677412133422879,
+ "total_flos": 1.9575417036880396e+19,
+ "train_loss": 16.2585947265625,
+ "train_runtime": 7398.0938,
+ "train_samples": 2840,
+ "train_samples_per_second": 11.516,
+ "train_steps_per_second": 0.178
+}
eval_results.json ADDED
@@ -0,0 +1,9 @@
+{
+ "epoch": 22.225352112676056,
+ "eval_loss": 0.26893913745880127,
+ "eval_runtime": 27.6963,
+ "eval_samples": 334,
+ "eval_samples_per_second": 12.059,
+ "eval_steps_per_second": 1.516,
+ "eval_wer": 0.2677412133422879
+}
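The eval_wer figure above is a word error rate. For reference, a small sketch of how such a score is typically computed for generated_from_trainer model cards, using the evaluate library; the transcripts below are placeholders, not data from this run:

```python
# Sketch: computing word error rate (WER) with the evaluate library (jiwer backend).
import evaluate

wer_metric = evaluate.load("wer")

predictions = ["the cat sat on mat"]     # hypothetical model output
references = ["the cat sat on the mat"]  # hypothetical reference transcript

# One deleted word out of six reference words -> WER of about 0.167.
print(wer_metric.compute(predictions=predictions, references=references))
```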
runs/Jan26_00-13-00_srvrocgpu011.uct.ac.za/events.out.tfevents.1737917431.srvrocgpu011.uct.ac.za ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8db23d09f4cd9d9fc396f34fe453f23641052156188e964024aa2b049ebf5959
+size 358
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+ "epoch": 22.225352112676056,
+ "total_flos": 1.9575417036880396e+19,
+ "train_loss": 16.2585947265625,
+ "train_runtime": 7398.0938,
+ "train_samples": 2840,
+ "train_samples_per_second": 11.516,
+ "train_steps_per_second": 0.178
+}
trainer_state.json ADDED
@@ -0,0 +1,155 @@
+{
+ "best_metric": 0.24032442271709442,
+ "best_model_checkpoint": "/scratch/skscla001/speech/results/xls-r-300m-nyagen-combined-hp-tuning-test-model/checkpoint-600",
+ "epoch": 22.225352112676056,
+ "eval_steps": 100,
+ "global_step": 1000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 2.2253521126760565,
+ "eval_loss": 2.900092124938965,
+ "eval_runtime": 27.3693,
+ "eval_samples_per_second": 12.203,
+ "eval_steps_per_second": 1.535,
+ "eval_wer": 1.0,
+ "step": 100
+ },
+ {
+ "epoch": 4.450704225352113,
+ "eval_loss": 0.837561309337616,
+ "eval_runtime": 27.5078,
+ "eval_samples_per_second": 12.142,
+ "eval_steps_per_second": 1.527,
+ "eval_wer": 0.7848668009850012,
+ "step": 200
+ },
+ {
+ "epoch": 6.676056338028169,
+ "eval_loss": 0.30867961049079895,
+ "eval_runtime": 27.4066,
+ "eval_samples_per_second": 12.187,
+ "eval_steps_per_second": 1.532,
+ "eval_wer": 0.4329527647190508,
+ "step": 300
+ },
+ {
+ "epoch": 8.901408450704226,
+ "eval_loss": 0.25156790018081665,
+ "eval_runtime": 27.5616,
+ "eval_samples_per_second": 12.118,
+ "eval_steps_per_second": 1.524,
+ "eval_wer": 0.37653906424893663,
+ "step": 400
+ },
+ {
+ "epoch": 11.112676056338028,
+ "grad_norm": 9.6257905960083,
+ "learning_rate": 0.0009213002275800941,
+ "loss": 30.7137,
+ "step": 500
+ },
+ {
+ "epoch": 11.112676056338028,
+ "eval_loss": 0.26118895411491394,
+ "eval_runtime": 27.5705,
+ "eval_samples_per_second": 12.114,
+ "eval_steps_per_second": 1.523,
+ "eval_wer": 0.3434072084172823,
+ "step": 500
+ },
+ {
+ "epoch": 13.338028169014084,
+ "eval_loss": 0.24032442271709442,
+ "eval_runtime": 27.4884,
+ "eval_samples_per_second": 12.151,
+ "eval_steps_per_second": 1.528,
+ "eval_wer": 0.32303559435862994,
+ "step": 600
+ },
+ {
+ "epoch": 15.56338028169014,
+ "eval_loss": 0.24804773926734924,
+ "eval_runtime": 27.4749,
+ "eval_samples_per_second": 12.157,
+ "eval_steps_per_second": 1.529,
+ "eval_wer": 0.31564808596373406,
+ "step": 700
+ },
+ {
+ "epoch": 17.788732394366196,
+ "eval_loss": 0.25463205575942993,
+ "eval_runtime": 27.5117,
+ "eval_samples_per_second": 12.14,
+ "eval_steps_per_second": 1.527,
+ "eval_wer": 0.2939332885605552,
+ "step": 800
+ },
+ {
+ "epoch": 20.0,
+ "eval_loss": 0.2638227641582489,
+ "eval_runtime": 27.6123,
+ "eval_samples_per_second": 12.096,
+ "eval_steps_per_second": 1.521,
+ "eval_wer": 0.2919185135437654,
+ "step": 900
+ },
+ {
+ "epoch": 22.225352112676056,
+ "grad_norm": 2.932446241378784,
+ "learning_rate": 0.0003650929645045522,
+ "loss": 1.8035,
+ "step": 1000
+ },
+ {
+ "epoch": 22.225352112676056,
+ "eval_loss": 0.26894354820251465,
+ "eval_runtime": 27.579,
+ "eval_samples_per_second": 12.111,
+ "eval_steps_per_second": 1.523,
+ "eval_wer": 0.2675173494515335,
+ "step": 1000
+ },
+ {
+ "epoch": 22.225352112676056,
+ "step": 1000,
+ "total_flos": 1.9575417036880396e+19,
+ "train_loss": 16.2585947265625,
+ "train_runtime": 7398.0938,
+ "train_samples_per_second": 11.516,
+ "train_steps_per_second": 0.178
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 1320,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 30,
+ "save_steps": 400,
+ "stateful_callbacks": {
+ "EarlyStoppingCallback": {
+ "args": {
+ "early_stopping_patience": 4,
+ "early_stopping_threshold": 0.0
+ },
+ "attributes": {
+ "early_stopping_patience_counter": 2
+ }
+ },
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1.9575417036880396e+19,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+}
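The trainer state records the evaluation/save cadence and the early-stopping settings for this run. Below is a minimal sketch of that setup; only the values (batch size 4, 30 epochs, eval every 100 steps, save every 400, log every 500, patience 4, threshold 0.0) come from the state file above, while the argument names and the rest of the wiring are assumptions based on the standard transformers Trainer API.

```python
# Sketch of the training configuration implied by trainer_state.json.
# Values come from the state file; argument names are the standard
# transformers TrainingArguments / EarlyStoppingCallback fields (assumed).
from transformers import EarlyStoppingCallback, TrainingArguments

training_args = TrainingArguments(
    output_dir="xls-r-300m-nyagen-combined-hp-tuning-test-model",
    per_device_train_batch_size=4,   # "train_batch_size": 4
    num_train_epochs=30,             # "num_train_epochs": 30
    eval_strategy="steps",           # evaluation_strategy in older transformers versions
    eval_steps=100,                  # "eval_steps": 100
    save_steps=400,                  # "save_steps": 400
    logging_steps=500,               # "logging_steps": 500
    load_best_model_at_end=True,     # needed so early stopping can track the best checkpoint
)

# Matches the recorded EarlyStoppingCallback args: patience 4, threshold 0.0.
early_stopping = EarlyStoppingCallback(
    early_stopping_patience=4,
    early_stopping_threshold=0.0,
)

# These objects would be passed to Trainer(args=training_args, callbacks=[early_stopping], ...)
# together with the model, datasets, and data collator for the actual fine-tuning run.
```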