ltnghia committed
Commit bfb21c3 · verified · 1 Parent(s): 878cf48

Add files using upload-large-folder tool

Files changed (50)
  1. .gitattributes +3 -59
  2. CODE_OF_CONDUCT.md +9 -0
  3. LICENSE +395 -0
  4. LICENSE-CODE +21 -0
  5. NSNet-baseline/README.md +37 -0
  6. NSNet-baseline/__pycache__/audiolib.cpython-36.pyc +0 -0
  7. NSNet-baseline/__pycache__/onnx.cpython-36.pyc +0 -0
  8. NSNet-baseline/audiolib.py +184 -0
  9. NSNet-baseline/nsnet-baseline-dnschallenge.onnx +3 -0
  10. NSNet-baseline/nsnet_eval_local.py +78 -0
  11. NSNet-baseline/onnx.py +108 -0
  12. README.md +130 -3
  13. SECURITY.md +41 -0
  14. __pycache__/audiolib.cpython-36.pyc +0 -0
  15. __pycache__/utils.cpython-36.pyc +0 -0
  16. audiolib.py +297 -0
  17. datasets/Readme.md +23 -0
  18. datasets/blind_test_set/noreverb_fileid_0.wav +3 -0
  19. datasets/blind_test_set/noreverb_fileid_1.wav +3 -0
  20. datasets/blind_test_set/noreverb_fileid_10.wav +3 -0
  21. datasets/blind_test_set/noreverb_fileid_100.wav +3 -0
  22. datasets/blind_test_set/noreverb_fileid_101.wav +3 -0
  23. datasets/blind_test_set/noreverb_fileid_102.wav +3 -0
  24. datasets/blind_test_set/noreverb_fileid_103.wav +3 -0
  25. datasets/blind_test_set/noreverb_fileid_104.wav +3 -0
  26. datasets/blind_test_set/noreverb_fileid_105.wav +3 -0
  27. datasets/blind_test_set/noreverb_fileid_106.wav +3 -0
  28. datasets/blind_test_set/noreverb_fileid_107.wav +3 -0
  29. datasets/blind_test_set/noreverb_fileid_108.wav +3 -0
  30. datasets/blind_test_set/noreverb_fileid_109.wav +3 -0
  31. datasets/blind_test_set/noreverb_fileid_11.wav +3 -0
  32. datasets/blind_test_set/noreverb_fileid_110.wav +3 -0
  33. datasets/blind_test_set/noreverb_fileid_111.wav +3 -0
  34. datasets/blind_test_set/noreverb_fileid_112.wav +3 -0
  35. datasets/blind_test_set/noreverb_fileid_113.wav +3 -0
  36. datasets/blind_test_set/noreverb_fileid_114.wav +3 -0
  37. datasets/blind_test_set/noreverb_fileid_116.wav +3 -0
  38. datasets/blind_test_set/noreverb_fileid_117.wav +3 -0
  39. datasets/blind_test_set/noreverb_fileid_119.wav +3 -0
  40. datasets/blind_test_set/noreverb_fileid_12.wav +3 -0
  41. datasets/blind_test_set/noreverb_fileid_121.wav +3 -0
  42. datasets/blind_test_set/noreverb_fileid_128.wav +3 -0
  43. docs/IS2020_noisesuppchallenge_base_paper.pdf +3 -0
  44. index.html +243 -0
  45. noisyspeech_synthesizer.cfg +66 -0
  46. noisyspeech_synthesizer_multiprocessing.py +342 -0
  47. noisyspeech_synthesizer_singleprocess.py +351 -0
  48. requirements.txt +16 -0
  49. unit_tests_synthesizer.py +186 -0
  50. utils.py +46 -0
.gitattributes CHANGED
@@ -1,59 +1,3 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.lz4 filter=lfs diff=lfs merge=lfs -text
- *.mds filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- # Image files - uncompressed
- *.bmp filter=lfs diff=lfs merge=lfs -text
- *.gif filter=lfs diff=lfs merge=lfs -text
- *.png filter=lfs diff=lfs merge=lfs -text
- *.tiff filter=lfs diff=lfs merge=lfs -text
- # Image files - compressed
- *.jpg filter=lfs diff=lfs merge=lfs -text
- *.jpeg filter=lfs diff=lfs merge=lfs -text
- *.webp filter=lfs diff=lfs merge=lfs -text
- # Video files - compressed
- *.mp4 filter=lfs diff=lfs merge=lfs -text
- *.webm filter=lfs diff=lfs merge=lfs -text
+ *.wav filter=lfs diff=lfs merge=lfs -text
+ NSNet-baseline/nsnet-baseline-dnschallenge.onnx filter=lfs diff=lfs merge=lfs -text
+ docs/IS2020_noisesuppchallenge_base_paper.pdf filter=lfs diff=lfs merge=lfs -text
CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,9 @@
+ # Microsoft Open Source Code of Conduct
+
+ This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+
+ Resources:
+
+ - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
+ - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
+ - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns
LICENSE ADDED
@@ -0,0 +1,395 @@
+ Attribution 4.0 International
+
+ =======================================================================
+
+ Creative Commons Corporation ("Creative Commons") is not a law firm and
+ does not provide legal services or legal advice. Distribution of
+ Creative Commons public licenses does not create a lawyer-client or
+ other relationship. Creative Commons makes its licenses and related
+ information available on an "as-is" basis. Creative Commons gives no
+ warranties regarding its licenses, any material licensed under their
+ terms and conditions, or any related information. Creative Commons
+ disclaims all liability for damages resulting from their use to the
+ fullest extent possible.
+
+ Using Creative Commons Public Licenses
+
+ Creative Commons public licenses provide a standard set of terms and
+ conditions that creators and other rights holders may use to share
+ original works of authorship and other material subject to copyright
+ and certain other rights specified in the public license below. The
+ following considerations are for informational purposes only, are not
+ exhaustive, and do not form part of our licenses.
+
+      Considerations for licensors: Our public licenses are
+      intended for use by those authorized to give the public
+      permission to use material in ways otherwise restricted by
+      copyright and certain other rights. Our licenses are
+      irrevocable. Licensors should read and understand the terms
+      and conditions of the license they choose before applying it.
+      Licensors should also secure all rights necessary before
+      applying our licenses so that the public can reuse the
+      material as expected. Licensors should clearly mark any
+      material not subject to the license. This includes other CC-
+      licensed material, or material used under an exception or
+      limitation to copyright. More considerations for licensors:
+      wiki.creativecommons.org/Considerations_for_licensors
+
+      Considerations for the public: By using one of our public
+      licenses, a licensor grants the public permission to use the
+      licensed material under specified terms and conditions. If
+      the licensor's permission is not necessary for any reason--for
+      example, because of any applicable exception or limitation to
+      copyright--then that use is not regulated by the license. Our
+      licenses grant only permissions under copyright and certain
+      other rights that a licensor has authority to grant. Use of
+      the licensed material may still be restricted for other
+      reasons, including because others have copyright or other
+      rights in the material. A licensor may make special requests,
+      such as asking that all changes be marked or described.
+      Although not required by our licenses, you are encouraged to
+      respect those requests where reasonable. More_considerations
+      for the public:
+      wiki.creativecommons.org/Considerations_for_licensees
+
+ =======================================================================
+
+ Creative Commons Attribution 4.0 International Public License
+
+ By exercising the Licensed Rights (defined below), You accept and agree
+ to be bound by the terms and conditions of this Creative Commons
+ Attribution 4.0 International Public License ("Public License"). To the
+ extent this Public License may be interpreted as a contract, You are
+ granted the Licensed Rights in consideration of Your acceptance of
+ these terms and conditions, and the Licensor grants You such rights in
+ consideration of benefits the Licensor receives from making the
+ Licensed Material available under these terms and conditions.
+
+
+ Section 1 -- Definitions.
+
+   a. Adapted Material means material subject to Copyright and Similar
+      Rights that is derived from or based upon the Licensed Material
+      and in which the Licensed Material is translated, altered,
+      arranged, transformed, or otherwise modified in a manner requiring
+      permission under the Copyright and Similar Rights held by the
+      Licensor. For purposes of this Public License, where the Licensed
+      Material is a musical work, performance, or sound recording,
+      Adapted Material is always produced where the Licensed Material is
+      synched in timed relation with a moving image.
+
+   b. Adapter's License means the license You apply to Your Copyright
+      and Similar Rights in Your contributions to Adapted Material in
+      accordance with the terms and conditions of this Public License.
+
+   c. Copyright and Similar Rights means copyright and/or similar rights
+      closely related to copyright including, without limitation,
+      performance, broadcast, sound recording, and Sui Generis Database
+      Rights, without regard to how the rights are labeled or
+      categorized. For purposes of this Public License, the rights
+      specified in Section 2(b)(1)-(2) are not Copyright and Similar
+      Rights.
+
+   d. Effective Technological Measures means those measures that, in the
+      absence of proper authority, may not be circumvented under laws
+      fulfilling obligations under Article 11 of the WIPO Copyright
+      Treaty adopted on December 20, 1996, and/or similar international
+      agreements.
+
+   e. Exceptions and Limitations means fair use, fair dealing, and/or
+      any other exception or limitation to Copyright and Similar Rights
+      that applies to Your use of the Licensed Material.
+
+   f. Licensed Material means the artistic or literary work, database,
+      or other material to which the Licensor applied this Public
+      License.
+
+   g. Licensed Rights means the rights granted to You subject to the
+      terms and conditions of this Public License, which are limited to
+      all Copyright and Similar Rights that apply to Your use of the
+      Licensed Material and that the Licensor has authority to license.
+
+   h. Licensor means the individual(s) or entity(ies) granting rights
+      under this Public License.
+
+   i. Share means to provide material to the public by any means or
+      process that requires permission under the Licensed Rights, such
+      as reproduction, public display, public performance, distribution,
+      dissemination, communication, or importation, and to make material
+      available to the public including in ways that members of the
+      public may access the material from a place and at a time
+      individually chosen by them.
+
+   j. Sui Generis Database Rights means rights other than copyright
+      resulting from Directive 96/9/EC of the European Parliament and of
+      the Council of 11 March 1996 on the legal protection of databases,
+      as amended and/or succeeded, as well as other essentially
+      equivalent rights anywhere in the world.
+
+   k. You means the individual or entity exercising the Licensed Rights
+      under this Public License. Your has a corresponding meaning.
+
+
+ Section 2 -- Scope.
+
+   a. License grant.
+
+        1. Subject to the terms and conditions of this Public License,
+           the Licensor hereby grants You a worldwide, royalty-free,
+           non-sublicensable, non-exclusive, irrevocable license to
+           exercise the Licensed Rights in the Licensed Material to:
+
+             a. reproduce and Share the Licensed Material, in whole or
+                in part; and
+
+             b. produce, reproduce, and Share Adapted Material.
+
+        2. Exceptions and Limitations. For the avoidance of doubt, where
+           Exceptions and Limitations apply to Your use, this Public
+           License does not apply, and You do not need to comply with
+           its terms and conditions.
+
+        3. Term. The term of this Public License is specified in Section
+           6(a).
+
+        4. Media and formats; technical modifications allowed. The
+           Licensor authorizes You to exercise the Licensed Rights in
+           all media and formats whether now known or hereafter created,
+           and to make technical modifications necessary to do so. The
+           Licensor waives and/or agrees not to assert any right or
+           authority to forbid You from making technical modifications
+           necessary to exercise the Licensed Rights, including
+           technical modifications necessary to circumvent Effective
+           Technological Measures. For purposes of this Public License,
+           simply making modifications authorized by this Section 2(a)
+           (4) never produces Adapted Material.
+
+        5. Downstream recipients.
+
+             a. Offer from the Licensor -- Licensed Material. Every
+                recipient of the Licensed Material automatically
+                receives an offer from the Licensor to exercise the
+                Licensed Rights under the terms and conditions of this
+                Public License.
+
+             b. No downstream restrictions. You may not offer or impose
+                any additional or different terms or conditions on, or
+                apply any Effective Technological Measures to, the
+                Licensed Material if doing so restricts exercise of the
+                Licensed Rights by any recipient of the Licensed
+                Material.
+
+        6. No endorsement. Nothing in this Public License constitutes or
+           may be construed as permission to assert or imply that You
+           are, or that Your use of the Licensed Material is, connected
+           with, or sponsored, endorsed, or granted official status by,
+           the Licensor or others designated to receive attribution as
+           provided in Section 3(a)(1)(A)(i).
+
+   b. Other rights.
+
+        1. Moral rights, such as the right of integrity, are not
+           licensed under this Public License, nor are publicity,
+           privacy, and/or other similar personality rights; however, to
+           the extent possible, the Licensor waives and/or agrees not to
+           assert any such rights held by the Licensor to the limited
+           extent necessary to allow You to exercise the Licensed
+           Rights, but not otherwise.
+
+        2. Patent and trademark rights are not licensed under this
+           Public License.
+
+        3. To the extent possible, the Licensor waives any right to
+           collect royalties from You for the exercise of the Licensed
+           Rights, whether directly or through a collecting society
+           under any voluntary or waivable statutory or compulsory
+           licensing scheme. In all other cases the Licensor expressly
+           reserves any right to collect such royalties.
+
+
+ Section 3 -- License Conditions.
+
+ Your exercise of the Licensed Rights is expressly made subject to the
+ following conditions.
+
+   a. Attribution.
+
+        1. If You Share the Licensed Material (including in modified
+           form), You must:
+
+             a. retain the following if it is supplied by the Licensor
+                with the Licensed Material:
+
+                  i. identification of the creator(s) of the Licensed
+                     Material and any others designated to receive
+                     attribution, in any reasonable manner requested by
+                     the Licensor (including by pseudonym if
+                     designated);
+
+                 ii. a copyright notice;
+
+                iii. a notice that refers to this Public License;
+
+                 iv. a notice that refers to the disclaimer of
+                     warranties;
+
+                  v. a URI or hyperlink to the Licensed Material to the
+                     extent reasonably practicable;
+
+             b. indicate if You modified the Licensed Material and
+                retain an indication of any previous modifications; and
+
+             c. indicate the Licensed Material is licensed under this
+                Public License, and include the text of, or the URI or
+                hyperlink to, this Public License.
+
+        2. You may satisfy the conditions in Section 3(a)(1) in any
+           reasonable manner based on the medium, means, and context in
+           which You Share the Licensed Material. For example, it may be
+           reasonable to satisfy the conditions by providing a URI or
+           hyperlink to a resource that includes the required
+           information.
+
+        3. If requested by the Licensor, You must remove any of the
+           information required by Section 3(a)(1)(A) to the extent
+           reasonably practicable.
+
+        4. If You Share Adapted Material You produce, the Adapter's
+           License You apply must not prevent recipients of the Adapted
+           Material from complying with this Public License.
+
+
+ Section 4 -- Sui Generis Database Rights.
+
+ Where the Licensed Rights include Sui Generis Database Rights that
+ apply to Your use of the Licensed Material:
+
+   a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+      to extract, reuse, reproduce, and Share all or a substantial
+      portion of the contents of the database;
+
+   b. if You include all or a substantial portion of the database
+      contents in a database in which You have Sui Generis Database
+      Rights, then the database in which You have Sui Generis Database
+      Rights (but not its individual contents) is Adapted Material; and
+
+   c. You must comply with the conditions in Section 3(a) if You Share
+      all or a substantial portion of the contents of the database.
+
+ For the avoidance of doubt, this Section 4 supplements and does not
+ replace Your obligations under this Public License where the Licensed
+ Rights include other Copyright and Similar Rights.
+
+
+ Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+
+   a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+      EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+      AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+      ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+      IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+      WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+      PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+      ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+      KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+      ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+
+   b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+      TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+      NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+      INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+      COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+      USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+      ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+      DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+      IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+
+   c. The disclaimer of warranties and limitation of liability provided
+      above shall be interpreted in a manner that, to the extent
+      possible, most closely approximates an absolute disclaimer and
+      waiver of all liability.
+
+
+ Section 6 -- Term and Termination.
+
+   a. This Public License applies for the term of the Copyright and
+      Similar Rights licensed here. However, if You fail to comply with
+      this Public License, then Your rights under this Public License
+      terminate automatically.
+
+   b. Where Your right to use the Licensed Material has terminated under
+      Section 6(a), it reinstates:
+
+        1. automatically as of the date the violation is cured, provided
+           it is cured within 30 days of Your discovery of the
+           violation; or
+
+        2. upon express reinstatement by the Licensor.
+
+      For the avoidance of doubt, this Section 6(b) does not affect any
+      right the Licensor may have to seek remedies for Your violations
+      of this Public License.
+
+   c. For the avoidance of doubt, the Licensor may also offer the
+      Licensed Material under separate terms or conditions or stop
+      distributing the Licensed Material at any time; however, doing so
+      will not terminate this Public License.
+
+   d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+      License.
+
+
+ Section 7 -- Other Terms and Conditions.
+
+   a. The Licensor shall not be bound by any additional or different
+      terms or conditions communicated by You unless expressly agreed.
+
+   b. Any arrangements, understandings, or agreements regarding the
+      Licensed Material not stated herein are separate from and
+      independent of the terms and conditions of this Public License.
+
+
+ Section 8 -- Interpretation.
+
+   a. For the avoidance of doubt, this Public License does not, and
+      shall not be interpreted to, reduce, limit, restrict, or impose
+      conditions on any use of the Licensed Material that could lawfully
+      be made without permission under this Public License.
+
+   b. To the extent possible, if any provision of this Public License is
+      deemed unenforceable, it shall be automatically reformed to the
+      minimum extent necessary to make it enforceable. If the provision
+      cannot be reformed, it shall be severed from this Public License
+      without affecting the enforceability of the remaining terms and
+      conditions.
+
+   c. No term or condition of this Public License will be waived and no
+      failure to comply consented to unless expressly agreed to by the
+      Licensor.
+
+   d. Nothing in this Public License constitutes or may be interpreted
+      as a limitation upon, or waiver of, any privileges and immunities
+      that apply to the Licensor or You, including from the legal
+      processes of any jurisdiction or authority.
+
+
+ =======================================================================
+
+ Creative Commons is not a party to its public
+ licenses. Notwithstanding, Creative Commons may elect to apply one of
+ its public licenses to material it publishes and in those instances
+ will be considered the “Licensor.” The text of the Creative Commons
+ public licenses is dedicated to the public domain under the CC0 Public
+ Domain Dedication. Except for the limited purpose of indicating that
+ material is shared under a Creative Commons public license or as
+ otherwise permitted by the Creative Commons policies published at
+ creativecommons.org/policies, Creative Commons does not authorize the
+ use of the trademark "Creative Commons" or any other trademark or logo
+ of Creative Commons without its prior written consent including,
+ without limitation, in connection with any unauthorized modifications
+ to any of its public licenses or any other arrangements,
+ understandings, or agreements concerning use of licensed material. For
+ the avoidance of doubt, this paragraph does not form part of the
+ public licenses.
+
+ Creative Commons may be contacted at creativecommons.org.
LICENSE-CODE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) Microsoft Corporation.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE
NSNet-baseline/README.md ADDED
@@ -0,0 +1,37 @@
+ # Noise Suppression Net (NSNet) baseline inference script
+
+ * As a baseline for the Interspeech 2020 Deep Noise Suppression challenge, we use a recently developed speech enhancement (SE) method based on a Recurrent Neural Network (RNN). For ease of reference, we call this method Noise Suppression Net (NSNet). The details of the method can be found in the [published paper](https://arxiv.org/pdf/2001.10601.pdf).
+ * The method takes log power spectra as input and predicts a per-frame enhancement gain with a model built from Gated Recurrent Units (GRUs) and fully connected layers (a condensed sketch of this per-frame loop follows the list). Please refer to the paper for more details of the method.
+ * NSNet is computationally efficient: it takes only 0.16 ms to enhance a 20 ms frame on an Intel quad-core i5 machine using ONNX Runtime v1.1.
+
+ ## Prerequisites
+ - Python 3.0 and above
+ - pysoundfile (pip install pysoundfile)
+ - onnxruntime (pip install onnxruntime)
+
+ ## Files:
+ - nsnet_eval_local.py - main script that calls onnx.py
+ - onnx.py - frame-based inference
+ - audiolib.py - audio library routines required for inference
+ - nsnet-baseline-dnschallenge.onnx - trained NSNet ONNX model used for inference
+
+ ## Usage:
+ From the NSNet-baseline directory, run nsnet_eval_local.py with the following required arguments:
+ - --noisyspeechdir: path to the noisy speech files that you want to enhance
+ - --enhanceddir: path to a directory where you want to store the enhanced clips
+ - --modelpath: path to the provided ONNX model
+
+ Use the default values for the rest of the arguments and run the script to enhance the clips, as in the example below.
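For instance, an invocation might look like this (the three paths are placeholders for your own directories and your copy of the model):

```
python nsnet_eval_local.py --noisyspeechdir ./noisy_clips --enhanceddir ./enhanced_clips --modelpath ./nsnet-baseline-dnschallenge.onnx
```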
+
+ ## Citation:
+ The baseline NSNet noise suppression:<br />
+ ```BibTex
+ @INPROCEEDINGS{9054254, author={Y. {Xia} and S. {Braun} and C. K. A. {Reddy}
+ and H. {Dubey} and R. {Cutler} and I. {Tashev}},
+ booktitle={ICASSP 2020 - 2020 IEEE International Conference on Acoustics,
+ Speech and Signal Processing (ICASSP)},
+ title={Weighted Speech Distortion Losses for Neural-Network-Based Real-Time Speech Enhancement},
+ year={2020}, volume={}, number={}, pages={871-875},}
+ ```
+
+ Y. Xia, S. Braun, C. K. A. Reddy, H. Dubey, R. Cutler and I. Tashev, "Weighted Speech Distortion Losses for Neural-Network-Based Real-Time Speech Enhancement," ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), Barcelona, Spain, 2020, pp. 871-875.
NSNet-baseline/__pycache__/audiolib.cpython-36.pyc ADDED
Binary file (4.68 kB)
 
NSNet-baseline/__pycache__/onnx.cpython-36.pyc ADDED
Binary file (3.26 kB)
 
NSNet-baseline/audiolib.py ADDED
@@ -0,0 +1,184 @@
+ # -*- coding: utf-8 -*-
+ """
+ Functions for audio featurization.
+ """
+
+ import os
+ import math
+ import logging
+
+ import numpy as np
+ import soundfile as sf
+ import librosa
+
+ SIGMA_EPS = 1e-12
+
+
+ def stft(frame, _sr, wind, _hop, nfft, synth=False, zphase=False):
+     # `synth` is accepted for API compatibility but unused here
+     if not zphase:
+         return np.fft.rfft(frame, n=nfft)
+     fsize = len(wind)
+     woff = (fsize - (fsize % 2)) // 2
+     zp = np.zeros(nfft - fsize)
+     return np.fft.rfft(np.concatenate((frame[woff:], zp, frame[:woff])))
+
+
+ def istft(frame, _sr, wind, nfft, zphase=False):
+     frame = np.fft.irfft(frame, nfft)
+     if zphase:
+         fsize = len(wind)
+         frame = np.roll(frame, (fsize - (fsize % 2)) // 2)[:fsize]
+     return frame
+
+
+ def onlineMVN_perframe(
+         frame_feature, frame_counter, mu, sigmasquare,
+         frameshift=0.01, tauFeat=3., tauFeatInit=0.1, t_init=0.1):
+     """Online mean and variance normalization (per frequency)"""
+
+     n_init_frames = math.ceil(t_init / frameshift)
+     alpha_feat_init = math.exp(-frameshift / tauFeatInit)
+     alpha_feat = math.exp(-frameshift / tauFeat)
+
+     # Use a faster-adapting smoothing constant during the initial frames
+     if frame_counter < n_init_frames:
+         alpha = alpha_feat_init
+     else:
+         alpha = alpha_feat
+
+     mu = alpha * mu + (1 - alpha) * frame_feature
+     sigmasquare = alpha * sigmasquare + (1 - alpha) * frame_feature**2
+     sigma = np.sqrt(np.maximum(sigmasquare - mu**2, SIGMA_EPS))  # limit for sqrt
+     norm_feature = (frame_feature - mu) / sigma
+     frame_counter += 1
+
+     return norm_feature, mu, sigmasquare, frame_counter
+
+
+ def magphasor(complexspec):
+     """Decompose a complex spectrogram into magnitude and unit phasor.
+     m, p = magphasor(c) such that c == m * p.
+     """
+     mspec = np.abs(complexspec)
+     pspec = np.empty_like(complexspec)
+     zero_mag = mspec == 0.  # fix zero-magnitude bins
+     pspec[zero_mag] = 1.
+     pspec[~zero_mag] = complexspec[~zero_mag] / mspec[~zero_mag]
+     return mspec, pspec
+
+
+ def logpow(sig, floor=-30.):
+     """Compute log power of complex spectrum.
+
+     Floor any -`np.inf` value to (nonzero minimum + `floor`) dB.
+     If all values are 0s, floor all values to -80 dB.
+     """
+     log10e = np.log10(np.e)
+     pspec = sig.real**2 + sig.imag**2
+     zeros = pspec == 0
+     logp = np.empty_like(pspec)
+     if np.any(~zeros):
+         logp[~zeros] = np.log(pspec[~zeros])
+         logp[zeros] = np.log(pspec[~zeros].min()) + floor / 10 / log10e
+     else:
+         logp.fill(-80 / 10 / log10e)
+
+     return logp
+
+
+ def hamming(wsize, hop=None):
+     "Compute the Hamming window"
+
+     if hop is None:
+         return np.hamming(wsize)
+
+     # For perfect OLA reconstruction in time
+     if wsize % 2:  # Fix endpoint problem for odd-size window
+         wind = np.hamming(wsize)
+         wind[0] /= 2.
+         wind[-1] /= 2.
+     else:  # even-size window
+         wind = np.hamming(wsize + 1)
+         wind = wind[:-1]
+
+     assert tnorm(wind, hop), \
+         "[wsize:{}, hop:{}] violates COLA in time.".format(wsize, hop)
+
+     return wind
+
+
+ def tnorm(wind, hop):
+     amp = tcola(wind, hop)
+     if amp is None:
+         return False
+     wind /= amp  # normalize the window in place so the OLA sum is unity
+     return True
+
+
+ def tcola(wind, _hop):
+     wsize = len(wind)
+     hsize = 160  # hop in samples is hard-coded here (10 ms at 16 kHz)
+     buff = wind.copy()  # holds the OLA buffer and accounts for time=0
+     for wi in range(hsize, wsize, hsize):  # window moving forward
+         buff[wi:] += wind[:wsize - wi]
+     for wj in range(wsize - hsize, 0, -hsize):  # window moving backward
+         buff[:wj] += wind[wsize - wj:]
+
+     if np.allclose(buff, buff[0]):
+         return buff[0]
+
+     return None
+
+
+ def audioread(path, sr=None, start=0, stop=None, mono=True, norm=False):
+
+     path = os.path.abspath(path)
+     if not os.path.exists(path):
+         logging.error('File does not exist: %s', path)
+         raise ValueError("[{}] does not exist!".format(path))
+
+     try:
+         x, xsr = sf.read(path, start=start, stop=stop)
+     except RuntimeError:  # sph pcm-embedded shortened v2 files land here
+         logging.warning('Audio type not supported for file %s. Trying sph2pipe...', path)
+         raise  # no sph2pipe fallback is implemented; re-raise rather than use `x` uninitialized
+
+     if len(x.shape) == 1:  # mono
+         if sr and xsr != sr:
+             print("Resampling to sampling rate:", sr)
+             x = librosa.resample(x, xsr, sr)
+             xsr = sr
+         if norm:
+             print("Normalizing input data")
+             x /= np.max(np.abs(x))
+         return x, xsr
+
+     # multi-channel
+     x = x.T
+     if sr and xsr != sr:
+         x = librosa.resample(x, xsr, sr, axis=1)
+         xsr = sr
+     if mono:
+         x = x.sum(axis=0) / x.shape[0]
+     if norm:
+         if x.ndim == 1:  # downmixed above; normalize the single channel
+             x /= np.max(np.abs(x))
+         else:
+             for chan in range(x.shape[0]):
+                 x[chan, :] /= np.max(np.abs(x[chan, :]))
+
+     return x, xsr
+
+
+ def audiowrite(data, sr, outpath, norm=False):
+
+     logging.debug("Writing to: %s", outpath)
+
+     if np.max(np.abs(data)) == 0:  # in case all entries are 0s
+         logging.warning("All-zero output! Something is not quite right,"
+                         " check your input audio clip and model.")
+
+     outpath = os.path.abspath(outpath)
+     outdir = os.path.dirname(outpath)
+
+     if not os.path.exists(outdir):
+         os.makedirs(outdir)
+
+     sf.write(outpath, data, sr)
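As a usage note for the normalizer above: `onlineMVN_perframe` is stateful, so a caller threads the running mean, variance, and frame counter through successive calls. A minimal sketch (the 257-bin feature size assumes a 512-point DFT; the random features are stand-ins for real log-power frames):

```python
import numpy as np
from audiolib import onlineMVN_perframe  # this module

mu, sigmasq, count = None, None, 0
for _ in range(5):                        # pretend stream of five frames
    feat = np.random.randn(257)           # stand-in log-power frame (257 rfft bins)
    if mu is None:                        # onnx.py seeds the stats from frame 0
        mu, sigmasq = feat, feat ** 2
    norm, mu, sigmasq, count = onlineMVN_perframe(feat, count, mu, sigmasq)
```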
NSNet-baseline/nsnet-baseline-dnschallenge.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:983adad31c7b42534f84253d43fafce44d755cfab70d9c5822e8f737aee37169
+ size 5040632
NSNet-baseline/nsnet_eval_local.py ADDED
@@ -0,0 +1,78 @@
+ #!/usr/bin/env python3
+ """
+ Runnable script to invoke
+ noise_suppression.nsnet.inference.onnx
+ """
+
+ import os
+ import glob
+ import logging
+ import pathlib
+ import concurrent.futures
+ import argparse
+ import onnx as ns_onnx  # the local onnx.py module, not the PyPI onnx package
+
+
+ # pylint: disable=too-few-public-methods
+ class Worker:
+     """
+     Delayed constructor of NSNetInference to make sure each
+     multiprocessing worker has its own instance of the ONNX model.
+     """
+     nsnet = None
+
+     def __init__(self, *args):
+         self.args = args
+
+     def __call__(self, fname):
+         if Worker.nsnet is None:
+             # pylint: disable=no-value-for-parameter
+             Worker.nsnet = ns_onnx.NSNetInference(*self.args)
+         logging.debug("NSNet/ONNX: process file %s", fname)
+         Worker.nsnet(fname)
+
+
+ def _main():
+
+     parser = argparse.ArgumentParser(description='NSNet Noise Suppressor inference',
+                                      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+
+     parser.add_argument('--noisyspeechdir', required=True, help="Input directory with noisy WAV files")
+     parser.add_argument('--enhanceddir', required=True, help="Output directory to save enhanced WAV files")
+     parser.add_argument('--modelpath', required=True, help="ONNX model to use for inference")
+     parser.add_argument('--window_length', type=float, default=0.02)
+     parser.add_argument('--hopfraction', type=float, default=0.5)
+     parser.add_argument('--dft_size', type=int, default=512)
+     parser.add_argument('--sampling_rate', type=int, default=16000)
+     parser.add_argument('--spectral_floor', type=float, default=-120.0)
+     parser.add_argument('--timesignal_floor', type=float, default=1e-12)
+     parser.add_argument('--audioformat', default="*.wav")
+     parser.add_argument('--num_workers', type=int, default=4,
+                         help="Number of OS processes to run in parallel")
+     parser.add_argument('--chunksize', type=int, default=1,
+                         help="Number of files per worker to process in one batch")
+
+     args = parser.parse_args()
+
+     logging.info("NSNet inference args: %s", args)
+
+     input_filelist = glob.glob(os.path.join(args.noisyspeechdir, args.audioformat))
+     pathlib.Path(args.enhanceddir).mkdir(parents=True, exist_ok=True)
+
+     worker = Worker(args.modelpath, args.window_length, args.hopfraction,
+                     args.dft_size, args.sampling_rate, args.enhanceddir)
+
+     logging.debug("NSNet local workers start with %d input files", len(input_filelist))
+
+     # with concurrent.futures.ThreadPoolExecutor(max_workers=args.num_workers) as executor:
+     #     executor.map(worker, input_filelist, chunksize=args.chunksize)
+     for fname in input_filelist:  # serial fallback; see the parallel note below
+         worker(fname)
+
+     logging.info("NSNet local workers complete")
+
+
+ logging.basicConfig(
+     format='%(asctime)s %(levelname)s %(message)s',
+     level=logging.DEBUG)  # Use logging.WARNING in prod
+
+ if __name__ == '__main__':
+     _main()
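The commented-out executor above uses threads, but the `Worker` docstring describes per-process model instances; with a thread pool, all threads would end up sharing one `Worker.nsnet`. One way to match the docstring's intent (a sketch, not part of the original script) is a process pool, where each OS process gets its own copy of the class attribute and therefore builds its own onnxruntime session on first use:

```python
import concurrent.futures

with concurrent.futures.ProcessPoolExecutor(max_workers=args.num_workers) as executor:
    # map() drives the Worker callable over all files; chunksize batches them
    list(executor.map(worker, input_filelist, chunksize=args.chunksize))
```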
NSNet-baseline/onnx.py ADDED
@@ -0,0 +1,108 @@
+ """
+ Run NSNet inference using onnxruntime.
+ """
+
+ import os
+ import math
+ import logging
+
+ import numpy as np
+ import soundfile as sf
+ import onnxruntime
+
+ import audiolib
+
+
+ # pylint: disable=too-few-public-methods
+ class NSNetInference:
+     "Apply NSNet ONNX model to WAV files"
+
+     def __init__(self, model_path, window_length, hop_fraction,
+                  dft_size, sampling_rate, output_dir=None,
+                  spectral_floor=-120.0, timesignal_floor=1e-12):
+         self.hop_fraction = hop_fraction
+         self.dft_size = dft_size
+         self.sampling_rate = sampling_rate
+         self.output_dir = output_dir
+         self.spectral_floor = spectral_floor
+         self.timesignal_floor = timesignal_floor
+         self.framesize = int(window_length * sampling_rate)
+         self.wind = audiolib.hamming(self.framesize, hop=hop_fraction)
+         self.model = onnxruntime.InferenceSession(model_path)
+
+     # pylint: disable=too-many-locals,invalid-name
+     def __call__(self, noisy_speech_filename, output_dir=None):
+         "Apply NSNet model to one file and produce an output file with clean speech."
+
+         enhanced_filename = os.path.join(output_dir or self.output_dir,
+                                          os.path.basename(noisy_speech_filename))
+
+         logging.info("NSNet inference: %s", noisy_speech_filename)
+         sig, sample_rate = sf.read(noisy_speech_filename)
+
+         ssize = len(sig)
+         logging.debug('ssize: %d', ssize)
+         fsize = len(self.wind)
+         hsize = int(self.hop_fraction * self.framesize)
+
+         sstart = hsize - fsize  # the first frame starts one hop before the signal
+         logging.debug('sstart: %d', sstart)
+         send = ssize
+         nframe = math.ceil((send - sstart) / hsize)
+         zpleft = -sstart
+         zpright = (nframe - 1) * hsize + fsize - zpleft - ssize
+
+         if zpleft > 0 or zpright > 0:
+             sigpad = np.zeros(ssize + zpleft + zpright)
+             sigpad[zpleft:len(sigpad) - zpright] = sig
+         else:
+             sigpad = sig
+
+         sout = np.zeros(nframe * hsize)
+         x_old = np.zeros(hsize)
+
+         model_input_names = [inp.name for inp in self.model.get_inputs()]
+         # Zero-initialize the recurrent-state inputs (every input after the
+         # spectral feature); dynamic dimensions default to 1.
+         model_inputs = {
+             inp.name: np.zeros(
+                 [dim if isinstance(dim, int) else 1 for dim in inp.shape],
+                 dtype=np.float32)
+             for inp in self.model.get_inputs()[1:]}
+
+         mu = None
+         sigmasquare = None
+         frame_count = 0
+
+         for frame_sampleindex in range(0, nframe * hsize, hsize):
+
+             # each frame starts at the middle of the previous one and spans fsize samples
+             sigpadframe = sigpad[frame_sampleindex:frame_sampleindex + fsize] * self.wind
+
+             xmag, xphs = audiolib.magphasor(audiolib.stft(
+                 sigpadframe, self.sampling_rate, self.wind,
+                 self.hop_fraction, self.dft_size, synth=True, zphase=False))
+
+             feat = audiolib.logpow(xmag, floor=self.spectral_floor)
+
+             if frame_sampleindex == 0:  # seed the running stats from the first frame
+                 mu = feat
+                 sigmasquare = feat**2
+
+             norm_feat, mu, sigmasquare, frame_count = audiolib.onlineMVN_perframe(
+                 feat, frame_counter=frame_count, mu=mu, sigmasquare=sigmasquare,
+                 frameshift=0.01, tauFeat=3., tauFeatInit=0.1, t_init=0.1)
+
+             norm_feat = norm_feat[np.newaxis, np.newaxis, :]
+
+             model_inputs['input'] = np.float32(norm_feat)
+             model_outputs = self.model.run(None, model_inputs)
+             # Feed the recurrent states returned by the model back in as the next
+             # frame's state inputs; 'input' itself is overwritten above each frame.
+             model_inputs = dict(zip(model_input_names, model_outputs))
+
+             mask = model_outputs[0].squeeze()  # per-bin suppression gain
+             x_enh = audiolib.istft(
+                 (xmag * mask) * xphs, sample_rate, self.wind, self.dft_size, zphase=False)
+
+             # Overlap-add: emit one hop of output from this frame plus the previous tail
+             sout[frame_sampleindex:frame_sampleindex + hsize] = x_old + x_enh[0:hsize]
+             x_old = x_enh[hsize:fsize]
+
+         xfinal = sout
+         audiolib.audiowrite(xfinal, sample_rate, enhanced_filename, norm=False)
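To make the padding arithmetic in `__call__` concrete, here is a worked instance with the script's default parameters (20 ms window at 16 kHz, hop fraction 0.5) and an assumed one-second clip:

```python
import math

fsize = 320                 # len(self.wind): 0.02 s * 16000 Hz
hsize = 160                 # int(0.5 * 320)
ssize = 16000               # one second of samples (assumed clip length)
sstart = hsize - fsize      # -160: the first frame starts one hop early
nframe = math.ceil((ssize - sstart) / hsize)             # ceil(16160 / 160) = 101
zpleft = -sstart                                         # 160 zeros prepended
zpright = (nframe - 1) * hsize + fsize - zpleft - ssize  # 160 zeros appended
assert (nframe, zpleft, zpright) == (101, 160, 160)
```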
README.md CHANGED
@@ -1,3 +1,130 @@
- ---
- license: mit
- ---
+ # Deep Noise Suppression (DNS) Challenge - Interspeech 2020
+
+ This repository contains the datasets and scripts required for the DNS challenge. For more details about the challenge, please visit https://dns-challenge.azurewebsites.net/ and refer to our [paper](https://arxiv.org/ftp/arxiv/papers/2001/2001.08662.pdf).
+
+ ## Repo details:
+ * The **datasets** directory contains the clean speech and noise clips.
+ * The **NSNet-baseline** directory contains the inference scripts and the ONNX model for the baseline speech enhancer, **Noise Suppression Net (NSNet)**.
+ * **noisyspeech_synthesizer_singleprocess.py** - synthesizes noisy-clean speech pairs for training purposes (the core mixing idea is sketched after this list).
+ * **noisyspeech_synthesizer.cfg** - the configuration file used to synthesize the data. Users must specify its parameters accurately.
+ * **audiolib.py** - contains the modules required to synthesize the datasets.
+ * **utils.py** - contains utility functions required to synthesize the data.
+ * **unit_tests_synthesizer.py** - contains the unit tests that check the sanity of the data.
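The core of that synthesis is SNR mixing: scale the noise so that the clean/noise RMS ratio hits the target SNR, then add. A minimal sketch of the idea, distilled from `snr_mixer` in audiolib.py (the function name here is illustrative; the real function also pads, normalizes levels, and guards against clipping):

```python
import numpy as np

EPS = np.finfo(float).eps

def mix_at_snr(clean, noise, snr_db):
    """Scale `noise` so that rms(clean) / rms(scaled noise) matches snr_db, then mix."""
    rms_clean = np.sqrt((clean ** 2).mean())
    rms_noise = np.sqrt((noise ** 2).mean())
    noise = noise * (rms_clean / (10 ** (snr_db / 20)) / (rms_noise + EPS))
    return clean + noise
```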
+
+ ## Prerequisites
+ - Python 3.0 and above
+ - Soundfile (pip install pysoundfile), librosa
+
+ ## Usage:
+ 1. Install librosa
+ ```
+ pip install librosa
+ ```
+ 2. Install Git Large File Storage for faster download of the datasets.
+ ```
+ git lfs install
+ git lfs track "*.wav"
+ git add .gitattributes
+ ```
+ 3. Clone the repository.
+ ```
+ git clone https://github.com/microsoft/DNS-Challenge DNS-Challenge
+ ```
+ 4. Edit **noisyspeech_synthesizer.cfg** to include the paths to the clean speech and noise directories. Also specify the destination directories for the synthesized data and the logs.
+ 5. Create the dataset
+ ```
+ python noisyspeech_synthesizer_multiprocessing.py
+ ```
+
+ ## Citation:
+ For the datasets and the DNS challenge:<br />
+
+ ```BibTex
+ @article{reddy2020interspeech,
+ title={The INTERSPEECH 2020 Deep Noise Suppression Challenge: Datasets, Subjective Testing Framework, and Challenge Results},
+ author={Reddy, Chandan KA and Gopal, Vishak and Cutler, Ross and Beyrami, Ebrahim and Cheng, Roger and Dubey, Harishchandra and Matusevych, Sergiy and Aichner, Robert and Aazami, Ashkan and Braun, Sebastian and others},
+ journal={arXiv preprint arXiv:2005.13981},
+ year={2020}
+ }
+ ```
+
+ The baseline NSNet noise suppression:<br />
+ ```BibTex
+ @INPROCEEDINGS{9054254, author={Y. {Xia} and S. {Braun} and C. K. A. {Reddy}
+ and H. {Dubey} and R. {Cutler} and I. {Tashev}},
+ booktitle={ICASSP 2020 - 2020 IEEE International Conference on Acoustics,
+ Speech and Signal Processing (ICASSP)},
+ title={Weighted Speech Distortion Losses for Neural-Network-Based Real-Time Speech Enhancement},
+ year={2020}, volume={}, number={}, pages={871-875},}
+ ```
+
+
+ # Contributing
+
+ This project welcomes contributions and suggestions. Most contributions require you to agree to a
+ Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
+ the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
+
+ When you submit a pull request, a CLA bot will automatically determine whether you need to provide
+ a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
+ provided by the bot. You will only need to do this once across all repos using our CLA.
+
+ This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+ For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+ contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+ # Legal Notices
+
+ Microsoft and any contributors grant you a license to the Microsoft documentation and other content
+ in this repository under the [Creative Commons Attribution 4.0 International Public License](https://creativecommons.org/licenses/by/4.0/legalcode),
+ see the [LICENSE](LICENSE) file, and grant you a license to any code in the repository under the [MIT License](https://opensource.org/licenses/MIT), see the
+ [LICENSE-CODE](LICENSE-CODE) file.
+
+ Microsoft, Windows, Microsoft Azure and/or other Microsoft products and services referenced in the documentation
+ may be either trademarks or registered trademarks of Microsoft in the United States and/or other countries.
+ The licenses for this project do not grant you rights to use any Microsoft names, logos, or trademarks.
+ Microsoft's general trademark guidelines can be found at http://go.microsoft.com/fwlink/?LinkID=254653.
+
+ Privacy information can be found at https://privacy.microsoft.com/en-us/
+
+ Microsoft and any contributors reserve all other rights, whether under their respective copyrights, patents,
+ or trademarks, whether by implication, estoppel or otherwise.
+
+
+ ## Dataset licenses
+ MICROSOFT PROVIDES THE DATASETS ON AN "AS IS" BASIS. MICROSOFT MAKES NO WARRANTIES, EXPRESS OR IMPLIED, GUARANTEES OR CONDITIONS WITH RESPECT TO YOUR USE OF THE DATASETS. TO THE EXTENT PERMITTED UNDER YOUR LOCAL LAW, MICROSOFT DISCLAIMS ALL LIABILITY FOR ANY DAMAGES OR LOSSES, INCLUDING DIRECT, CONSEQUENTIAL, SPECIAL, INDIRECT, INCIDENTAL OR PUNITIVE, RESULTING FROM YOUR USE OF THE DATASETS.
+
+ The datasets are provided under the original terms under which Microsoft received them. See below for more information about each dataset.
+
+ The datasets used in this project are licensed as follows:
+ 1. Clean speech:
+ * https://librivox.org/; License: https://librivox.org/pages/public-domain/
+ * PTDB-TUG: Pitch Tracking Database from Graz University of Technology https://www.spsc.tugraz.at/databases-and-tools/ptdb-tug-pitch-tracking-database-from-graz-university-of-technology.html; License: http://opendatacommons.org/licenses/odbl/1.0/
+ * Edinburgh 56 speaker dataset: https://datashare.is.ed.ac.uk/handle/10283/2791; License: https://datashare.is.ed.ac.uk/bitstream/handle/10283/2791/license_text?sequence=11&isAllowed=y
+ 2. Noise:
+ * Audioset: https://research.google.com/audioset/index.html; License: https://creativecommons.org/licenses/by/4.0/
+ * Freesound: https://freesound.org/ Only files with CC0 licenses were selected; License: https://creativecommons.org/publicdomain/zero/1.0/
+ * Demand: https://zenodo.org/record/1227121#.XRKKxYhKiUk; License: https://creativecommons.org/licenses/by-sa/3.0/deed.en_CA
+
+ ## Code license
+ MIT License
+
+ Copyright (c) Microsoft Corporation.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE
SECURITY.md ADDED
@@ -0,0 +1,41 @@
+ <!-- BEGIN MICROSOFT SECURITY.MD V0.0.3 BLOCK -->
+
+ ## Security
+
+ Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, including [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+
+ If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://docs.microsoft.com/en-us/previous-versions/tn-archive/cc751383(v=technet.10)), please report it to us as described below.
+
+ ## Reporting Security Issues
+
+ **Please do not report security vulnerabilities through public GitHub issues.**
+
+ Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://msrc.microsoft.com/create-report).
+
+ If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://www.microsoft.com/en-us/msrc/pgp-key-msrc).
+
+ You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).
+
+ Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
+
+ * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
+ * Full paths of source file(s) related to the manifestation of the issue
+ * The location of the affected source code (tag/branch/commit or direct URL)
+ * Any special configuration required to reproduce the issue
+ * Step-by-step instructions to reproduce the issue
+ * Proof-of-concept or exploit code (if possible)
+ * Impact of the issue, including how an attacker might exploit the issue
+
+ This information will help us triage your report more quickly.
+
+ If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://microsoft.com/msrc/bounty) page for more details about our active programs.
+
+ ## Preferred Languages
+
+ We prefer all communications to be in English.
+
+ ## Policy
+
+ Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://www.microsoft.com/en-us/msrc/cvd).
+
+ <!-- END MICROSOFT SECURITY.MD BLOCK -->
__pycache__/audiolib.cpython-36.pyc ADDED
Binary file (7.9 kB)
 
__pycache__/utils.cpython-36.pyc ADDED
Binary file (1.53 kB)
 
audiolib.py ADDED
@@ -0,0 +1,297 @@
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ @author: chkarada
4
+ """
5
+ import os
6
+ import numpy as np
7
+ import soundfile as sf
8
+ import subprocess
9
+ import glob
10
+ import librosa
11
+ import random
12
+ import tempfile
13
+
14
+ EPS = np.finfo(float).eps
15
+ np.random.seed(0)
16
+
17
+ def is_clipped(audio, clipping_threshold=0.99):
18
+ return any(abs(audio) > clipping_threshold)
19
+
20
+ def normalize(audio, target_level=-25):
21
+ '''Normalize the signal to the target level'''
22
+ rms = (audio ** 2).mean() ** 0.5
23
+ scalar = 10 ** (target_level / 20) / (rms+EPS)
24
+ audio = audio * scalar
25
+ return audio
26
+
27
+ def normalize_segmental_rms(audio, rms, target_level=-25):
28
+ '''Normalize the signal to the target level
29
+ based on segmental RMS'''
30
+ scalar = 10 ** (target_level / 20) / (rms+EPS)
31
+ audio = audio * scalar
32
+ return audio
33
+
34
+ def audioread(path, norm=False, start=0, stop=None, target_level=-25):
35
+ '''Function to read audio'''
36
+
37
+ path = os.path.abspath(path)
38
+ if not os.path.exists(path):
39
+ raise ValueError("[{}] does not exist!".format(path))
40
+ try:
41
+ audio, sample_rate = sf.read(path, start=start, stop=stop)
42
+ except RuntimeError: # fix for sph pcm-embedded shortened v2
43
+ print('WARNING: Audio type not supported')
44
+
45
+ if len(audio.shape) == 1: # mono
46
+ if norm:
47
+ rms = (audio ** 2).mean() ** 0.5
48
+ scalar = 10 ** (target_level / 20) / (rms+EPS)
49
+ audio = audio * scalar
50
+ else: # multi-channel
51
+ audio = audio.T
52
+ audio = audio.sum(axis=0)/audio.shape[0]
53
+ if norm:
54
+ audio = normalize(audio, target_level)
55
+
56
+ return audio, sample_rate
57
+
58
+
59
+ def audiowrite(destpath, audio, sample_rate=16000, norm=False, target_level=-25, \
60
+ clipping_threshold=0.99, clip_test=False):
61
+ '''Function to write audio'''
62
+
63
+ if clip_test:
64
+ if is_clipped(audio, clipping_threshold=clipping_threshold):
65
+ raise ValueError("Clipping detected in audiowrite()! " + \
66
+ destpath + " file not written to disk.")
67
+
68
+ if norm:
69
+ audio = normalize(audio, target_level)
70
+ max_amp = max(abs(audio))
71
+ if max_amp >= clipping_threshold:
72
+ audio = audio/max_amp * (clipping_threshold-EPS)
73
+
74
+ destpath = os.path.abspath(destpath)
75
+ destdir = os.path.dirname(destpath)
76
+
77
+ if not os.path.exists(destdir):
78
+ os.makedirs(destdir)
79
+
80
+ sf.write(destpath, audio, sample_rate)
81
+ return
82
+
83
+
84
+ def add_reverb(sasxExe, input_wav, filter_file, output_wav):
85
+ ''' Function to add reverb'''
86
+ command_sasx_apply_reverb = "{0} -r {1} \
87
+ -f {2} -o {3}".format(sasxExe, input_wav, filter_file, output_wav)
88
+
89
+ subprocess.call(command_sasx_apply_reverb)
90
+ return output_wav
91
+
92
+
93
+ def add_clipping(audio, max_thresh_perc=0.8):
94
+ '''Function to add clipping'''
95
+ threshold = max(abs(audio))*max_thresh_perc
96
+ audioclipped = np.clip(audio, -threshold, threshold)
97
+ return audioclipped
98
+
99
+
100
+ def adsp_filter(Adspvqe, nearEndInput, nearEndOutput, farEndInput):
101
+
102
+ command_adsp_clean = "{0} --breakOnErrors 0 --sampleRate 16000 --useEchoCancellation 0 \
103
+ --operatingMode 2 --useDigitalAgcNearend 0 --useDigitalAgcFarend 0 \
104
+ --useVirtualAGC 0 --useComfortNoiseGenerator 0 --useAnalogAutomaticGainControl 0 \
105
+ --useNoiseReduction 0 --loopbackInputFile {1} --farEndInputFile {2} \
106
+ --nearEndInputFile {3} --nearEndOutputFile {4}".format(Adspvqe,
107
+ farEndInput, farEndInput, nearEndInput, nearEndOutput)
108
+ subprocess.call(command_adsp_clean)
109
+
110
+
111
+ def snr_mixer(params, clean, noise, snr, target_level=-25, clipping_threshold=0.99):
112
+ '''Function to mix clean speech and noise at various SNR levels'''
113
+ cfg = params['cfg']
114
+ if len(clean) > len(noise):
115
+ noise = np.append(noise, np.zeros(len(clean)-len(noise)))
116
+ else:
117
+ clean = np.append(clean, np.zeros(len(noise)-len(clean)))
118
+
119
+ # Normalizing to -25 dB FS
120
+ clean = clean/(max(abs(clean))+EPS)
121
+ clean = normalize(clean, target_level)
122
+ rmsclean = (clean**2).mean()**0.5
123
+
124
+ noise = noise/(max(abs(noise))+EPS)
125
+ noise = normalize(noise, target_level)
126
+ rmsnoise = (noise**2).mean()**0.5
127
+
128
+ # Set the noise level for a given SNR
129
+ noisescalar = rmsclean / (10**(snr/20)) / (rmsnoise+EPS)
130
+ noisenewlevel = noise * noisescalar
131
+
132
+ # Mix noise and clean speech
133
+ noisyspeech = clean + noisenewlevel
134
+
135
+ # Randomly select an RMS level between -35 dBFS and -15 dBFS and normalize noisyspeech to that value
136
+ # Clipping can still occur here, though with low probability; it is checked and corrected below.
137
+ noisy_rms_level = np.random.randint(params['target_level_lower'], params['target_level_upper'])
138
+ rmsnoisy = (noisyspeech**2).mean()**0.5
139
+ scalarnoisy = 10 ** (noisy_rms_level / 20) / (rmsnoisy+EPS)
140
+ noisyspeech = noisyspeech * scalarnoisy
141
+ clean = clean * scalarnoisy
142
+ noisenewlevel = noisenewlevel * scalarnoisy
143
+
144
+ # Final check to see if there are any amplitudes exceeding +/- 1. If so, normalize all the signals accordingly
145
+ if is_clipped(noisyspeech):
146
+ noisyspeech_maxamplevel = max(abs(noisyspeech))/(clipping_threshold-EPS)
147
+ noisyspeech = noisyspeech/noisyspeech_maxamplevel
148
+ clean = clean/noisyspeech_maxamplevel
149
+ noisenewlevel = noisenewlevel/noisyspeech_maxamplevel
150
+ noisy_rms_level = int(20*np.log10(scalarnoisy/noisyspeech_maxamplevel*(rmsnoisy+EPS)))
151
+
152
+ return clean, noisenewlevel, noisyspeech, noisy_rms_level
153
+
154
+
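A worked check of the noise-scaling step in `snr_mixer` (illustrative numbers, not part of the repo; EPS is omitted for brevity): once both signals are normalized to the same RMS, scaling the noise by 10^(-snr/20) yields exactly the requested SNR.

```python
import numpy as np

rng = np.random.default_rng(0)
clean = rng.standard_normal(16000) * 0.05  # stand-ins for the normalized speech/noise
noise = rng.standard_normal(16000) * 0.05
snr = 20  # target SNR in dB

rmsclean = (clean ** 2).mean() ** 0.5
rmsnoise = (noise ** 2).mean() ** 0.5
noisescalar = rmsclean / (10 ** (snr / 20)) / rmsnoise  # same formula as in snr_mixer()
scaled_noise = noise * noisescalar

achieved = 20 * np.log10(rmsclean / (scaled_noise ** 2).mean() ** 0.5)
print(round(achieved, 1))  # 20.0
```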
155
+ def segmental_snr_mixer(params, clean, noise, snr, target_level=-25, clipping_threshold=0.99):
156
+ '''Function to mix clean speech and noise at various segmental SNR levels'''
157
+ cfg = params['cfg']
158
+ if len(clean) > len(noise):
159
+ noise = np.append(noise, np.zeros(len(clean)-len(noise)))
160
+ else:
161
+ clean = np.append(clean, np.zeros(len(noise)-len(clean)))
162
+ clean = clean/(max(abs(clean))+EPS)
163
+ noise = noise/(max(abs(noise))+EPS)
164
+ rmsclean, rmsnoise = active_rms(clean=clean, noise=noise)
165
+ clean = normalize_segmental_rms(clean, rms=rmsclean, target_level=target_level)
166
+ noise = normalize_segmental_rms(noise, rms=rmsnoise, target_level=target_level)
167
+ # Set the noise level for a given SNR
168
+ noisescalar = rmsclean / (10**(snr/20)) / (rmsnoise+EPS)
169
+ noisenewlevel = noise * noisescalar
170
+
171
+ # Mix noise and clean speech
172
+ noisyspeech = clean + noisenewlevel
173
+ # Randomly select an RMS level between -35 dBFS and -15 dBFS and normalize noisyspeech to that value
174
+ # Clipping can still occur here, though with low probability; it is checked and corrected below.
175
+ noisy_rms_level = np.random.randint(params['target_level_lower'], params['target_level_upper'])
176
+ rmsnoisy = (noisyspeech**2).mean()**0.5
177
+ scalarnoisy = 10 ** (noisy_rms_level / 20) / (rmsnoisy+EPS)
178
+ noisyspeech = noisyspeech * scalarnoisy
179
+ clean = clean * scalarnoisy
180
+ noisenewlevel = noisenewlevel * scalarnoisy
181
+ # Final check to see if there are any amplitudes exceeding +/- 1. If so, normalize all the signals accordingly
182
+ if is_clipped(noisyspeech):
183
+ noisyspeech_maxamplevel = max(abs(noisyspeech))/(clipping_threshold-EPS)
184
+ noisyspeech = noisyspeech/noisyspeech_maxamplevel
185
+ clean = clean/noisyspeech_maxamplevel
186
+ noisenewlevel = noisenewlevel/noisyspeech_maxamplevel
187
+ noisy_rms_level = int(20*np.log10(scalarnoisy/noisyspeech_maxamplevel*(rmsnoisy+EPS)))
188
+
189
+ return clean, noisenewlevel, noisyspeech, noisy_rms_level
190
+
191
+
192
+ def active_rms(clean, noise, fs=16000, energy_thresh=-50):
193
+ '''Returns the RMS of the clean and noise signals, computed only over segments where the noise is active'''
194
+ window_size = 100 # in ms
195
+ window_samples = int(fs*window_size/1000)
196
+ sample_start = 0
197
+ noise_active_segs = []
198
+ clean_active_segs = []
199
+
200
+ while sample_start < len(noise):
201
+ sample_end = min(sample_start + window_samples, len(noise))
202
+ noise_win = noise[sample_start:sample_end]
203
+ clean_win = clean[sample_start:sample_end]
204
+ noise_seg_rms = 20*np.log10((noise_win**2).mean()+EPS)
205
+ # Keep only segments whose noise energy (in dB) is above the threshold
206
+ if noise_seg_rms > energy_thresh:
207
+ noise_active_segs = np.append(noise_active_segs, noise_win)
208
+ clean_active_segs = np.append(clean_active_segs, clean_win)
209
+ sample_start += window_samples
210
+
211
+ if len(noise_active_segs)!=0:
212
+ noise_rms = (noise_active_segs**2).mean()**0.5
213
+ else:
214
+ noise_rms = EPS
215
+
216
+ if len(clean_active_segs)!=0:
217
+ clean_rms = (clean_active_segs**2).mean()**0.5
218
+ else:
219
+ clean_rms = EPS
220
+
221
+ return clean_rms, noise_rms
222
+
223
+
224
+ def activitydetector(audio, fs=16000, energy_thresh=0.13, target_level=-25):
225
+ '''Return the percentage of the time the audio signal is above an energy threshold'''
226
+
227
+ audio = normalize(audio, target_level)
228
+ window_size = 50 # in ms
229
+ window_samples = int(fs*window_size/1000)
230
+ sample_start = 0
231
+ cnt = 0
232
+ prev_energy_prob = 0
233
+ active_frames = 0
234
+
235
+ a = -1
236
+ b = 0.2
237
+ alpha_rel = 0.05
238
+ alpha_att = 0.8
239
+
240
+ while sample_start < len(audio):
241
+ sample_end = min(sample_start + window_samples, len(audio))
242
+ audio_win = audio[sample_start:sample_end]
243
+ frame_rms = 20*np.log10(sum(audio_win**2)+EPS)
244
+ frame_energy_prob = 1./(1+np.exp(-(a+b*frame_rms)))
245
+
246
+ if frame_energy_prob > prev_energy_prob:
247
+ smoothed_energy_prob = frame_energy_prob*alpha_att + prev_energy_prob*(1-alpha_att)
248
+ else:
249
+ smoothed_energy_prob = frame_energy_prob*alpha_rel + prev_energy_prob*(1-alpha_rel)
250
+
251
+ if smoothed_energy_prob > energy_thresh:
252
+ active_frames += 1
253
+ prev_energy_prob = frame_energy_prob
254
+ sample_start += window_samples
255
+ cnt += 1
256
+
257
+ perc_active = active_frames/cnt
258
+ return perc_active
259
+
260
+
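The detector above maps each frame's energy in dB through a logistic function, p = 1 / (1 + exp(-(a + b*E))), then smooths it with asymmetric attack/release coefficients. A quick evaluation of the frame probability (the energy value here is hypothetical):

```python
import numpy as np

a, b = -1, 0.2            # constants used in activitydetector()
frame_energy_db = -10.0   # hypothetical frame energy in dB
p = 1.0 / (1.0 + np.exp(-(a + b * frame_energy_db)))
print(round(p, 3))        # 0.047 -> well below the default 0.13 activity threshold
```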
261
+ def resampler(input_dir, target_sr=16000, ext='*.wav'):
262
+ '''Resamples the audio files in input_dir to target_sr'''
263
+ files = glob.glob(f"{input_dir}/"+ext)
264
+ for pathname in files:
265
+ print(pathname)
266
+ try:
267
+ audio, fs = audioread(pathname)
268
+ audio_resampled = librosa.core.resample(audio, fs, target_sr)
269
+ audiowrite(pathname, audio_resampled, target_sr)
270
+ except Exception as e:
271
+ print(f"Skipping {pathname}: {e}")
+ continue
272
+
273
+
274
+ def audio_segmenter(input_dir, dest_dir, segment_len=10, ext='*.wav'):
275
+ '''Segments the audio clips in dir to segment_len in secs'''
276
+ files = glob.glob(f"{input_dir}/"+ext)
277
+ for i in range(len(files)):
278
+ audio, fs = audioread(files[i])
279
+
280
+ if len(audio) > (segment_len*fs) and len(audio)%(segment_len*fs) != 0:
281
+ audio = np.append(audio, audio[0 : segment_len*fs - (len(audio)%(segment_len*fs))])
282
+ if len(audio) < (segment_len*fs):
283
+ while len(audio) < (segment_len*fs):
284
+ audio = np.append(audio, audio)
285
+ audio = audio[:segment_len*fs]
286
+
287
+ num_segments = int(len(audio)/(segment_len*fs))
288
+ audio_segments = np.split(audio, num_segments)
289
+
290
+ basefilename = os.path.basename(files[i])
291
+ basename, ext = os.path.splitext(basefilename)
292
+
293
+ for j in range(len(audio_segments)):
294
+ newname = basename+'_'+str(j)+ext
295
+ destpath = os.path.join(dest_dir,newname)
296
+ audiowrite(destpath, audio_segments[j], fs)
297
+
datasets/Readme.md ADDED
@@ -0,0 +1,23 @@
1
+ # Datasets for training
2
+ The datasets are downloaded when you clone the repository. Run **git lfs install** first for a faster download.
3
+ ## Clean Speech
4
+ * The clean speech dataset is derived from the public audio books dataset called Librivox.
5
+ * Librivox has recordings of volunteers reading over 10,000 public-domain audio books in various languages, the majority of them in English. In total, there are 11,350 speakers.
6
+ * A portion of these recordings is of excellent quality, meaning the speech was captured with good microphones in silent, low-reverberation environments.
7
+ * Many recordings, however, suffer from speech distortion, background noise, and reverberation, so it is important to filter the data by speech quality.
8
+ * We used the online subjective test framework ITU-T P.808 to sort the book chapters by subjective quality.
9
+ * The audio chapters in Librivox vary in length from a few seconds to several minutes.
10
+ * We sampled 10 random clips from each book chapter, each 10 seconds long. Each clip received 3 ratings, and the Mean Opinion Score (MOS) averaged across all clips was used as the chapter's MOS.
11
+ * The upper quartile with respect to MOS was chosen as our clean speech dataset, i.e., the top 25% of chapters ranked by MOS (see the sketch after this list).
12
+ * The upper quartile comprised audio chapters with 4.3 ≤ MOS ≤ 5. We removed clips from speakers with less than 15 minutes of speech. The resulting dataset has 500 hours of speech from 2150 speakers.
13
+ * All the filtered clips are then split into segments of 31 seconds.
14
+
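A minimal sketch of the quartile selection described above, assuming a hypothetical `chapter_mos.csv` with `chapter` and `mos` columns (the actual P.808 rating pipeline is not part of this repository):

```python
import pandas as pd

ratings = pd.read_csv("chapter_mos.csv")   # hypothetical file: columns 'chapter', 'mos'
cutoff = ratings["mos"].quantile(0.75)     # upper-quartile boundary by MOS
selected = ratings[ratings["mos"] >= cutoff]
print(len(selected), "chapters kept; MOS cutoff =", round(cutoff, 2))
```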
15
+ ## Noise
16
+ * The noise clips were selected from Audioset and Freesound.
17
+ * Audioset is a collection of about 2 million human-labeled 10-second sound clips drawn from YouTube videos, covering about 600 audio event classes.
18
+ * As with the Librivox data, certain audio event classes are overrepresented: there are over a million clips for the classes music and speech, but fewer than 200 for classes such as toothbrush and creak.
19
+ * Approximately 42% of the clips carry a single class label; the rest carry 2 to 15 labels.
20
+ * Hence, we developed a sampling approach to balance the dataset so that each class has at least 500 clips.
21
+ * We also used a speech activity detector (a trained classifier) to remove clips containing any speech activity, so that the noise suppression model does not learn to suppress speech itself as speech-like noise.
22
+ * The resulting dataset has about 150 audio classes and 60,000 clips. We augmented it with an additional 10,000 noise clips downloaded from the Freesound and DEMAND databases.
23
+ * The chosen noise types are those most relevant to VoIP applications.
datasets/blind_test_set/noreverb_fileid_0.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:83d84826625d87e2b9fe3a9fa415784d980fd11f7478db4726c2a39d84d15c7c
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_1.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eedb91d82ecfaf483152e391537ee6ff0657b43fe44c24b7191ad88f3d0eca52
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_10.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cfca25e122f507f4fbb8681277092f1e8a72633f869bf723a2a02573125563a7
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_100.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c29d04e43811a74cbc6f69e706b67855e7fe75217dffce929c296e6e404f1663
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_101.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:04dc396b0db79e79a21e794be9414ab073811b29239e1526b17d307b9dac85ca
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_102.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25b66356250646d25f998479fd7f4d5c41debb969aac81c7572323dcc425e554
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_103.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2a6a2c701f0219a7659da57774e6f7b7074cbd520917e8ffefcdea444c14d6ca
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_104.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:afadfcb7ab91622f581a910f13bad96a9b8da48263109b31c5601bbdc0ade100
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_105.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8851c4e5cf5e06e9071cd68610d0ee7e4e3d892b1a724f4de02e0b199461394c
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_106.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:66085325ae1e36e47a4f5d047ce6a8c2a1dff5123d35628af9e577b2ba8d6669
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_107.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2653ac97abdd771ee984fb254e68680004124709ca7a0565feaaecc6ddafe120
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_108.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6af66feaa9062c00c25354631fec314ae2db87ba0dc165289423881542254a71
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_109.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:23f3f72ae2a2e31ebfcd12bc16bd0bf412d8747687b077518d212a53341d7441
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_11.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dea61dcdf1a76529afb475adea80235fcd0999b41aa10f4c573d81384180fa98
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_110.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fe9aadd300caf3fbda574a7cab7d1a5699611d5fc12c725083218b43e3655b53
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_111.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b5fbda6eac538df04cd22cb10b5dcc4c1285e550bfc5210084fa269ff778620e
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_112.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e8a6e2154a82900654ccfaaf29ae0cf6a095d2164dec659a2597fffa77a05a6
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_113.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5c2c9403297bacda35e4d3267d3230cf34a55ab64a1f79e4fdb7f1e5451e3964
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_114.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce486cdfae524f572ded0abc679a437860497abd42edad354ade4722e1a9167d
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_116.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b0ba6285afd75e4f14a993996c95ff3d2a05ef5cc75a17cc7840c9f647115cda
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_117.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c99f21493d377ed166761e7ad8e726b6751b1a02a915fada430425e2e8fbbd2b
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_119.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:092c39f0933d32b39fa3eaabdb663294e9ff6f3e71a7d2972f1d1c33cc2066bb
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_12.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:189dd84d451c69bea77d06daa6ce2b2b8d22b81673e752cfa36e258774ccebbe
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_121.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f56eade1afe283235e9f833d6d307810d4accbe833bedbad412c20945f5b5e32
3
+ size 320044
datasets/blind_test_set/noreverb_fileid_128.wav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:93622f6c06a3edfa577f4605daf1d9ae0d588c7f3aa78db964b121d74f274ab2
3
+ size 320044
docs/IS2020_noisesuppchallenge_base_paper.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:626c1d27a6a7a89c33925c7af2d09b1c28b458502715ce38630ce0c49b1b4cf7
3
+ size 552172
index.html ADDED
@@ -0,0 +1,243 @@
1
+ <html>
2
+ <head>
3
+ <title>Model comparison</title>
4
+ <script src="https://code.jquery.com/jquery-3.4.1.js"></script>
5
+ <meta name="viewport" content="width=device-width, initial-scale=1">
6
+ <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/css/bootstrap.min.css">
7
+ <script src="filenamesAndModels.js"></script>
8
+ </head>
9
+ <style>
10
+ .navbar {
11
+ overflow: hidden;
12
+ background-color: #333;
13
+ font-family: Arial, Helvetica, sans-serif;
14
+ }
15
+
16
+ .navbar a {
17
+ float: left;
18
+ font-size: 16px;
19
+ color: white;
20
+ text-align: center;
21
+ padding: 14px 16px;
22
+ text-decoration: none;
23
+ }
24
+
25
+ .dropdown {
26
+ float: none; /* 'center' is not a valid value for float */
27
+ overflow: hidden;
28
+ }
29
+
30
+ .dropdown .dropbtn {
31
+ cursor: pointer;
32
+ font-size: 16px;
33
+ border: none;
34
+ outline: none;
35
+ color: white;
36
+ padding: 14px 16px;
37
+ background-color: inherit;
38
+ font-family: inherit;
39
+ margin: 0;
40
+ }
41
+
42
+ .navbar a:hover, .dropdown:hover .dropbtn, .dropbtn:focus {
43
+ background-color: red;
44
+ }
45
+
46
+ .dropdown-content {
47
+ display: none;
48
+ position: absolute;
49
+ background-color: #f9f9f9;
50
+ min-width: 160px;
51
+ box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2);
52
+ z-index: 1;
53
+ }
54
+
55
+ .dropdown-content a {
56
+ float: none;
57
+ color: black;
58
+ padding: 12px 16px;
59
+ text-decoration: none;
60
+ display: block;
61
+ text-align: left;
62
+ }
63
+
64
+ .dropdown-content a:hover {
65
+ background-color: #ddd;
66
+ }
67
+
68
+ .show {
69
+ display: block;
70
+ }
71
+ </style>
72
+ <script>
73
+
74
+
75
+
76
+ var currentCount = 0;
77
+ var currentFile = fileNames[currentCount];
78
+
79
+ console.log(currentCount);
80
+ console.log(currentFile);
81
+
82
+ var modelsCount = baseUrls.length;
83
+ var filesCount = fileNames.length;
84
+
85
+ var modifiedFiles = []
86
+
87
+ </script>
88
+ <div class="navbar">
89
+ <button class="btn btn-primary btn-lg" onclick="loadMsRecordings()">MS Recordings</button>
90
+ <button class="btn btn-primary btn-lg" onclick="loadAudiosetRecordings()">Audioset Recordings</button>
91
+ <button class="btn btn-primary btn-lg" onclick="loadReverbRecordings()">Synthetic Reverb Recordings</button>
92
+ <button class="btn btn-primary btn-lg" onclick="loadNoReverbRecordings()">Synthetic NoReverb Recordings</button>
93
+ Enter the noise type to filter on <input type="text" name="noiseType"><button class="btn btn-info btn-lg" onclick="searchNoiseType()">Search noise type</button>
94
+ </div>
95
+ <div class="container">
96
+ <h2>Audio Clips</h2>
97
+
98
+ <table class="table" id="table2">
99
+ <tbody>
100
+ <tr><td>Index</td><td id="index"></td></tr>
101
+ <tr><td>Progress</td><td><div class="progress"><div class="progress-bar" role="progressbar" style="width: 25%;" aria-valuenow="25" aria-valuemin="0" aria-valuemax="100">25%</div></div></td></tr>
102
+ <tr><td>Clipname</td><td id="clipname"></td></tr>
103
+ </tbody>
104
+ </table>
105
+
106
+ <div class="row">
107
+ <button class="btn btn-success btn-lg" onclick="previous()" style="margin: 10px"> Previous</button>
108
+ <button class="btn btn-primary btn-lg" onclick="next()" style="margin: 10px"> Next</button>
109
+ <button class="btn btn-primary btn-lg" onclick="skip10()" style="margin: 10px"> Skip 10</button>
110
+ <button class="btn btn-primary btn-lg" onclick="skip100()" style="margin: 10px"> Skip 100</button>
111
+ </div>
112
+
113
+ </div>
114
+
115
+ <script>
116
+
117
+ // setup
118
+
119
+ function setupIndexAndClip(){
120
+ let current = ((currentCount+1)*100/filesCount)+"%";
121
+ $("#index").html((currentCount+1)+" / "+filesCount);
122
+
123
+ if(modifiedFiles.length > 0) {
124
+ current = ((currentCount+1)*100/modifiedFiles.length)+"%";
125
+ $("#index").html((currentCount+1)+" / "+modifiedFiles.length);
126
+ }
127
+ $(".progress-bar").css("width", current);
128
+ $(".progress-bar").html(current);
129
+ $("#clipname").html(currentFile);
130
+ }
131
+
132
+ function setupSrcs(){
133
+ setupIndexAndClip();
134
+
135
+ for(let i=0; i<modelsCount; i++)
136
+ $("#clip"+i).attr("src", baseUrls[i]+currentFile);
137
+ }
138
+
139
+ function changeFileSet(prefix) {
140
+ modifiedFiles = [];
141
+
142
+ for(let i=0; i<filesCount; i++) {
143
+ if(fileNames[i].startsWith(prefix)) {
144
+ modifiedFiles.push(fileNames[i]);
145
+ }
146
+ }
147
+ currentCount = 0;
148
+ currentFile = modifiedFiles[currentCount];
149
+
150
+ setupSrcs();
151
+ }
152
+
153
+ function loadMsRecordings() {
154
+ changeFileSet("ms_");
155
+ }
156
+
157
+ function loadAudiosetRecordings() {
158
+ changeFileSet("audioset_");
159
+ }
160
+
161
+ function loadReverbRecordings() {
162
+ changeFileSet("reverb_");
163
+ }
164
+
165
+ function loadNoReverbRecordings() {
166
+ changeFileSet("noreverb_");
167
+ }
168
+
169
+ function searchNoiseType() {
170
+ modifiedFiles = [];
171
+
172
+ for(let i=0; i<filesCount; i++) {
173
+ console.log(document.getElementsByName('noiseType')[0].value);
174
+ if(fileNames[i].includes(document.getElementsByName('noiseType')[0].value)) {
175
+ modifiedFiles.push(fileNames[i]);
176
+ }
177
+ }
178
+
179
+ currentCount = 0;
180
+ if(modifiedFiles.length > 0) {
181
+ currentFile = modifiedFiles[currentCount];
182
+ } else {
183
+ currentFile = fileNames[currentCount];
184
+ }
185
+
186
+ setupSrcs();
187
+ }
188
+
189
+ function moveNextOrPrev(valueToAdd) {
190
+ if(modifiedFiles.length == 0) {
191
+ if(currentCount == (filesCount - valueToAdd))
192
+ alert("This is the last Clip. Hit 'Previous' to load the previous clip, or you may close the browser. ");
193
+ else{
194
+ currentCount = currentCount + valueToAdd;
195
+ currentFile = fileNames[currentCount];
196
+ setupSrcs();
197
+ }
198
+ } else {
199
+ if(currentCount == (modifiedFiles.length - valueToAdd))
200
+ alert("This is the last Clip. Hit 'Previous' to load the previous clip, or you may close the browser. ");
201
+ else{
202
+ currentCount = currentCount + valueToAdd;
203
+ currentFile = modifiedFiles[currentCount];
204
+ setupSrcs();
205
+ }
206
+ }
207
+ }
208
+
209
+ // set the src attributes to the next clip on clicking Next
210
+ function next(){
211
+ moveNextOrPrev(1);
212
+ }
213
+
214
+ function skip10(){
215
+ moveNextOrPrev(10);
216
+ }
217
+
218
+ function skip100(){
219
+ moveNextOrPrev(100);
220
+ }
221
+
222
+ function previous(){
223
+
224
+ if(currentCount == 0)
225
+ alert("This is the very first Clip. Hit 'Next' to load the next clip. ");
226
+ else{
227
+ currentCount--;
228
+ currentFile = fileNames[currentCount];
229
+ if(modifiedFiles.length > 0)
230
+ currentFile = modifiedFiles[currentCount];
231
+ setupSrcs();
232
+ }
233
+ }
234
+
235
+ setupIndexAndClip();
236
+
237
+ var tbody = $("#table2>tbody");
238
+ for(let i=0; i<modelsCount; i++)
239
+ tbody.append("<tr><td>"+modelsUsed[i]+"</td><td><audio controls id=clip"+i+" src='"+baseUrls[i]+currentFile+"' type='audio/wav'></audio></td></tr>"); // baseUrls[i], so each row starts on its own model's clip
240
+
241
+ </script>
242
+
243
+ </html>
noisyspeech_synthesizer.cfg ADDED
@@ -0,0 +1,66 @@
1
+ # Configuration for generating Noisy Speech Dataset
2
+
3
+ # - sampling_rate: Specify the sampling rate. Default is 16 kHz
4
+ # - audioformat: default is .wav
5
+ # - audio_length: Minimum length of each audio clip (noisy and clean speech), in seconds, that will be generated by augmenting utterances.
6
+ # - silence_length: Duration of silence introduced between clean speech utterances.
7
+ # - total_hours: Total number of hours of data required. Units are in hours.
8
+ # - snr_lower: Lower bound for SNR required (default: 0 dB)
9
+ # - snr_upper: Upper bound for SNR required (default: 40 dB)
10
+ # - target_level_lower: Lower bound for the target audio level before audiowrite (default: -35 dB)
11
+ # - target_level_upper: Upper bound for the target audio level before audiowrite (default: -15 dB)
12
+ # - total_snrlevels: Number of SNR levels required (default: 5, which means there are 5 levels between snr_lower and snr_upper)
13
+ # - clean_activity_threshold: Activity threshold for clean speech
14
+ # - noise_activity_threshold: Activity threshold for noise
15
+ # - fileindex_start: Starting file ID that will be used in filenames
16
+ # - fileindex_end: Last file ID that will be used in filenames
17
+ # - is_test_set: Set it to True if it is the test set, else False for the training set
18
+ # - noise_dir: Specify the directory path to all noise files
19
+ # - speech_dir: Specify the directory path to all clean speech files
20
+ # - noisy_destination: Specify path to the destination directory to store noisy speech
21
+ # - clean_destination: Specify path to the destination directory to store clean speech
22
+ # - noise_destination: Specify path to the destination directory to store the noise
23
+ # - log_dir: Specify path to the directory to store all the log files
24
+
25
+ # Configuration for unit tests
26
+ # - snr_test: Set to True if SNR test is required, else False
27
+ # - norm_test: Set to True if Normalization test is required, else False
28
+ # - sampling_rate_test: Set to True if Sampling Rate test is required, else False
29
+ # - clipping_test: Set to True if Clipping test is required, else False
30
+ # - unit_tests_log_dir: Specify path to the directory where you want to store logs
31
+
32
+ [noisy_speech]
33
+
34
+ sampling_rate: 16000
35
+ audioformat: *.wav
36
+ audio_length: 30
37
+ silence_length: 0.2
38
+ total_hours: 100
39
+ snr_lower: 0
40
+ snr_upper: 40
41
+ randomize_snr: True
42
+ target_level_lower: -35
43
+ target_level_upper: -15
44
+ total_snrlevels: 5
45
+ clean_activity_threshold: 0.6
46
+ noise_activity_threshold: 0.0
47
+ fileindex_start: None
48
+ fileindex_end: None
49
+ is_test_set: False
50
+ noise_dir: \datasets\noise
51
+ speech_dir: \datasets\clean
52
+ noisy_destination: \noisy
53
+ clean_destination: \clean
54
+ noise_destination: \noise
55
+ log_dir: \logs
56
+
57
+
58
+ # Unit tests config
59
+ snr_test: True
60
+ norm_test: True
61
+ sampling_rate_test: True
62
+ clipping_test: True
63
+
64
+ unit_tests_log_dir: .\unittests_logs
65
+
66
+
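With the directory paths above edited for your machine, the synthesizers read this file through their `--cfg` flag; the `[noisy_speech]` section name matches the default `--cfg_str`:

```
python noisyspeech_synthesizer_singleprocess.py --cfg noisyspeech_synthesizer.cfg --cfg_str noisy_speech
```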
noisyspeech_synthesizer_multiprocessing.py ADDED
@@ -0,0 +1,342 @@
1
+ """
2
+ @author: chkarada
3
+ """
4
+
5
+ # Note that this file picks the clean speech files randomly, so it does not guarantee that all
6
+ # source files will be used
7
+
8
+
9
+ import os
10
+ import glob
11
+ import argparse
12
+ import ast
13
+ import configparser as CP
14
+ from itertools import repeat
15
+ import multiprocessing
16
+ from multiprocessing import Pool
17
+ import random
18
+ from random import shuffle
19
+ import librosa
20
+ import numpy as np
21
+ from audiolib import is_clipped, audioread, audiowrite, snr_mixer, activitydetector
22
+ import utils
+ import pandas as pd # required by the optional speech_csv branch in main_body()
23
+
24
+
25
+ PROCESSES = multiprocessing.cpu_count()
26
+ MAXTRIES = 50
27
+ MAXFILELEN = 100
28
+
29
+ np.random.seed(2)
30
+ random.seed(3)
31
+
32
+ clean_counter = None
33
+ noise_counter = None
34
+
35
+ def init(args1, args2):
36
+ ''' store the counter for later use '''
37
+ global clean_counter, noise_counter
38
+ clean_counter = args1
39
+ noise_counter = args2
40
+
41
+
42
+ def build_audio(is_clean, params, filenum, audio_samples_length=-1):
43
+ '''Construct an audio signal from source files'''
44
+
45
+ fs_output = params['fs']
46
+ silence_length = params['silence_length']
47
+ if audio_samples_length == -1:
48
+ audio_samples_length = int(params['audio_length']*params['fs'])
49
+
50
+ output_audio = np.zeros(0)
51
+ remaining_length = audio_samples_length
52
+ files_used = []
53
+ clipped_files = []
54
+
55
+ global clean_counter, noise_counter
56
+ if is_clean:
57
+ source_files = params['cleanfilenames']
58
+ idx_counter = clean_counter
59
+ else:
60
+ source_files = params['noisefilenames']
61
+ idx_counter = noise_counter
62
+
63
+ # initialize silence
64
+ silence = np.zeros(int(fs_output*silence_length))
65
+
66
+ # iterate through multiple clips until we have a long enough signal
67
+ tries_left = MAXTRIES
68
+ while remaining_length > 0 and tries_left > 0:
69
+
70
+ # read next audio file and resample if necessary
71
+ with idx_counter.get_lock():
72
+ idx_counter.value += 1
73
+ idx = idx_counter.value % np.size(source_files)
74
+
75
+ input_audio, fs_input = audioread(source_files[idx])
76
+ if fs_input != fs_output:
77
+ input_audio = librosa.resample(input_audio, fs_input, fs_output)
78
+
79
+ # if current file is longer than remaining desired length, and this is
80
+ # noise generation or this is training set, subsample it randomly
81
+ if len(input_audio) > remaining_length and (not is_clean or not params['is_test_set']):
82
+ idx_seg = np.random.randint(0, len(input_audio)-remaining_length)
83
+ input_audio = input_audio[idx_seg:idx_seg+remaining_length]
84
+
85
+ # check for clipping, and if found move onto next file
86
+ if is_clipped(input_audio):
87
+ clipped_files.append(source_files[idx])
88
+ tries_left -= 1
89
+ continue
90
+
91
+ # concatenate current input audio to output audio stream
92
+ files_used.append(source_files[idx])
93
+ output_audio = np.append(output_audio, input_audio)
94
+ remaining_length -= len(input_audio)
95
+
96
+ # add some silence if we have not reached desired audio length
97
+ if remaining_length > 0:
98
+ silence_len = min(remaining_length, len(silence))
99
+ output_audio = np.append(output_audio, silence[:silence_len])
100
+ remaining_length -= silence_len
101
+
102
+ if tries_left == 0:
103
+ print("Audio generation failed for filenum " + str(filenum))
104
+ return [], [], clipped_files
105
+
106
+ return output_audio, files_used, clipped_files
107
+
108
+
109
+ def gen_audio(is_clean, params, filenum, audio_samples_length=-1):
110
+ '''Calls build_audio() to get an audio signal, and verify that it meets the
111
+ activity threshold'''
112
+
113
+ clipped_files = []
114
+ low_activity_files = []
115
+ if audio_samples_length == -1:
116
+ audio_samples_length = int(params['audio_length']*params['fs'])
117
+ if is_clean:
118
+ activity_threshold = params['clean_activity_threshold']
119
+ else:
120
+ activity_threshold = params['noise_activity_threshold']
121
+
122
+ while True:
123
+ audio, source_files, new_clipped_files = \
124
+ build_audio(is_clean, params, filenum, audio_samples_length)
125
+
126
+ clipped_files += new_clipped_files
127
+ if len(audio) < audio_samples_length:
128
+ continue
129
+
130
+ if activity_threshold == 0.0:
131
+ break
132
+
133
+ percactive = activitydetector(audio=audio)
134
+ if percactive > activity_threshold:
135
+ break
136
+ else:
137
+ low_activity_files += source_files
138
+
139
+ return audio, source_files, clipped_files, low_activity_files
140
+
141
+
142
+ def main_gen(params, filenum):
143
+ '''Calls gen_audio() to generate the audio signals, verifies that they meet
144
+ the requirements, and writes the files to storage'''
145
+
146
+ print("Generating file #" + str(filenum))
147
+
148
+ clean_clipped_files = []
149
+ clean_low_activity_files = []
150
+ noise_clipped_files = []
151
+ noise_low_activity_files = []
152
+
153
+ while True:
154
+ # generate clean speech
155
+ clean, clean_source_files, clean_cf, clean_laf = \
156
+ gen_audio(True, params, filenum)
157
+ # generate noise
158
+ noise, noise_source_files, noise_cf, noise_laf = \
159
+ gen_audio(False, params, filenum, len(clean))
160
+
161
+ clean_clipped_files += clean_cf
162
+ clean_low_activity_files += clean_laf
163
+ noise_clipped_files += noise_cf
164
+ noise_low_activity_files += noise_laf
165
+
166
+ # mix clean speech and noise
167
+ # if specified, use specified SNR value
168
+ if not params['randomize_snr']:
169
+ snr = params['snr']
170
+ # use a randomly sampled SNR value between the specified bounds
171
+ else:
172
+ snr = np.random.randint(params['snr_lower'], params['snr_upper'])
173
+
174
+ clean_snr, noise_snr, noisy_snr, target_level = snr_mixer(params=params,
175
+ clean=clean,
176
+ noise=noise,
177
+ snr=snr)
178
+ # Uncomment the below lines if you need segmental SNR and comment the above lines using snr_mixer
179
+ #clean_snr, noise_snr, noisy_snr, target_level = segmental_snr_mixer(params=params,
180
+ # clean=clean,
181
+ # noise=noise,
182
+ # snr=snr)
183
+ # unexpected clipping
184
+ if is_clipped(clean_snr) or is_clipped(noise_snr) or is_clipped(noisy_snr):
185
+ continue
186
+ else:
187
+ break
188
+
189
+ # write resultant audio streams to files
190
+ hyphen = '-'
191
+ clean_source_filenamesonly = [i[:-4].split(os.path.sep)[-1] for i in clean_source_files]
192
+ clean_files_joined = hyphen.join(clean_source_filenamesonly)[:MAXFILELEN]
193
+ noise_source_filenamesonly = [i[:-4].split(os.path.sep)[-1] for i in noise_source_files]
194
+ noise_files_joined = hyphen.join(noise_source_filenamesonly)[:MAXFILELEN]
195
+
196
+ noisyfilename = clean_files_joined + '_' + noise_files_joined + '_snr' + \
197
+ str(snr) + '_fileid_' + str(filenum) + '.wav'
198
+ cleanfilename = 'clean_fileid_'+str(filenum)+'.wav'
199
+ noisefilename = 'noise_fileid_'+str(filenum)+'.wav'
200
+
201
+ noisypath = os.path.join(params['noisyspeech_dir'], noisyfilename)
202
+ cleanpath = os.path.join(params['clean_proc_dir'], cleanfilename)
203
+ noisepath = os.path.join(params['noise_proc_dir'], noisefilename)
204
+
205
+ audio_signals = [noisy_snr, clean_snr, noise_snr]
206
+ file_paths = [noisypath, cleanpath, noisepath]
207
+
208
+ for i in range(len(audio_signals)):
209
+ try:
210
+ audiowrite(file_paths[i], audio_signals[i], params['fs'])
211
+ except Exception as e:
212
+ print(str(e))
213
+ pass
214
+
215
+ return clean_source_files, clean_clipped_files, clean_low_activity_files, \
216
+ noise_source_files, noise_clipped_files, noise_low_activity_files
217
+
218
+
219
+ def extract_list(input_list, index):
220
+ output_list = [i[index] for i in input_list]
221
+ flat_output_list = [item for sublist in output_list for item in sublist]
222
+ flat_output_list = sorted(set(flat_output_list))
223
+ return flat_output_list
224
+
225
+
226
+ def main_body():
227
+ '''Main body of this file'''
228
+
229
+ parser = argparse.ArgumentParser()
230
+
231
+ # Configurations: read noisyspeech_synthesizer.cfg and gather inputs
232
+ parser.add_argument('--cfg', default='noisyspeech_synthesizer.cfg',
233
+ help='Read noisyspeech_synthesizer.cfg for all the details')
234
+ parser.add_argument('--cfg_str', type=str, default='noisy_speech')
235
+ args = parser.parse_args()
236
+
237
+ params = dict()
238
+ params['args'] = args
239
+ cfgpath = os.path.join(os.path.dirname(__file__), args.cfg)
240
+ assert os.path.exists(cfgpath), f'No configuration file as [{cfgpath}]'
241
+
242
+ cfg = CP.ConfigParser()
243
+ cfg._interpolation = CP.ExtendedInterpolation()
244
+ cfg.read(cfgpath)
245
+ params['cfg'] = cfg._sections[args.cfg_str]
246
+ cfg = params['cfg']
247
+
248
+ clean_dir = os.path.join(os.path.dirname(__file__), 'CleanSpeech')
249
+ if cfg['speech_dir'] != 'None':
250
+ clean_dir = cfg['speech_dir']
251
+ if not os.path.exists(clean_dir):
252
+ assert False, ('Clean speech data is required')
253
+
254
+ noise_dir = os.path.join(os.path.dirname(__file__), 'Noise')
255
+ if cfg['noise_dir'] != 'None':
256
+ noise_dir = cfg['noise_dir']
257
+ if not os.path.exists(noise_dir):
258
+ assert False, ('Noise data is required')
259
+
260
+ params['fs'] = int(cfg['sampling_rate'])
261
+ params['audioformat'] = cfg['audioformat']
262
+ params['audio_length'] = float(cfg['audio_length'])
263
+ params['silence_length'] = float(cfg['silence_length'])
264
+ params['total_hours'] = float(cfg['total_hours'])
265
+
266
+ if cfg['fileindex_start'] != 'None' and cfg['fileindex_end'] != 'None':
267
+ params['fileindex_start'] = int(cfg['fileindex_start'])
268
+ params['fileindex_end'] = int(cfg['fileindex_end'])
269
+ params['num_files'] = int(params['fileindex_end'])-int(params['fileindex_start'])
270
+ else:
271
+ params['num_files'] = int((params['total_hours']*60*60)/params['audio_length'])
272
+
273
+ print('Number of files to be synthesized:', params['num_files'])
274
+ params['is_test_set'] = utils.str2bool(cfg['is_test_set'])
275
+ params['clean_activity_threshold'] = float(cfg['clean_activity_threshold'])
276
+ params['noise_activity_threshold'] = float(cfg['noise_activity_threshold'])
277
+ params['snr_lower'] = int(cfg['snr_lower'])
278
+ params['snr_upper'] = int(cfg['snr_upper'])
279
+ params['randomize_snr'] = utils.str2bool(cfg['randomize_snr'])
280
+ params['target_level_lower'] = int(cfg['target_level_lower'])
281
+ params['target_level_upper'] = int(cfg['target_level_upper'])
282
+
283
+ if 'snr' in cfg.keys():
284
+ params['snr'] = int(cfg['snr'])
285
+ else:
286
+ params['snr'] = int((params['snr_lower'] + params['snr_upper'])/2)
287
+
288
+ params['noisyspeech_dir'] = utils.get_dir(cfg, 'noisy_destination', 'noisy')
289
+ params['clean_proc_dir'] = utils.get_dir(cfg, 'clean_destination', 'clean')
290
+ params['noise_proc_dir'] = utils.get_dir(cfg, 'noise_destination', 'noise')
291
+
292
+ if 'speech_csv' in cfg.keys() and cfg['speech_csv'] != 'None':
293
+ cleanfilenames = pd.read_csv(cfg['speech_csv'])
294
+ cleanfilenames = cleanfilenames['filename']
295
+ else:
296
+ cleanfilenames = glob.glob(os.path.join(clean_dir, params['audioformat']))
297
+ params['cleanfilenames'] = cleanfilenames
298
+ shuffle(params['cleanfilenames'])
299
+ params['num_cleanfiles'] = len(params['cleanfilenames'])
300
+
301
+ params['noisefilenames'] = glob.glob(os.path.join(noise_dir, params['audioformat']))
302
+ shuffle(params['noisefilenames'])
303
+
304
+ # Invoke multiple processes and fan out calls to main_gen() to these processes
305
+ global clean_counter, noise_counter
306
+ clean_counter = multiprocessing.Value('i', 0)
307
+ noise_counter = multiprocessing.Value('i', 0)
308
+
309
+ multi_pool = multiprocessing.Pool(processes=PROCESSES, initializer = init, initargs = (clean_counter, noise_counter, ))
310
+ fileindices = range(params['num_files'])
311
+ output_lists = multi_pool.starmap(main_gen, zip(repeat(params), fileindices))
312
+
313
+ flat_output_lists = []
314
+ num_lists = 6
315
+ for i in range(num_lists):
316
+ flat_output_lists.append(extract_list(output_lists, i))
317
+
318
+ # Create log directory if needed, and write log files of clipped and low activity files
319
+ log_dir = utils.get_dir(cfg, 'log_dir', 'Logs')
320
+
321
+ utils.write_log_file(log_dir, 'source_files.csv', flat_output_lists[0] + flat_output_lists[3])
322
+ utils.write_log_file(log_dir, 'clipped_files.csv', flat_output_lists[1] + flat_output_lists[4])
323
+ utils.write_log_file(log_dir, 'low_activity_files.csv', flat_output_lists[2] + flat_output_lists[5])
324
+
325
+ # Compute and print stats about the percentage of clipped and low-activity files
326
+ total_clean = len(flat_output_lists[0]) + len(flat_output_lists[1]) + len(flat_output_lists[2])
327
+ total_noise = len(flat_output_lists[3]) + len(flat_output_lists[4]) + len(flat_output_lists[5])
328
+ pct_clean_clipped = round(len(flat_output_lists[1])/total_clean*100, 1)
329
+ pct_noise_clipped = round(len(flat_output_lists[4])/total_noise*100, 1)
330
+ pct_clean_low_activity = round(len(flat_output_lists[2])/total_clean*100, 1)
331
+ pct_noise_low_activity = round(len(flat_output_lists[5])/total_noise*100, 1)
332
+
333
+ print("Of the " + str(total_clean) + " clean speech files analyzed, " + str(pct_clean_clipped) + \
334
+ "% had clipping, and " + str(pct_clean_low_activity) + "% had low activity " + \
335
+ "(below " + str(params['clean_activity_threshold']*100) + "% active percentage)")
336
+ print("Of the " + str(total_noise) + " noise files analyzed, " + str(pct_noise_clipped) + \
337
+ "% had clipping, and " + str(pct_noise_low_activity) + "% had low activity " + \
338
+ "(below " + str(params['noise_activity_threshold']*100) + "% active percentage)")
339
+
340
+
341
+ if __name__ == '__main__':
342
+ main_body()
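The `initializer`/`initargs` pattern above exists because `multiprocessing.Value` handles cannot be shipped to workers through `Pool.starmap` arguments on spawn-based platforms; each worker instead receives the shared counters at pool start-up. A minimal standalone sketch of the same pattern (names here are illustrative):

```python
import multiprocessing

counter = None  # set per-worker by init()

def init(shared):
    global counter
    counter = shared

def work(_):
    with counter.get_lock():   # the lock guards the read-modify-write
        counter.value += 1
        return counter.value

if __name__ == '__main__':
    shared = multiprocessing.Value('i', 0)
    with multiprocessing.Pool(2, initializer=init, initargs=(shared,)) as pool:
        print(sorted(pool.map(work, range(4))))  # [1, 2, 3, 4]
```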
noisyspeech_synthesizer_singleprocess.py ADDED
@@ -0,0 +1,351 @@
1
+ """
2
+ @author: chkarada
3
+ """
4
+
5
+ # Note: This single process audio synthesizer will attempt to use each clean
6
+ # speech sourcefile once, as it does not randomly sample from these files
7
+
8
+
9
+ import os
10
+ import glob
11
+ import argparse
12
+ import ast
13
+ import configparser as CP
14
+ import random
15
+ from random import shuffle
16
+ import librosa
17
+ import numpy as np
18
+ from audiolib import audioread, audiowrite, segmental_snr_mixer, snr_mixer, \
19
+ activitydetector, is_clipped, add_clipping
20
+ import utils
21
+ import pandas as pd
22
+
23
+ MAXTRIES = 50
24
+ MAXFILELEN = 100
25
+
26
+ np.random.seed(2)
27
+ random.seed(3)
28
+
29
+ def build_audio(is_clean, params, index, audio_samples_length=-1):
30
+ '''Construct an audio signal from source files'''
31
+
32
+ fs_output = params['fs']
33
+ silence_length = params['silence_length']
34
+ if audio_samples_length == -1:
35
+ audio_samples_length = int(params['audio_length']*params['fs'])
36
+
37
+ output_audio = np.zeros(0)
38
+ remaining_length = audio_samples_length
39
+ files_used = []
40
+ clipped_files = []
41
+
42
+ if is_clean:
43
+ source_files = params['cleanfilenames']
44
+ idx = index
45
+ else:
46
+ if 'noisefilenames' in params.keys():
47
+ source_files = params['noisefilenames']
48
+ idx = index
49
+ # if noise files are organized into individual subdirectories, pick a directory randomly
50
+ else:
51
+ noisedirs = params['noisedirs']
52
+ # pick a noise category randomly
53
+ idx_n_dir = np.random.randint(0, np.size(noisedirs))
54
+ source_files = glob.glob(os.path.join(noisedirs[idx_n_dir],
55
+ params['audioformat']))
56
+ shuffle(source_files)
57
+ # pick a noise source file index randomly
58
+ idx = np.random.randint(0, np.size(source_files))
59
+
60
+ # initialize silence
61
+ silence = np.zeros(int(fs_output*silence_length))
62
+
63
+ # iterate through multiple clips until we have a long enough signal
64
+ tries_left = MAXTRIES
65
+ while remaining_length > 0 and tries_left > 0:
66
+
67
+ # read next audio file and resample if necessary
68
+ idx = (idx + 1) % np.size(source_files)
69
+ input_audio, fs_input = audioread(source_files[idx])
70
+ if fs_input != fs_output:
71
+ input_audio = librosa.resample(input_audio, fs_input, fs_output)
72
+
73
+ # if current file is longer than remaining desired length, and this is
74
+ # noise generation or this is training set, subsample it randomly
75
+ if len(input_audio) > remaining_length and (not is_clean or not params['is_test_set']):
76
+ idx_seg = np.random.randint(0, len(input_audio)-remaining_length)
77
+ input_audio = input_audio[idx_seg:idx_seg+remaining_length]
78
+
79
+ # check for clipping, and if found move onto next file
80
+ if is_clipped(input_audio):
81
+ clipped_files.append(source_files[idx])
82
+ tries_left -= 1
83
+ continue
84
+
85
+ # concatenate current input audio to output audio stream
86
+ files_used.append(source_files[idx])
87
+ output_audio = np.append(output_audio, input_audio)
88
+ remaining_length -= len(input_audio)
89
+
90
+ # add some silence if we have not reached desired audio length
91
+ if remaining_length > 0:
92
+ silence_len = min(remaining_length, len(silence))
93
+ output_audio = np.append(output_audio, silence[:silence_len])
94
+ remaining_length -= silence_len
95
+
96
+ if tries_left == 0 and not is_clean and 'noisedirs' in params.keys():
97
+ print("There are not enough non-clipped files in the " + noisedirs[idx_n_dir] + \
98
+ " directory to complete the audio build")
99
+ return [], [], clipped_files, idx
100
+
101
+ return output_audio, files_used, clipped_files, idx
102
+
103
+
104
+ def gen_audio(is_clean, params, index, audio_samples_length=-1):
105
+ '''Calls build_audio() to get an audio signal, and verify that it meets the
106
+ activity threshold'''
107
+
108
+ clipped_files = []
109
+ low_activity_files = []
110
+ if audio_samples_length == -1:
111
+ audio_samples_length = int(params['audio_length']*params['fs'])
112
+ if is_clean:
113
+ activity_threshold = params['clean_activity_threshold']
114
+ else:
115
+ activity_threshold = params['noise_activity_threshold']
116
+
117
+ while True:
118
+ audio, source_files, new_clipped_files, index = \
119
+ build_audio(is_clean, params, index, audio_samples_length)
120
+
121
+ clipped_files += new_clipped_files
122
+ if len(audio) < audio_samples_length:
123
+ continue
124
+
125
+ if activity_threshold == 0.0:
126
+ break
127
+
128
+ percactive = activitydetector(audio=audio)
129
+ if percactive > activity_threshold:
130
+ break
131
+ else:
132
+ low_activity_files += source_files
133
+
134
+ return audio, source_files, clipped_files, low_activity_files, index
135
+
136
+
137
+ def main_gen(params):
138
+ '''Calls gen_audio() to generate the audio signals, verifies that they meet
139
+ the requirements, and writes the files to storage'''
140
+
141
+ clean_source_files = []
142
+ clean_clipped_files = []
143
+ clean_low_activity_files = []
144
+ noise_source_files = []
145
+ noise_clipped_files = []
146
+ noise_low_activity_files = []
147
+
148
+ clean_index = 0
149
+ noise_index = 0
150
+ file_num = params['fileindex_start']
151
+
152
+ while file_num <= params['fileindex_end']:
153
+ # generate clean speech
154
+ clean, clean_sf, clean_cf, clean_laf, clean_index = \
155
+ gen_audio(True, params, clean_index)
156
+ # generate noise
157
+ noise, noise_sf, noise_cf, noise_laf, noise_index = \
158
+ gen_audio(False, params, noise_index, len(clean))
159
+
160
+ clean_clipped_files += clean_cf
161
+ clean_low_activity_files += clean_laf
162
+ noise_clipped_files += noise_cf
163
+ noise_low_activity_files += noise_laf
164
+
165
+ # mix clean speech and noise
166
+ # if specified, use specified SNR value
167
+ if not params['randomize_snr']:
168
+ snr = params['snr']
169
+ # use a randomly sampled SNR value between the specified bounds
170
+ else:
171
+ snr = np.random.randint(params['snr_lower'], params['snr_upper'])
172
+
173
+ clean_snr, noise_snr, noisy_snr, target_level = snr_mixer(params=params,
174
+ clean=clean,
175
+ noise=noise,
176
+ snr=snr)
177
+ # Uncomment the below lines if you need segmental SNR and comment the above lines using snr_mixer
178
+ #clean_snr, noise_snr, noisy_snr, target_level = segmental_snr_mixer(params=params,
179
+ # clean=clean,
180
+ # noise=noise,
181
+ # snr=snr)
182
+ # unexpected clipping
183
+ if is_clipped(clean_snr) or is_clipped(noise_snr) or is_clipped(noisy_snr):
184
+ print("Warning: File #" + str(file_num) + " has unexpected clipping, " + \
185
+ "returning without writing audio to disk")
186
+ continue
187
+
188
+ clean_source_files += clean_sf
189
+ noise_source_files += noise_sf
190
+
191
+ # write resultant audio streams to files
192
+ hyphen = '-'
193
+ clean_source_filenamesonly = [i[:-4].split(os.path.sep)[-1] for i in clean_sf]
194
+ clean_files_joined = hyphen.join(clean_source_filenamesonly)[:MAXFILELEN]
195
+ noise_source_filenamesonly = [i[:-4].split(os.path.sep)[-1] for i in noise_sf]
196
+ noise_files_joined = hyphen.join(noise_source_filenamesonly)[:MAXFILELEN]
197
+
198
+ noisyfilename = clean_files_joined + '_' + noise_files_joined + '_snr' + \
199
+ str(snr) + '_tl' + str(target_level) + '_fileid_' + str(file_num) + '.wav'
200
+ cleanfilename = 'clean_fileid_'+str(file_num)+'.wav'
201
+ noisefilename = 'noise_fileid_'+str(file_num)+'.wav'
202
+
203
+ noisypath = os.path.join(params['noisyspeech_dir'], noisyfilename)
204
+ cleanpath = os.path.join(params['clean_proc_dir'], cleanfilename)
205
+ noisepath = os.path.join(params['noise_proc_dir'], noisefilename)
206
+
207
+ audio_signals = [noisy_snr, clean_snr, noise_snr]
208
+ file_paths = [noisypath, cleanpath, noisepath]
209
+
210
+ file_num += 1
211
+ for i in range(len(audio_signals)):
212
+ try:
213
+ audiowrite(file_paths[i], audio_signals[i], params['fs'])
214
+ except Exception as e:
215
+ print(str(e))
216
+
217
+
218
+ return clean_source_files, clean_clipped_files, clean_low_activity_files, \
219
+ noise_source_files, noise_clipped_files, noise_low_activity_files
220
+
221
+
222
+ def main_body():
223
+ '''Main body of this file'''
224
+
225
+ parser = argparse.ArgumentParser()
226
+
227
+ # Configurations: read noisyspeech_synthesizer.cfg and gather inputs
228
+ parser.add_argument('--cfg', default='noisyspeech_synthesizer.cfg',
229
+ help='Read noisyspeech_synthesizer.cfg for all the details')
230
+ parser.add_argument('--cfg_str', type=str, default='noisy_speech')
231
+ args = parser.parse_args()
232
+
233
+ params = dict()
234
+ params['args'] = args
235
+ cfgpath = os.path.join(os.path.dirname(__file__), args.cfg)
236
+ assert os.path.exists(cfgpath), f'No configuration file as [{cfgpath}]'
237
+
238
+ cfg = CP.ConfigParser()
239
+ cfg._interpolation = CP.ExtendedInterpolation()
240
+ cfg.read(cfgpath)
241
+ params['cfg'] = cfg._sections[args.cfg_str]
242
+ cfg = params['cfg']
243
+
244
+ clean_dir = os.path.join(os.path.dirname(__file__), 'CleanSpeech')
245
+ if cfg['speech_dir'] != 'None':
246
+ clean_dir = cfg['speech_dir']
247
+ if not os.path.exists(clean_dir):
248
+ assert False, ('Clean speech data is required')
249
+
250
+ noise_dir = os.path.join(os.path.dirname(__file__), 'Noise')
251
+ if cfg['noise_dir'] != 'None':
252
+ noise_dir = cfg['noise_dir']
253
+ if not os.path.exists(noise_dir):
254
+ assert False, ('Noise data is required')
255
+
256
+ params['fs'] = int(cfg['sampling_rate'])
257
+ params['audioformat'] = cfg['audioformat']
258
+ params['audio_length'] = float(cfg['audio_length'])
259
+ params['silence_length'] = float(cfg['silence_length'])
260
+ params['total_hours'] = float(cfg['total_hours'])
261
+
262
+ if cfg['fileindex_start'] != 'None' and cfg['fileindex_end'] != 'None':
263
+ params['num_files'] = int(cfg['fileindex_end'])-int(cfg['fileindex_start'])
264
+ params['fileindex_start'] = int(cfg['fileindex_start'])
265
+ params['fileindex_end'] = int(cfg['fileindex_end'])
266
+ else:
267
+ params['num_files'] = int((params['total_hours']*60*60)/params['audio_length'])
268
+ params['fileindex_start'] = 0
269
+ params['fileindex_end'] = params['num_files']
270
+
271
+ print('Number of files to be synthesized:', params['num_files'])
272
+
273
+ params['is_test_set'] = utils.str2bool(cfg['is_test_set'])
274
+ params['clean_activity_threshold'] = float(cfg['clean_activity_threshold'])
275
+ params['noise_activity_threshold'] = float(cfg['noise_activity_threshold'])
276
+ params['snr_lower'] = int(cfg['snr_lower'])
277
+ params['snr_upper'] = int(cfg['snr_upper'])
278
+
279
+ params['randomize_snr'] = utils.str2bool(cfg['randomize_snr'])
280
+ params['target_level_lower'] = int(cfg['target_level_lower'])
281
+ params['target_level_upper'] = int(cfg['target_level_upper'])
282
+
283
+ if 'snr' in cfg.keys():
284
+ params['snr'] = int(cfg['snr'])
285
+ else:
286
+ params['snr'] = int((params['snr_lower'] + params['snr_upper'])/2)
287
+
288
+ params['noisyspeech_dir'] = utils.get_dir(cfg, 'noisy_destination', 'noisy')
289
+ params['clean_proc_dir'] = utils.get_dir(cfg, 'clean_destination', 'clean')
290
+ params['noise_proc_dir'] = utils.get_dir(cfg, 'noise_destination', 'noise')
291
+
292
+ if 'speech_csv' in cfg.keys() and cfg['speech_csv'] != 'None':
293
+ cleanfilenames = pd.read_csv(cfg['speech_csv'])
294
+ cleanfilenames = cleanfilenames['filename']
295
+ else:
296
+ cleanfilenames = glob.glob(os.path.join(clean_dir, params['audioformat']))
297
+ params['cleanfilenames'] = cleanfilenames
298
+ shuffle(params['cleanfilenames'])
299
+ params['num_cleanfiles'] = len(params['cleanfilenames'])
300
+ # If there are .wav files in noise_dir directory, use those
301
+ # If not, that implies that the noise files are organized into subdirectories by type,
302
+ # so get the names of the non-excluded subdirectories
303
+ if 'noise_csv' in cfg.keys() and cfg['noise_csv'] != 'None':
304
+ noisefilenames = pd.read_csv(cfg['noise_csv'])
305
+ noisefilenames = noisefilenames['filename']
306
+ else:
307
+ noisefilenames = glob.glob(os.path.join(noise_dir, params['audioformat']))
308
+
309
+ if len(noisefilenames)!=0:
310
+ shuffle(noisefilenames)
311
+ params['noisefilenames'] = noisefilenames
312
+ else:
313
+ noisedirs = glob.glob(os.path.join(noise_dir, '*'))
314
+ if cfg['noise_types_excluded'] != 'None':
315
+ dirstoexclude = cfg['noise_types_excluded'].split(',')
316
+ for dirs in dirstoexclude:
317
+ noisedirs.remove(dirs)
318
+ shuffle(noisedirs)
319
+ params['noisedirs'] = noisedirs
320
+
321
+ # Call main_gen() to generate audio
322
+ clean_source_files, clean_clipped_files, clean_low_activity_files, \
323
+ noise_source_files, noise_clipped_files, noise_low_activity_files = main_gen(params)
324
+
325
+ # Create log directory if needed, and write log files of clipped and low activity files
326
+ log_dir = utils.get_dir(cfg, 'log_dir', 'Logs')
327
+
328
+ utils.write_log_file(log_dir, 'source_files.csv', clean_source_files + noise_source_files)
329
+ utils.write_log_file(log_dir, 'clipped_files.csv', clean_clipped_files + noise_clipped_files)
330
+ utils.write_log_file(log_dir, 'low_activity_files.csv', \
331
+ clean_low_activity_files + noise_low_activity_files)
332
+
333
+ # Compute and print stats about percentange of clipped and low activity files
334
+ total_clean = len(clean_source_files) + len(clean_clipped_files) + len(clean_low_activity_files)
335
+ total_noise = len(noise_source_files) + len(noise_clipped_files) + len(noise_low_activity_files)
336
+ pct_clean_clipped = round(len(clean_clipped_files)/total_clean*100, 1)
337
+ pct_noise_clipped = round(len(noise_clipped_files)/total_noise*100, 1)
338
+ pct_clean_low_activity = round(len(clean_low_activity_files)/total_clean*100, 1)
339
+ pct_noise_low_activity = round(len(noise_low_activity_files)/total_noise*100, 1)
340
+
341
+ print("Of the " + str(total_clean) + " clean speech files analyzed, " + \
342
+ str(pct_clean_clipped) + "% had clipping, and " + str(pct_clean_low_activity) + \
343
+ "% had low activity " + "(below " + str(params['clean_activity_threshold']*100) + \
344
+ "% active percentage)")
345
+ print("Of the " + str(total_noise) + " noise files analyzed, " + str(pct_noise_clipped) + \
346
+ "% had clipping, and " + str(pct_noise_low_activity) + "% had low activity " + \
347
+ "(below " + str(params['noise_activity_threshold']*100) + "% active percentage)")
348
+
349
+
350
+ if __name__ == '__main__':
351
+ main_body()
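
For reference, the fallback file-count computation above reduces to total requested duration divided by clip length. A quick sketch of the arithmetic with illustrative numbers (not the shipped config defaults):

```python
# Illustrative values: a 100-hour corpus of 30-second clips
total_hours = 100.0
audio_length = 30.0  # seconds per synthesized clip

num_files = int((total_hours * 60 * 60) / audio_length)
print(num_files)  # 12000 files when no explicit fileindex range is set in the config
```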
requirements.txt ADDED
@@ -0,0 +1,5 @@
+numpy
+soundfile
+librosa
+pandas
+onnxruntime
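
Only third-party packages belong here; standard-library modules (`os`, `glob`, `argparse`, and the like) ship with Python and are not pip-installable. A minimal import smoke test for the listed dependencies (a sketch, assuming they are installed in the active environment):

```python
# Confirms each third-party dependency in requirements.txt imports cleanly
import importlib

for pkg in ('numpy', 'soundfile', 'librosa', 'pandas', 'onnxruntime'):
    importlib.import_module(pkg)
    print(pkg, 'OK')
```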
unit_tests_synthesizer.py ADDED
@@ -0,0 +1,186 @@
+import numpy as np
+import soundfile as sf
+import glob
+import argparse
+import os
+import utils
+import configparser as CP
+
+LOW_ENERGY_THRESH = -60
+
+def test_snr(clean, noise, expected_snr, snrtolerance=2):
+    '''Test for SNR
+    Note: It is not applicable for Segmental SNR'''
+    rmsclean = (clean**2).mean()**0.5
+    rmsnoise = (noise**2).mean()**0.5
+    actual_snr = 20*np.log10(rmsclean/rmsnoise)
+    return (expected_snr - snrtolerance) < actual_snr < (expected_snr + snrtolerance)
+
+def test_normalization(audio, expected_rms=-25, normtolerance=2):
+    '''Test for normalization
+    Note: Set it to False if different target levels are used'''
+    rmsaudio = (audio**2).mean()**0.5
+    rmsaudiodb = 20*np.log10(rmsaudio)
+    return (expected_rms - normtolerance) < rmsaudiodb < (expected_rms + normtolerance)
+
+def test_samplingrate(sr, expected_sr=16000):
+    '''Test to ensure all clips have the same sampling rate'''
+    return expected_sr == sr
+
+def test_clipping(audio, num_consecutive_samples=3, clipping_threshold=0.01):
+    '''Test to detect clipping'''
+    clipping = False
+    for i in range(0, len(audio)-num_consecutive_samples-1):
+        audioseg = audio[i:i+num_consecutive_samples]
+        if abs(max(audioseg)-min(audioseg)) < clipping_threshold or abs(max(audioseg)) >= 1:
+            clipping = True
+            break
+    return clipping
+
+def test_zeros_beg_end(audio, num_zeros=16000, low_energy_thresh=LOW_ENERGY_THRESH):
+    '''Test if there are zeros at the beginning and the end of the signal'''
+    # Segment energy is the RMS level of the segment expressed in dB
+    beg_segment_energy = 20*np.log10((audio[:num_zeros]**2).mean()**0.5)
+    end_segment_energy = 20*np.log10((audio[-num_zeros:]**2).mean()**0.5)
+    return beg_segment_energy < low_energy_thresh or end_segment_energy < low_energy_thresh
+
+def adsp_filtering_test(adsp, without_adsp):
+    '''Test whether the ADSP-processed signal differs from the unprocessed one'''
+    diff = adsp - without_adsp
+    return any(val > 0.0001 for val in diff)
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--cfg', default='noisyspeech_synthesizer.cfg')
+    parser.add_argument('--cfg_str', type=str, default='noisy_speech')
+
+    args = parser.parse_args()
+
+    cfgpath = os.path.join(os.path.dirname(__file__), args.cfg)
+    assert os.path.exists(cfgpath), f'No configuration file as [{cfgpath}]'
+
+    cfg = CP.ConfigParser()
+    cfg._interpolation = CP.ExtendedInterpolation()
+    cfg.read(cfgpath)
+    cfg = cfg._sections[args.cfg_str]
+
+    noisydir = cfg['noisy_train']
+    cleandir = cfg['clean_train']
+    noisedir = cfg['noise_train']
+    audioformat = cfg['audioformat']
+
+    # List of noisy speech files (only the first 10 are checked)
+    noisy_speech_filenames_big = glob.glob(os.path.join(noisydir, audioformat))
+    noisy_speech_filenames = noisy_speech_filenames_big[0:10]
+    # Initialize the result lists
+    noisy_filenames_list = []
+    clean_filenames_list = []
+    noise_filenames_list = []
+    snr_results_list = []
+    clean_norm_results_list = []
+    noise_norm_results_list = []
+    noisy_norm_results_list = []
+    clean_sr_results_list = []
+    noise_sr_results_list = []
+    noisy_sr_results_list = []
+    clean_clipping_results_list = []
+    noise_clipping_results_list = []
+    noisy_clipping_results_list = []
+
+    skipped_string = 'Skipped'
+    # Initialize the counters for stats
+    total_clips = len(noisy_speech_filenames)
+
+    for noisypath in noisy_speech_filenames:
+        # To do: add right paths to clean filename and noise filename
+        noisy_filename = os.path.basename(noisypath)
+        clean_filename = 'clean_fileid_' + os.path.splitext(noisy_filename)[0].split('fileid_')[1] + '.wav'
+        cleanpath = os.path.join(cleandir, clean_filename)
+        noise_filename = 'noise_fileid_' + os.path.splitext(noisy_filename)[0].split('fileid_')[1] + '.wav'
+        noisepath = os.path.join(noisedir, noise_filename)
+
+        noisy_filenames_list.append(noisy_filename)
+        clean_filenames_list.append(clean_filename)
+        noise_filenames_list.append(noise_filename)
+
+        # Read clean, noise and noisy signals
+        clean_signal, fs_clean = sf.read(cleanpath)
+        noise_signal, fs_noise = sf.read(noisepath)
+        noisy_signal, fs_noisy = sf.read(noisypath)
+
+        # SNR test
+        # To do: add right path split to extract SNR
+        if utils.str2bool(cfg['snr_test']):
+            snr = int(noisy_filename.split('_snr')[1].split('_')[0])
+            snr_results_list.append(str(test_snr(clean=clean_signal,
+                                                 noise=noise_signal, expected_snr=snr)))
+        else:
+            snr_results_list.append(skipped_string)
+
+        # Normalization test
+        if utils.str2bool(cfg['norm_test']):
+            tl = int(noisy_filename.split('_tl')[1].split('_')[0])
+            clean_norm_results_list.append(str(test_normalization(clean_signal)))
+            noise_norm_results_list.append(str(test_normalization(noise_signal)))
+            noisy_norm_results_list.append(str(test_normalization(noisy_signal, expected_rms=tl)))
+        else:
+            clean_norm_results_list.append(skipped_string)
+            noise_norm_results_list.append(skipped_string)
+            noisy_norm_results_list.append(skipped_string)
+
+        # Sampling rate test
+        if utils.str2bool(cfg['sampling_rate_test']):
+            clean_sr_results_list.append(str(test_samplingrate(sr=fs_clean)))
+            noise_sr_results_list.append(str(test_samplingrate(sr=fs_noise)))
+            noisy_sr_results_list.append(str(test_samplingrate(sr=fs_noisy)))
+        else:
+            clean_sr_results_list.append(skipped_string)
+            noise_sr_results_list.append(skipped_string)
+            noisy_sr_results_list.append(skipped_string)
+
+        # Clipping test
+        if utils.str2bool(cfg['clipping_test']):
+            clean_clipping_results_list.append(str(test_clipping(audio=clean_signal)))
+            noise_clipping_results_list.append(str(test_clipping(audio=noise_signal)))
+            noisy_clipping_results_list.append(str(test_clipping(audio=noisy_signal)))
+        else:
+            clean_clipping_results_list.append(skipped_string)
+            noise_clipping_results_list.append(skipped_string)
+            noisy_clipping_results_list.append(skipped_string)
+
+    # Stats
+    pc_snr_passed = round(snr_results_list.count('True')/total_clips*100, 1)
+    pc_clean_norm_passed = round(clean_norm_results_list.count('True')/total_clips*100, 1)
+    pc_noise_norm_passed = round(noise_norm_results_list.count('True')/total_clips*100, 1)
+    pc_noisy_norm_passed = round(noisy_norm_results_list.count('True')/total_clips*100, 1)
+    pc_clean_sr_passed = round(clean_sr_results_list.count('True')/total_clips*100, 1)
+    pc_noise_sr_passed = round(noise_sr_results_list.count('True')/total_clips*100, 1)
+    pc_noisy_sr_passed = round(noisy_sr_results_list.count('True')/total_clips*100, 1)
+    pc_clean_clipping_passed = round(clean_clipping_results_list.count('True')/total_clips*100, 1)
+    pc_noise_clipping_passed = round(noise_clipping_results_list.count('True')/total_clips*100, 1)
+    pc_noisy_clipping_passed = round(noisy_clipping_results_list.count('True')/total_clips*100, 1)
+
+    print('% clips that passed SNR test:', pc_snr_passed)
+
+    print('% clean clips that passed Normalization tests:', pc_clean_norm_passed)
+    print('% noise clips that passed Normalization tests:', pc_noise_norm_passed)
+    print('% noisy clips that passed Normalization tests:', pc_noisy_norm_passed)
+
+    print('% clean clips that passed Sampling Rate tests:', pc_clean_sr_passed)
+    print('% noise clips that passed Sampling Rate tests:', pc_noise_sr_passed)
+    print('% noisy clips that passed Sampling Rate tests:', pc_noisy_sr_passed)
+
+    print('% clean clips that passed Clipping tests:', pc_clean_clipping_passed)
+    print('% noise clips that passed Clipping tests:', pc_noise_clipping_passed)
+    print('% noisy clips that passed Clipping tests:', pc_noisy_clipping_passed)
+
+    log_dir = utils.get_dir(cfg, 'unit_tests_log_dir', 'Unit_tests_logs')
+
+    if not os.path.exists(log_dir):
+        log_dir = os.path.join(os.path.dirname(__file__), 'Unit_tests_logs')
+        os.makedirs(log_dir)
+
+    utils.write_log_file(log_dir, 'unit_test_results.csv', [noisy_filenames_list, clean_filenames_list,
+                         noise_filenames_list, snr_results_list, clean_norm_results_list, noise_norm_results_list,
+                         noisy_norm_results_list, clean_sr_results_list, noise_sr_results_list, noisy_sr_results_list,
+                         clean_clipping_results_list, noise_clipping_results_list, noisy_clipping_results_list])
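
The checks above can also be exercised in isolation. Below is a minimal sketch with synthetic signals (the 10 dB target and the random seed are illustrative, and the import assumes this file is on the Python path):

```python
import numpy as np
from unit_tests_synthesizer import test_snr, test_samplingrate

rng = np.random.default_rng(0)
fs = 16000
clean = rng.standard_normal(fs)
noise = rng.standard_normal(fs)

# Scale the noise so the pair has a 10 dB SNR, then confirm test_snr accepts it
rmsclean = (clean**2).mean()**0.5
rmsnoise = (noise**2).mean()**0.5
noise *= rmsclean / (rmsnoise * 10**(10/20))

print(test_snr(clean, noise, expected_snr=10))  # True (within the default 2 dB tolerance)
print(test_samplingrate(fs))                    # True (16 kHz is the expected rate)
```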
utils.py ADDED
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Nov 1 10:28:41 2019
+
+@author: rocheng
+"""
+import os
+import csv
+from shutil import copyfile
+import glob
+
+def get_dir(cfg, param_name, new_dir_name):
+    '''Helper function to retrieve the directory name if it exists,
+    and create it if it doesn't'''
+    if param_name in cfg:
+        dir_name = cfg[param_name]
+    else:
+        dir_name = os.path.join(os.path.dirname(__file__), new_dir_name)
+    if not os.path.exists(dir_name):
+        os.makedirs(dir_name)
+    return dir_name
+
+
+def write_log_file(log_dir, log_filename, data):
+    '''Helper function to write a log file'''
+    # Each element of data is a column; zip turns them into per-file rows
+    data = zip(*data)
+    with open(os.path.join(log_dir, log_filename), mode='w', newline='') as csvfile:
+        csvwriter = csv.writer(csvfile, delimiter=' ',
+                               quotechar='|', quoting=csv.QUOTE_MINIMAL)
+        for row in data:
+            csvwriter.writerow([row])
+
+
+def str2bool(string):
+    return string.lower() in ("yes", "true", "t", "1")
+
+
+def rename_copyfile(src_path, dest_dir, prefix='', ext='*.wav'):
+    srcfiles = glob.glob(f"{src_path}/" + ext)
+    for i in range(len(srcfiles)):
+        dest_path = os.path.join(dest_dir, prefix + '_' + os.path.basename(srcfiles[i]))
+        copyfile(srcfiles[i], dest_path)
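
A minimal usage sketch for these helpers (the plain dict stands in for a parsed config section, and the file names are illustrative):

```python
import utils

cfg = {'log_dir': './Logs'}  # stands in for a configparser section
log_dir = utils.get_dir(cfg, 'log_dir', 'Logs')  # returns ./Logs, creating it if missing

print(utils.str2bool('Yes'), utils.str2bool('0'))  # True False

# write_log_file zips the per-column lists into rows before writing
utils.write_log_file(log_dir, 'demo.csv', [['a.wav', 'b.wav'], ['True', 'False']])
```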