Datasets:
Cnam-LMSSC/vibravox

Modalities:
Audio
Text
Formats:
parquet
Languages:
French
ArXiv:
2407.11828
DOI:
10.57967/hf/2727
Libraries:
Datasets
Dask
License:
cc-by-4.0
This view is limited to 50 files because it contains too many changes. See the raw diff here.
Files changed (50)
  1. .gitattributes +55 -0
  2. README.md +0 -584
  3. speech_clean/test-00000-of-00032.parquet → data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiN_00008590.rf64 +2 -2
  4. speechless_noisy/train-00002-of-00050.parquet → data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiN_00008591.rf64 +2 -2
  5. speech_clean/test-00001-of-00032.parquet → data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiN_00008592.rf64 +2 -2
  6. speech_clean/test-00002-of-00032.parquet → data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiN_00008593.rf64 +2 -2
  7. speech_clean/test-00003-of-00032.parquet → data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiN_00008594.rf64 +2 -2
  8. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiN_00008595.rf64 +3 -0
  9. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008437.rf64 +3 -0
  10. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008438.rf64 +3 -0
  11. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008440.rf64 +3 -0
  12. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008442.rf64 +3 -0
  13. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008443.rf64 +3 -0
  14. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008444.rf64 +3 -0
  15. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008445.rf64 +3 -0
  16. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008451.rf64 +3 -0
  17. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008453.rf64 +3 -0
  18. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008454.rf64 +3 -0
  19. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008459.rf64 +3 -0
  20. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008460.rf64 +3 -0
  21. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008462.rf64 +3 -0
  22. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008463.rf64 +3 -0
  23. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008464.rf64 +3 -0
  24. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008465.rf64 +3 -0
  25. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008468.rf64 +3 -0
  26. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008469.rf64 +3 -0
  27. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008470.rf64 +3 -0
  28. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008473.rf64 +3 -0
  29. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008474.rf64 +3 -0
  30. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008475.rf64 +3 -0
  31. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008477.rf64 +3 -0
  32. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008481.rf64 +3 -0
  33. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008486.rf64 +3 -0
  34. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008487.rf64 +3 -0
  35. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008489.rf64 +3 -0
  36. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008490.rf64 +3 -0
  37. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008492.rf64 +3 -0
  38. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008495.rf64 +3 -0
  39. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008496.rf64 +3 -0
  40. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008499.rf64 +3 -0
  41. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008500.rf64 +3 -0
  42. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008502.rf64 +3 -0
  43. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008503.rf64 +3 -0
  44. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008506.rf64 +3 -0
  45. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008510.rf64 +3 -0
  46. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008512.rf64 +3 -0
  47. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008513.rf64 +3 -0
  48. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008516.rf64 +3 -0
  49. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008517.rf64 +3 -0
  50. data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008520.rf64 +3 -0
.gitattributes CHANGED
@@ -53,3 +53,58 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
56
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiN_00008590.rf64 filter=lfs diff=lfs merge=lfs -text
57
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiN_00008591.rf64 filter=lfs diff=lfs merge=lfs -text
58
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiN_00008592.rf64 filter=lfs diff=lfs merge=lfs -text
59
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiN_00008593.rf64 filter=lfs diff=lfs merge=lfs -text
60
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiN_00008594.rf64 filter=lfs diff=lfs merge=lfs -text
61
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiN_00008595.rf64 filter=lfs diff=lfs merge=lfs -text
62
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008437.rf64 filter=lfs diff=lfs merge=lfs -text
63
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008438.rf64 filter=lfs diff=lfs merge=lfs -text
64
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008440.rf64 filter=lfs diff=lfs merge=lfs -text
65
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008442.rf64 filter=lfs diff=lfs merge=lfs -text
66
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008443.rf64 filter=lfs diff=lfs merge=lfs -text
67
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008444.rf64 filter=lfs diff=lfs merge=lfs -text
68
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008445.rf64 filter=lfs diff=lfs merge=lfs -text
69
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008451.rf64 filter=lfs diff=lfs merge=lfs -text
70
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008453.rf64 filter=lfs diff=lfs merge=lfs -text
71
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008454.rf64 filter=lfs diff=lfs merge=lfs -text
72
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008459.rf64 filter=lfs diff=lfs merge=lfs -text
73
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008460.rf64 filter=lfs diff=lfs merge=lfs -text
74
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008462.rf64 filter=lfs diff=lfs merge=lfs -text
75
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008463.rf64 filter=lfs diff=lfs merge=lfs -text
76
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008464.rf64 filter=lfs diff=lfs merge=lfs -text
77
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008465.rf64 filter=lfs diff=lfs merge=lfs -text
78
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008468.rf64 filter=lfs diff=lfs merge=lfs -text
79
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008469.rf64 filter=lfs diff=lfs merge=lfs -text
80
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008470.rf64 filter=lfs diff=lfs merge=lfs -text
81
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008473.rf64 filter=lfs diff=lfs merge=lfs -text
82
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008474.rf64 filter=lfs diff=lfs merge=lfs -text
83
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008475.rf64 filter=lfs diff=lfs merge=lfs -text
84
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008477.rf64 filter=lfs diff=lfs merge=lfs -text
85
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008481.rf64 filter=lfs diff=lfs merge=lfs -text
86
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008486.rf64 filter=lfs diff=lfs merge=lfs -text
87
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008487.rf64 filter=lfs diff=lfs merge=lfs -text
88
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008489.rf64 filter=lfs diff=lfs merge=lfs -text
89
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008490.rf64 filter=lfs diff=lfs merge=lfs -text
90
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008492.rf64 filter=lfs diff=lfs merge=lfs -text
91
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008495.rf64 filter=lfs diff=lfs merge=lfs -text
92
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008496.rf64 filter=lfs diff=lfs merge=lfs -text
93
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008499.rf64 filter=lfs diff=lfs merge=lfs -text
94
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008500.rf64 filter=lfs diff=lfs merge=lfs -text
95
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008502.rf64 filter=lfs diff=lfs merge=lfs -text
96
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008503.rf64 filter=lfs diff=lfs merge=lfs -text
97
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008506.rf64 filter=lfs diff=lfs merge=lfs -text
98
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008510.rf64 filter=lfs diff=lfs merge=lfs -text
99
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008512.rf64 filter=lfs diff=lfs merge=lfs -text
100
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008513.rf64 filter=lfs diff=lfs merge=lfs -text
101
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008516.rf64 filter=lfs diff=lfs merge=lfs -text
102
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008517.rf64 filter=lfs diff=lfs merge=lfs -text
103
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008520.rf64 filter=lfs diff=lfs merge=lfs -text
104
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008521.rf64 filter=lfs diff=lfs merge=lfs -text
105
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008522.rf64 filter=lfs diff=lfs merge=lfs -text
106
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008524.rf64 filter=lfs diff=lfs merge=lfs -text
107
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008528.rf64 filter=lfs diff=lfs merge=lfs -text
108
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008531.rf64 filter=lfs diff=lfs merge=lfs -text
109
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008534.rf64 filter=lfs diff=lfs merge=lfs -text
110
+ data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008542.rf64 filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,587 +1,3 @@
1
  ---
2
- annotations_creators:
3
- - expert-generated
4
- language_creators:
5
- - crowdsourced
6
- - expert-generated
7
- language:
8
- - fr
9
  license: cc-by-4.0
10
- multilinguality:
11
- - monolingual
12
- size_categories:
13
- - 100K<n<1M
14
- source_datasets: []
15
- task_categories:
16
- - audio-to-audio
17
- - automatic-speech-recognition
18
- - audio-classification
19
- - text-to-speech
20
- task_ids:
21
- - speaker-identification
22
- pretty_name: Vibravox
23
- viewer: true
24
- dataset_info:
25
- - config_name: speech_clean
26
- features:
27
- - name: audio.headset_microphone
28
- dtype: audio
29
- - name: audio.forehead_accelerometer
30
- dtype: audio
31
- - name: audio.soft_in_ear_microphone
32
- dtype: audio
33
- - name: audio.rigid_in_ear_microphone
34
- dtype: audio
35
- - name: audio.temple_vibration_pickup
36
- dtype: audio
37
- - name: audio.throat_microphone
38
- dtype: audio
39
- - name: gender
40
- dtype: string
41
- - name: speaker_id
42
- dtype: string
43
- - name: sentence_id
44
- dtype: int64
45
- - name: duration
46
- dtype: float64
47
- - name: raw_text
48
- dtype: string
49
- - name: normalized_text
50
- dtype: string
51
- - name: phonemized_text
52
- dtype: string
53
- splits:
54
- - name: train
55
- num_bytes: 109247789463.0
56
- num_examples: 20981
57
- - name: validation
58
- num_bytes: 12896618986.0
59
- num_examples: 2523
60
- - name: test
61
- num_bytes: 15978915932.0
62
- num_examples: 3064
63
- download_size: 136955541722
64
- dataset_size: 138123324381.0
65
- - config_name: speech_noisy
66
- features:
67
- - name: audio.headset_microphone
68
- dtype: audio
69
- - name: audio.forehead_accelerometer
70
- dtype: audio
71
- - name: audio.soft_in_ear_microphone
72
- dtype: audio
73
- - name: audio.rigid_in_ear_microphone
74
- dtype: audio
75
- - name: audio.temple_vibration_pickup
76
- dtype: audio
77
- - name: audio.throat_microphone
78
- dtype: audio
79
- - name: gender
80
- dtype: string
81
- - name: speaker_id
82
- dtype: string
83
- - name: sentence_id
84
- dtype: int64
85
- - name: duration
86
- dtype: float64
87
- - name: raw_text
88
- dtype: string
89
- - name: normalized_text
90
- dtype: string
91
- - name: phonemized_text
92
- dtype: string
93
- splits:
94
- - name: train
95
- num_bytes: 6522270562.0
96
- num_examples: 1220
97
- - name: validation
98
- num_bytes: 706141725.0
99
- num_examples: 132
100
- - name: test
101
- num_bytes: 937186370.0
102
- num_examples: 175
103
- download_size: 8156941693
104
- dataset_size: 8165598657.0
105
- - config_name: speechless_clean
106
- features:
107
- - name: audio.headset_microphone
108
- dtype: audio
109
- - name: audio.forehead_accelerometer
110
- dtype: audio
111
- - name: audio.soft_in_ear_microphone
112
- dtype: audio
113
- - name: audio.rigid_in_ear_microphone
114
- dtype: audio
115
- - name: audio.temple_vibration_pickup
116
- dtype: audio
117
- - name: audio.throat_microphone
118
- dtype: audio
119
- - name: gender
120
- dtype: string
121
- - name: speaker_id
122
- dtype: string
123
- - name: duration
124
- dtype: float64
125
- splits:
126
- - name: train
127
- num_bytes: 9285823162.0
128
- num_examples: 149
129
- - name: validation
130
- num_bytes: 1121767128.0
131
- num_examples: 18
132
- - name: test
133
- num_bytes: 1308782974.0
134
- num_examples: 21
135
- download_size: 10651939843
136
- dataset_size: 11716373264.0
137
- - config_name: speechless_noisy
138
- features:
139
- - name: audio.headset_microphone
140
- dtype: audio
141
- - name: audio.forehead_accelerometer
142
- dtype: audio
143
- - name: audio.soft_in_ear_microphone
144
- dtype: audio
145
- - name: audio.rigid_in_ear_microphone
146
- dtype: audio
147
- - name: audio.temple_vibration_pickup
148
- dtype: audio
149
- - name: audio.throat_microphone
150
- dtype: audio
151
- - name: gender
152
- dtype: string
153
- - name: speaker_id
154
- dtype: string
155
- - name: duration
156
- dtype: float64
157
- splits:
158
- - name: train
159
- num_bytes: 24723250192.0
160
- num_examples: 149
161
- - name: validation
162
- num_bytes: 2986606278.0
163
- num_examples: 18
164
- - name: test
165
- num_bytes: 3484522468.0
166
- num_examples: 21
167
- download_size: 30881658818
168
- dataset_size: 31194378938.0
169
- configs:
170
- - config_name: speech_clean
171
- data_files:
172
- - split: train
173
- path: speech_clean/train-*
174
- - split: validation
175
- path: speech_clean/validation-*
176
- - split: test
177
- path: speech_clean/test-*
178
- - config_name: speech_noisy
179
- data_files:
180
- - split: train
181
- path: speech_noisy/train-*
182
- - split: validation
183
- path: speech_noisy/validation-*
184
- - split: test
185
- path: speech_noisy/test-*
186
- - config_name: speechless_clean
187
- data_files:
188
- - split: train
189
- path: speechless_clean/train-*
190
- - split: validation
191
- path: speechless_clean/validation-*
192
- - split: test
193
- path: speechless_clean/test-*
194
- - config_name: speechless_noisy
195
- data_files:
196
- - split: train
197
- path: speechless_noisy/train-*
198
- - split: validation
199
- path: speechless_noisy/validation-*
200
- - split: test
201
- path: speechless_noisy/test-*
202
  ---
203
-
204
-
205
-
206
- # Dataset Card for VibraVox
207
-
208
- <p align="center">
209
- <img src="https://cdn-uploads.huggingface.co/production/uploads/65302a613ecbe51d6a6ddcec/zhB1fh-c0pjlj-Tr4Vpmr.png" style="object-fit:contain; width:280px; height:280px;" >
210
- </p>
211
-
212
- ---
213
-
214
- 👀 While waiting for the [TooBigContentError issue](https://github.com/huggingface/dataset-viewer/issues/2215) to be resolved by the Hugging Face team, you can explore the dataset viewer of [vibravox-test](https://huggingface.co/datasets/Cnam-LMSSC/vibravox-test)
215
- which has exactly the same architecture.
216
-
217
- ## DATASET SUMMARY
218
-
219
- The [VibraVox dataset](https://vibravox.cnam.fr) is a general-purpose audio dataset of French speech captured with body-conduction transducers.
220
- This dataset can be used for various audio machine learning tasks:
221
- - **Automatic Speech Recognition (ASR)** (Speech-to-Text , Speech-to-Phoneme)
222
- - **Audio Bandwidth Extension (BWE)**
223
- - **Speaker Verification (SPKV)** / identification
224
- - **Voice cloning**
225
- - etc ...
226
-
227
-
228
- ### Dataset usage
229
-
230
- VibraVox contains 4 subsets, corresponding to different situations tailored for specific tasks. To load a specific subset, simply use the following command (```subset``` can be any of the following: ``` "speech_clean" ``` , ``` "speech_noisy" ``` , ``` "speechless_clean" ``` , ``` "speechless_noisy" ```):
231
-
232
- ```python
233
- from datasets import load_dataset
234
- subset = "speech_clean"
235
- vibravox = load_dataset("Cnam-LMSSC/vibravox", subset)
236
- ```
237
-
238
- The dataset is also compatible with the `streaming` mode:
239
-
240
- ```python
241
- from datasets import load_dataset
242
- subset = "speech_clean"
243
- vibravox = load_dataset("Cnam-LMSSC/vibravox", subset, streaming=True)
244
- ```
245
-
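- A minimal sketch (relying only on the column names documented below) of iterating over the streamed subset and reading one audio channel:
-
- ```python
- from datasets import load_dataset
-
- # Stream the subset and fetch the first example without downloading the full archives
- vibravox = load_dataset("Cnam-LMSSC/vibravox", "speech_clean", split="train", streaming=True)
- first_example = next(iter(vibravox))
-
- audio = first_example["audio.headset_microphone"]  # dict with 'array', 'sampling_rate' and 'path'
- print(audio["sampling_rate"], len(audio["array"]), first_example["normalized_text"])
- ```
-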
246
- ### Citations, links and details
247
-
248
-
249
- - **Homepage:** For more information about the project, visit our project page on [https://vibravox.cnam.fr](https://vibravox.cnam.fr)
250
- - **Github repository:** [jhauret/vibravox](https://github.com/jhauret/vibravox) : Source code for ASR, BWE and SPKV tasks using the Vibravox dataset
251
- - **Point of Contact:** [Julien Hauret](https://www.linkedin.com/in/julienhauret/) and [Éric Bavu](https://acoustique.cnam.fr/contacts/bavu/en/#contact)
252
- - **Curated by:** [AVA Team](https://lmssc.cnam.fr/fr/recherche/identification-localisation-synthese-de-sources-acoustiques-et-vibratoires) of the [LMSSC Research Laboratory](https://lmssc.cnam.fr)
253
- - **Funded by:** [Agence Nationale Pour la Recherche / AHEAD Project](https://anr.fr/en/funded-projects-and-impact/funded-projects/project/funded/project/b2d9d3668f92a3b9fbbf7866072501ef-5aac4914c7/?tx_anrprojects_funded%5Bcontroller%5D=Funded&cHash=fa352121b44b60bf6a5917180d5205e6)
254
- - **Language:** French
255
- - **Download size** : 186.64 GB
256
- - **Total audio duration** : 38.31 hours (x6 audio channels)
257
- - **Number of speech utterances** : 28,095
258
- - **License:** Creative Commons Attribution 4.0 (CC BY 4.0)
259
-
260
- If you use the Vibravox dataset for research, **cite this paper**:
261
-
262
- ```bibtex
263
- @article{jhauret-et-al-2024-vibravox,
264
- title={{Vibravox: A Dataset of French Speech Captured with Body-conduction Audio Sensors}},
265
- author={Hauret, Julien and Olivier, Malo and Joubaud, Thomas and Langrenne, Christophe and
266
- Poir{\'e}e, Sarah and Zimpfer, Véronique and Bavu, {\'E}ric},
267
- year={2024},
268
- eprint={2407.11828},
269
- archivePrefix={arXiv},
270
- primaryClass={eess.AS},
271
- url={https://arxiv.org/abs/2407.11828},
272
- }
273
- ```
274
-
275
- **and this repository**, which is linked to a DOI:
276
-
277
- ```bibtex
278
- @misc{cnamlmssc2024vibravoxdataset,
279
- author={Hauret, Julien and Olivier, Malo and Langrenne, Christophe and
280
- Poir{\'e}e, Sarah and Bavu, {\'E}ric},
281
- title = { {Vibravox} (Revision 7990b7d) },
282
- year = 2024,
283
- url = { https://huggingface.co/datasets/Cnam-LMSSC/vibravox },
284
- doi = { 10.57967/hf/2727 },
285
- publisher = { Hugging Face }
286
- }
287
- ```
288
-
289
- ---
290
-
291
- ## SUPPORTED TASKS
292
- <!-- and Leaderboards -->
293
-
294
- ### Automatic-speech-recognition
295
-
296
- - The model is presented with an audio file and asked to transcribe it to written text (either normalized text or phonemized text). The most common evaluation metrics are the word error rate (WER), character error rate (CER), and phoneme error rate (PER).
297
- - **Training code:** An example of implementation for the speech-to-phoneme task using [wav2vec2.0](https://arxiv.org/abs/2006.11477) is available on the [Vibravox Github repository](https://github.com/jhauret/vibravox).
298
- - **Trained models:** We also provide trained models for the speech-to-phoneme task for each of the 6 speech sensors of the Vibravox dataset on the Hugging Face Hub at [Cnam-LMSSC/vibravox_phonemizers](https://huggingface.co/Cnam-LMSSC/vibravox_phonemizers). A minimal inference sketch is given below.
299
-
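- A minimal inference sketch for the speech-to-phoneme task. The checkpoint name below is a placeholder: pick the actual phonemizer matching your sensor from the [Cnam-LMSSC/vibravox_phonemizers](https://huggingface.co/Cnam-LMSSC/vibravox_phonemizers) collection.
-
- ```python
- import torch
- from datasets import load_dataset
- from transformers import AutoModelForCTC, AutoProcessor
-
- checkpoint = "Cnam-LMSSC/phonemizer_headset_microphone"  # placeholder id, replace with the actual model
- processor = AutoProcessor.from_pretrained(checkpoint)
- model = AutoModelForCTC.from_pretrained(checkpoint)
-
- sample = next(iter(load_dataset("Cnam-LMSSC/vibravox", "speech_clean", split="test", streaming=True)))
- audio = sample["audio.headset_microphone"]  # depending on the checkpoint, resampling from 48 kHz may be needed
-
- inputs = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt")
- with torch.no_grad():
-     logits = model(**inputs).logits
- print(processor.batch_decode(torch.argmax(logits, dim=-1)))
- ```
-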
300
- ### Bandwidth-extension
301
-
302
- - Also known as audio super-resolution, this task is required to enhance the audio quality of body-conducted speech. The model is presented with a pair of audio clips (body-conducted speech and the corresponding clean, full-bandwidth airborne-captured speech) and asked to enhance the audio by denoising and regenerating mid and high frequencies from the low-frequency content only.
303
- - **Training code:** An example of implementation of this task using [Configurable EBEN](https://ieeexplore.ieee.org/document/10244161) ([arXiv link](https://arxiv.org/abs/2303.10008)) is available on the [Vibravox Github repository](https://github.com/jhauret/vibravox).
304
- - **Trained models:** We also provide trained models for the BWE task for each of the 6 speech sensors of the Vibravox dataset on Huggingface at [Cnam-LMSSC/vibravox_EBEN_bwe_models](https://huggingface.co/Cnam-LMSSC/vibravox_EBEN_bwe_models).
305
- - **BWE-Enhanced dataset:** An EBEN-enhanced version of the `test` splits of the Vibravox dataset, generated using these 6 BWE models, is also available on Hugging Face at [Cnam-LMSSC/vibravox_enhanced_by_EBEN](https://huggingface.co/datasets/Cnam-LMSSC/vibravox_enhanced_by_EBEN). A sketch of building training pairs from the raw dataset is shown below.
306
-
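- A sketch of how (degraded, reference) training pairs for bandwidth extension can be built directly from the dataset, using the throat microphone as the band-limited input and the headset microphone as the full-bandwidth target (any other body-conduction column works the same way):
-
- ```python
- from datasets import load_dataset
-
- vibravox = load_dataset("Cnam-LMSSC/vibravox", "speech_clean", split="train", streaming=True)
-
- for example in vibravox:
-     corrupted = example["audio.throat_microphone"]["array"]    # band-limited body-conducted input
-     reference = example["audio.headset_microphone"]["array"]   # clean, full-bandwidth airborne target
-     # ... feed the (corrupted, reference) pair to a BWE model such as EBEN
-     break
- ```
-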
307
- ### Speaker-verification
308
-
309
- - Given an input audio clip and a reference audio clip of a known speaker, the model's objective is to compare the two clips and verify if they are from the same individual. This often involves extracting embeddings from a deep neural network trained on a large dataset of voices. The model then measures the similarity between these feature sets using techniques like cosine similarity or a learned distance metric. This task is crucial in applications requiring secure access control, such as biometric authentication systems, where a person's voice acts as a unique identifier.
310
- - **Testing code:** An example of implementation of this task using a pretrained [ECAPA2 model](https://arxiv.org/abs/2401.08342) is available on the [Vibravox Github repository](https://github.com/jhauret/vibravox). A minimal similarity-scoring sketch is also given below.
311
-
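- A minimal sketch of the verification step described above: embeddings from a speaker-embedding model (e.g. ECAPA2) run on two Vibravox clips are compared with cosine similarity against a decision threshold. The embedding dimension and threshold below are arbitrary placeholders.
-
- ```python
- import numpy as np
-
- def cosine_similarity(emb_a: np.ndarray, emb_b: np.ndarray) -> float:
-     """Cosine similarity between two speaker embeddings."""
-     return float(np.dot(emb_a, emb_b) / (np.linalg.norm(emb_a) * np.linalg.norm(emb_b)))
-
- emb_enrolled = np.random.randn(192)  # placeholder: embedding of the reference clip
- emb_test = np.random.randn(192)      # placeholder: embedding of the clip to verify
- same_speaker = cosine_similarity(emb_enrolled, emb_test) > 0.5  # threshold tuned on the validation split
- ```
-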
312
-
313
- ### Adding your models for supported tasks or contributing for new tasks
314
-
315
- Feel free to contribute at the [Vibravox Github repository](https://github.com/jhauret/vibravox), by following the [contributor guidelines](https://github.com/jhauret/vibravox/blob/main/CONTRIBUTING.md).
316
-
317
- ---
318
-
319
- ## DATASET DETAILS
320
-
321
- ### Dataset Description
322
-
323
- VibraVox ([vibʁavɔks]) is a GDPR-compliant dataset scheduled for release in June 2024. It includes speech recorded simultaneously using multiple audio and vibration sensors (from top to bottom in the following figure):
324
-
325
- - a forehead miniature vibration sensor (green)
326
- - an in-ear comply foam-embedded microphone (red)
327
- - an in-ear rigid earpiece-embedded microphone (blue)
328
- - a temple vibration pickup (cyan)
329
- - a headset microphone located near the mouth (purple)
330
- - a laryngophone (orange)
331
-
332
- The technology and references of each sensor are described and documented in [the dataset creation](#dataset-creation) section and at [https://vibravox.cnam.fr/documentation/hardware/](https://vibravox.cnam.fr/documentation/hardware).
333
-
334
- <p align="center">
335
- <img src="https://cdn-uploads.huggingface.co/production/uploads/6390fc80e6d656eb421bab69/P-_IWM3IMED5RBS3Lhydc.png" />
336
- </p>
337
-
338
- ### Goals
339
-
340
- The VibraVox speech corpus has been recorded with 200 participants under various acoustic conditions imposed by a [5th order ambisonics spatialization sphere](https://vibravox.cnam.fr/documentation/hardware/sphere/index.html).
341
-
342
- VibraVox aims at serving as a valuable resource for advancing the field of **body-conducted speech analysis** and facilitating the development of **robust communication systems for real-world applications**.
343
-
344
- Unlike traditional microphones, which rely on airborne sound waves, body-conduction sensors capture speech signals directly from the body, offering advantages in noisy environments by eliminating the capture of ambient noise. Although body-conduction sensors have been available for decades, their limited bandwidth has restricted their widespread usage. However, this technology may now be opening up to a wide audience for speech capture and communication in noisy environments.
345
-
346
- ### Data / sensor mapping
347
-
348
- Although the column names in the Vibravox dataset are self-explanatory, here is the mapping, with information on the positioning of each sensor and its technology:
349
-
350
- | Vibravox dataset column name | Sensor | Location | Technology |
351
- |:------------------------------------ |:------------------------------------------ |:---------------- |:-------------------------------------------------- |
352
- | ```audio.headset_microphone``` | Headset microphone | Near the mouth | Cardioid electrodynamic microphone |
353
- | ```audio.throat_microphone``` | Laryngophone | Throat / Larynx | Piezoelectric sensor |
354
- | ```audio.soft_in_ear_microphone``` | In-ear soft foam-embedded microphone | Right ear canal | Omnidirectional electret condenser microphone |
355
- | ```audio.rigid_in_ear_microphone``` | In-ear rigid earpiece-embedded microphone | Left ear-canal | Omnidirectional MEMS microphone |
356
- | ```audio.forehead_accelerometer``` | Forehead vibration sensor | Frontal bone | One-axis accelerometer |
357
- | ```audio.temple_vibration_pickup``` | Temple vibration pickup | Zygomatic bone | Figure-of-eight pre-polarized condenser transducer |
358
-
359
-
360
- ---
361
-
362
- ## DATASET STRUCTURE
363
-
364
- ### Subsets
365
-
366
- Each of the 4 subsets contains **6 columns of audio data**, corresponding to the 5 different body-conduction sensors, plus the standard headset microphone.
367
-
368
- Recording was carried out simultaneously on all 6 sensors, **audio files being sampled at 48 kHz and encoded as .wav PCM32 files**.
369
-
370
- The 4 subsets correspond to :
371
-
372
- - **```speech_clean```** : the speaker reads sentences sourced from the French Wikipedia. This subset contains the most data and is intended for training on various tasks.
373
-
374
- - **```speech_noisy```** : the speaker reads sentences sourced from the French Wikipedia in a noisy environment, created from ambisonic recordings replayed in a spatialization sphere equipped with 56 loudspeakers surrounding the speaker. It is primarily intended for testing the various systems (speech enhancement, automatic speech recognition, speaker verification) developed on the basis of the ```speech_clean``` recordings.
375
-
376
- - **```speechless_clean```** : the wearers of the devices remain speechless in complete silence, but are free to move their bodies and faces, and can swallow and breathe naturally. These samples are valuable for tasks such as heart rate tracking or analyzing the noise properties of the various microphones, and also for generating synthetic datasets with realistic physiological (and sensor-inherent) noise captured by body-conduction sensors.
377
-
378
- - **```speechless_noisy```** : the wearers of the devices remain speechless in a noisy environment created using [AudioSet](https://research.google.com/audioset/) noise samples. These samples have been selected from relevant classes, normalized in loudness, pseudo-spatialized and played from random directions around the participant using a [5th order ambisonic 3D sound spatializer](https://vibravox.cnam.fr/documentation/hardware/sphere/index.html) equipped with 56 loudspeakers. The objective of this subset is to gather background noises that can be combined with the `speech_clean` recordings while keeping a clean reference. This allows these samples to be used for **realistic data augmentation** with noise captured by body-conduction sensors, preserving the inherent attenuation of each sensor on different device wearers (see the augmentation sketch below).
379
-
380
-
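- A sketch of the data-augmentation recipe mentioned above, assuming a 0 dB target SNR (an arbitrary choice) and the rigid in-ear channel; the noise is taken from the same sensor as the clean speech so that the sensor-specific attenuation is preserved:
-
- ```python
- import numpy as np
- from datasets import load_dataset
-
- speech = load_dataset("Cnam-LMSSC/vibravox", "speech_clean", split="train", streaming=True)
- noise = load_dataset("Cnam-LMSSC/vibravox", "speechless_noisy", split="train", streaming=True)
-
- clean = next(iter(speech))["audio.rigid_in_ear_microphone"]["array"]
- background = next(iter(noise))["audio.rigid_in_ear_microphone"]["array"][: len(clean)]
-
- snr_db = 0.0  # target signal-to-noise ratio
- gain = np.sqrt(np.mean(clean**2) / (np.mean(background**2) * 10 ** (snr_db / 10)))
- augmented = clean + gain * background  # noisy version of the clean utterance
- ```
-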
381
- ### Splits
382
-
383
- All the subsets are available in 3 splits (train, validation and test), with a standard 80% / 10% / 10% repartition and no speaker overlap between splits.
384
-
385
- The speakers / participants assigned to a given split are the same for every subset, which allows you to:
386
-
387
- - use `speechless_noisy` for data augmentation, for example
388
- - test models trained on the `speech_clean` train split on the `speech_noisy` test split, without having to worry that a speaker was already seen during training (a quick check is sketched below).
389
-
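- A quick check of the no-speaker-overlap property (non-streaming, so it downloads the whole subset; the lighter [vibravox-test](https://huggingface.co/datasets/Cnam-LMSSC/vibravox-test) mirror can be used for a dry run):
-
- ```python
- from datasets import load_dataset
-
- vibravox = load_dataset("Cnam-LMSSC/vibravox", "speech_clean")
- speakers = {split: set(vibravox[split]["speaker_id"]) for split in ("train", "validation", "test")}
-
- assert speakers["train"].isdisjoint(speakers["validation"])
- assert speakers["train"].isdisjoint(speakers["test"])
- assert speakers["validation"].isdisjoint(speakers["test"])
- ```
-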
390
- ### Data Fields
391
-
392
- In non-streaming mode (default), the `path` value of each `datasets.Audio` dictionary points to the locally extracted audio file. In streaming mode, the `path` is the relative path of the audio file inside its archive (as files are not downloaded and extracted locally). A resampling sketch follows the field list below.
393
-
394
- **Common Data Fields for all subsets :**
395
-
396
- * `audio.headset_microphone` (datasets.Audio) - a dictionary containing the path to the audio recorded by the headset microphone, the decoded (mono) audio array, and the sampling rate.
397
- * `audio.forehead_accelerometer` (datasets.Audio) - a dictionary containing the path to the audio recorded by the forehead miniature accelerometer, the decoded (mono) audio array, and the sampling rate.
398
- * `audio.soft_in_ear_microphone` (datasets.Audio) - a dictionary containing the path to the audio recorded by the in-ear soft foam-embedded microphone, the decoded (mono) audio array, and the sampling rate.
399
- * `audio.rigid_in_ear_microphone` (datasets.Audio) - a dictionary containing the path to the audio recorded by the in-ear rigid earpiece-embedded microphone, the decoded (mono) audio array, and the sampling rate.
400
- * `audio.temple_vibration_pickup` (datasets.Audio) - a dictionary containing the path to the audio recorded by the temple vibration pickup, the decoded (mono) audio array, and the sampling rate.
401
- * `audio.throat_microphone` (datasets.Audio) - a dictionary containing the path to the audio recorded by the piezoelectric laryngophone, the decoded (mono) audio array, and the sampling rate.
402
- * `gender` (string) - gender of speaker (```male``` or ```female```)
403
- * `speaker_id` (string) - encrypted id of speaker
404
- * `duration` (float32) - the audio length in seconds.
405
-
406
-
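- A sketch (using the standard `datasets` API) of decoding all six channels at 16 kHz instead of the native 48 kHz, which is convenient for models expecting 16 kHz input:
-
- ```python
- from datasets import Audio, load_dataset
-
- vibravox = load_dataset("Cnam-LMSSC/vibravox", "speech_clean", split="test", streaming=True)
- for column in ["audio.headset_microphone", "audio.forehead_accelerometer",
-                "audio.soft_in_ear_microphone", "audio.rigid_in_ear_microphone",
-                "audio.temple_vibration_pickup", "audio.throat_microphone"]:
-     vibravox = vibravox.cast_column(column, Audio(sampling_rate=16000))  # resampled on access
- ```
-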
407
- **Extra Data Fields for the `speech_clean` and `speech_noisy` subsets:**
408
-
409
- For the **speech** subsets, the dataset has columns corresponding to the pronounced sentences, which are absent from the **speechless** subsets:
410
-
411
- * `sentence_id` (int) - id of the pronounced sentence
412
- * `raw_text` (string) - audio segment text (cased and with punctuation preserved)
413
- * `normalized_text` (string) - audio segment normalized text (lower cased, no punctuation, diacritics replaced by the standard 26 letters of the French alphabet, plus the characters é, è, ê and ç -- which hold phonetic significance -- and the space character, which corresponds to 31 possible characters : ``` [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'ç', 'è', 'é', 'ê'] ```).
414
- * `phonemized_text` (string) - audio segment phonemized text, using exclusively strict French IPA characters (listed below)
415
-
416
-
417
- ### Phonemes list and tokenizer
418
-
419
- - The strict French IPA characters used in Vibravox are: ``` [' ', 'a', 'b', 'd', 'e', 'f', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 's', 't', 'u', 'v', 'w', 'y', 'z', 'ø', 'ŋ', 'œ', 'ɑ', 'ɔ', 'ə', 'ɛ', 'ɡ', 'ɲ', 'ʁ', 'ʃ', 'ʒ', '̃'] ```.
420
- - For convenience and research reproducibility, we provide a tokenizer for speech-to-phoneme tasks that corresponds to those phonemes at [https://huggingface.co/Cnam-LMSSC/vibravox-phonemes-tokenizer](https://huggingface.co/Cnam-LMSSC/vibravox-phonemes-tokenizer) (a loading sketch is given below).
421
-
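- A loading sketch, assuming the tokenizer can be fetched with `AutoTokenizer` (it behaves like a Wav2Vec2-style CTC tokenizer, mapping each character above to one token id):
-
- ```python
- from transformers import AutoTokenizer
-
- tokenizer = AutoTokenizer.from_pretrained("Cnam-LMSSC/vibravox-phonemes-tokenizer")
- ids = tokenizer("sɛt memwaʁ ytiliz lə ʃɑ̃ʒmɑ̃ də faz").input_ids
- print(ids, tokenizer.decode(ids))
- ```
-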
422
-
423
- ### Examples of data Instances
424
-
425
- #### `speech_clean` or `speech_noisy` subsets:
426
-
427
- ```python
428
- {
429
- 'audio.headset_microphone': {
430
- 'path': '02472_headset_mic.wav',
431
- 'array': array([ 0.00045776, 0.00039673, 0.0005188 , ..., -0.00149536,
432
- -0.00094604, 0.00036621]),
433
- 'sampling_rate': 48000},
434
- 'audio.forehead_accelerometer': {
435
- 'path': '02472_forehead_accelerometer.wav',
436
- 'array': array([ 0.0010376 , -0.00045776, -0.00085449, ..., -0.00491333,
437
- -0.00524902, -0.00302124]),
438
- 'sampling_rate': 48000},
439
- 'audio.soft_in_ear_microphone': {
440
- 'path': '02472_soft_in_ear_mic.wav',
441
- 'array': array([-0.06472778, -0.06384277, -0.06292725, ..., -0.02133179,
442
- -0.0213623 , -0.02145386]),
443
- 'sampling_rate': 48000},
444
- 'audio.rigid_in_ear_microphone': {
445
- 'path': '02472_rigid_in_ear_mic.wav',
446
- 'array': array([-0.01824951, -0.01821899, -0.01812744, ..., -0.00387573,
447
- -0.00427246, -0.00439453]),
448
- 'sampling_rate': 48000},
449
- 'audio.temple_vibration_pickup':{
450
- 'path': '02472_temple_vibration_pickup.wav',
451
- 'array': array([-0.0177002 , -0.01791382, -0.01745605, ..., 0.01098633,
452
- 0.01260376, 0.01220703]),
453
- 'sampling_rate': 48000},
454
- 'audio.throat_microphone': {
455
- 'path': '02472_laryngophone.wav',
456
- 'array': array([-2.44140625e-04, -3.05175781e-05, 2.13623047e-04, ...,
457
- 4.88281250e-04, 4.27246094e-04, 3.66210938e-04]),
458
- 'sampling_rate': 48000},
459
- 'gender': 'female',
460
- 'speaker_id': 'qt4TPMEPwF',
461
- 'sentence_id': 2472,
462
- 'duration': 4.5,
463
- 'raw_text': "Cette mémoire utilise le changement de phase du verre pour enregistrer l'information.",
464
- 'normalized_text': 'cette mémoire utilise le changement de phase du verre pour enregistrer l information',
465
- 'phonemized_text': 'sɛt memwaʁ ytiliz lə ʃɑ̃ʒmɑ̃ də faz dy vɛʁ puʁ ɑ̃ʁʒistʁe lɛ̃fɔʁmasjɔ̃'
466
- }
467
- ```
468
-
469
- #### `speechless_clean` or `speechless_noisy` subsets
470
-
471
- (thus missing the text-related fields)
472
-
473
- ```python
474
- {
475
- 'audio.headset_microphone': {
476
- 'path': 'jMngOy7BdQ_headset_mic.wav',
477
- 'array': array([-1.92260742e-03, -2.44140625e-03, -2.99072266e-03, ...,
478
- 0.00000000e+00, 3.05175781e-05, -3.05175781e-05]),
479
- 'sampling_rate': 48000},
480
- 'audio.forehead_accelerometer': {
481
- 'path': 'jMngOy7BdQ_forehead_accelerometer.wav',
482
- 'array': array([-0.0032959 , -0.00259399, 0.00177002, ..., -0.00073242,
483
- -0.00076294, -0.0005188 ]),
484
- 'sampling_rate': 48000},
485
- 'audio.soft_in_ear_microphone': {
486
- 'path': 'jMngOy7BdQ_soft_in_ear_mic.wav',
487
- 'array': array([0.00653076, 0.00671387, 0.00683594, ..., 0.00045776, 0.00042725,
488
- 0.00042725]),
489
- 'sampling_rate': 48000},
490
- 'audio.rigid_in_ear_microphone': {
491
- 'path': 'jMngOy7BdQ_rigid_in_ear_mic.wav',
492
- 'array': array([ 1.05895996e-02, 1.03759766e-02, 1.05590820e-02, ...,
493
- 0.00000000e+00, -3.05175781e-05, -9.15527344e-05]),
494
- 'sampling_rate': 48000},
495
- 'audio.temple_vibration_pickup': {
496
- 'path': 'jMngOy7BdQ_temple_vibration_pickup.wav',
497
- 'array': array([-0.00082397, -0.0020752 , -0.0012207 , ..., -0.00738525,
498
- -0.00814819, -0.00579834]), 'sampling_rate': 48000},
499
- 'audio.throat_microphone': {
500
- 'path': 'jMngOy7BdQ_laryngophone.wav',
501
- 'array': array([ 0.00000000e+00, 3.05175781e-05, 1.83105469e-04, ...,
502
- -6.10351562e-05, -1.22070312e-04, -9.15527344e-05]),
503
- 'sampling_rate': 48000},
504
- 'gender': 'male',
505
- 'speaker_id': 'jMngOy7BdQ',
506
- 'duration': 54.097
507
- }
508
- ```
509
-
510
-
511
- ---
512
-
513
- ## DATA STATISTICS
514
-
515
- ### Speakers gender balance
516
-
517
- To increase the representativeness and inclusivity of the dataset, a deliberate effort was made to recruit a diverse and gender-balanced group of speakers. The overall gender repartition in terms of number of speakers included in the dataset is **51.6% female participants / 48.4% male participants for all subsets**.
518
-
519
- ### Speakers age balance
520
-
521
-
522
- | Gender | Mean age (years) | Median age (years) | Min age (years) | Max age (years) |
523
- |:------------|:-----------------|:--------------------|:-------------------|:--------------------|
524
- | Female | 25.9 | 22 | 19 | 59 |
525
- | Male | 31.4 | 27 | 18 | 82 |
526
- | **All** | **28.55** | **25** | **18** | **82** |
527
-
528
-
529
-
530
- ### Audio data
531
-
532
-
533
- | Subset | Split | Audio duration (hours) | Number of audio clips | Download size | Number of Speakers <br> (Female/Male) | F/M Gender repartition <br> (audio duration) | Mean audio duration (s) | Median audio duration (s) | Max audio duration (s) | Min audio duration (s) |
534
- |:-------------------|:---------------------------------------|:--------------------------------|:-----------------------------------|:------------------------------------|:---------------------------------------|:---------------------------------------------------------|:----------------------------------|:---------------------------------|:------------------------------------|:-------------------------------|
535
- | `speech_clean` | `train` <br> `validation` <br> `test` | 6x20.94 <br> 6x2.42 <br> 6x3.03 | 6x20,981 <br> 6x2,523 <br> 6x3,064 | 108.32GB <br> 12.79GB <br> 15.84GB | 77F/72M <br> 9F/9M <br> 11F/10M | 52.46%/47.54% <br> 52.13%/47.87% <br> 55.74%/44.26% | 3.59 <br> 3.46 <br> 3.56 | 3.50 <br> 3.38 <br> 3.48 | 12.20 <br> 9.44 <br> 9.58 | 0.52 <br> 0.66 <br> 0.58 |
536
- | `speech_noisy` | `train` <br> `validation` <br> `test` | 6x1.26 <br> 6x0.13 <br> 6x0.18 | 6x1,220 <br> 6x132 <br> 6x175 | 6.52GB <br> 0.71GB <br> 0.94GB | 77F/72M <br> 9F/9M <br> 11F/10M | 54.31%/45.69% <br> 56.61%/43.39% <br> 55.54%/44.46% | 3.71 <br> 3.67 <br> 3.66 | 3.64 <br> 3.47 <br> 3.70 | 8.66 <br> 7.36 <br> 6.88 | 0.46 <br> 1.10 <br> 1.00 |
537
- | `speechless_clean` | `train` <br> `validation` <br> `test` | 6x2.24 <br> 6x0.27 <br> 6x0.32 | 6x149 <br> 6x18 <br> 6x21 | 8.44GB <br> 1.02GB <br> 1.19GB | 77F/72M <br> 9F/9M <br> 11F/10M | 51.68%/48.32% <br> 50.00%/50.00% <br> 52.38%/47.62% | 54.10 <br> 54.10 <br> 54.10 | 54.10 <br> 54.10 <br> 54.10 | 54.10 <br> 54.10 <br> 54.10 | 53.99 <br> 54.05 <br> 54.10 |
538
- | `speechless_noisy` | `train` <br> `validation` <br> `test` | 6x5.96 <br> 6x0.72 <br> 6x0.84 | 6x149 <br> 6x18 <br> 6x21 | 24.48GB <br> 2.96GB <br> 3.45GB | 77F/72M <br> 9F/9M <br> 11F/10M | 51.68%/48.32% <br> 50.00%/50.00% <br> 52.38%/47.62% | 144.03 <br> 144.03 <br> 144.04 | 144.03 <br> 144.03 <br> 144.03 | 144.17 <br> 144.05 <br> 144.05 | 143.84 <br> 143.94 <br> 144.03 |
539
- | **Total** | | **6x38.31** | **6x28,471** | **186.64GB** | **97F/91M** | **52.55%/47.45%** | | | | |
540
-
541
-
542
- ---
543
-
544
- ## DATASET CREATION
545
-
546
- ### Textual source data
547
-
548
- The text read by all participants was collected from the French Wikipedia subset of Common Voice ([link1](https://github.com/common-voice/common-voice/blob/6e43e7e61318bf4605b59379e3f35ba5333d7a29/server/data/fr/wiki-1.fr.txt), [link2](https://github.com/common-voice/common-voice/blob/6e43e7e61318bf4605b59379e3f35ba5333d7a29/server/data/fr/wiki-2.fr.txt)). We applied additional filters to these textual datasets in order to create a simplified dataset with a minimum number of tokens and to reduce the uncertainty of the pronunciation of some proper names. We therefore removed all proper names except common first names and the names of French towns. We also removed any utterances containing numbers, Greek letters or math symbols, or that are syntactically incorrect.
549
-
550
- All lines of the Wikipedia-extracted textual source data were then phonemized using [bootphon/phonemizer](https://github.com/bootphon/phonemizer) and manually edited to keep only strict French IPA characters. An illustrative call is shown below.
551
-
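- An illustrative call to [bootphon/phonemizer](https://github.com/bootphon/phonemizer); the exact backend and options used to build Vibravox are not reproduced here, but a typical French setup with the espeak backend looks like this:
-
- ```python
- from phonemizer import phonemize
-
- ipa = phonemize(
-     "Cette mémoire utilise le changement de phase du verre pour enregistrer l'information.",
-     language="fr-fr",
-     backend="espeak",
-     strip=True,
- )
- print(ipa)  # manual post-editing then keeps only strict French IPA characters
- ```
-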
552
- ### Audio Data Collection
553
-
554
-
555
- #### Sensors positioning and documentation
556
-
557
-
558
- | **Sensor** | **Image** | **Transducer** | **Online documentation** |
559
- |:---------------------------|:---------------------|:-------------|:----------------------------------------------------------------------------------------------------------------------|
560
- | Reference headset microphone | ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6390fc80e6d656eb421bab69/iVYX1_7wAdZb4oDrc9v6l.png) | Shure WH20 | [See documentation on vibravox.cnam.fr](https://vibravox.cnam.fr/documentation/hardware/sensors/airborne/index.html) |
561
- | In-ear comply foam-embedded microphone |![image/png](https://cdn-uploads.huggingface.co/production/uploads/6390fc80e6d656eb421bab69/Uf1VOwx-kxPiYY1oMW5pz.png)| Knowles FG-23329-P07 | [See documentation on vibravox.cnam.fr](https://vibravox.cnam.fr/documentation/hardware/sensors/soft_inear/index.html) |
562
- | In-ear rigid earpiece-embedded microphone |![image/png](https://cdn-uploads.huggingface.co/production/uploads/6390fc80e6d656eb421bab69/EBY9dIKFN8GDaDXUuhp7n.png)| Knowles SPH1642HT5H | [See documentation on vibravox.cnam.fr](https://vibravox.cnam.fr/documentation/hardware/sensors/rigid_inear/index.html) |
563
- | Forehead miniature vibration sensor |![image/png](https://cdn-uploads.huggingface.co/production/uploads/6390fc80e6d656eb421bab69/2zHrN-7OpbH-zJTqASZ7J.png)| Knowles BU23173-000 | [See documentation on vibravox.cnam.fr](https://vibravox.cnam.fr/documentation/hardware/sensors/forehead/index.html) |
564
- | Temple vibration pickup |![image/png](https://cdn-uploads.huggingface.co/production/uploads/6390fc80e6d656eb421bab69/wAcTQlmzvl0O4kNyA3MnC.png)| AKG C411 | [See documentation on vibravox.cnam.fr](https://vibravox.cnam.fr/documentation/hardware/sensors/temple/index.html) |
565
- | Laryngophone | ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6390fc80e6d656eb421bab69/4SGNSgXYc6hBJcI1cRXY_.png)| iXRadio XVTM822D-D35 | [See documentation on vibravox.cnam.fr](https://vibravox.cnam.fr/documentation/hardware/sensors/throat/index.html) |
566
-
567
-
568
- #### Recorded audio data post-processing
569
-
570
- Across the sentences collected from the participants, a small number of audio clips exhibited various shortcomings. Despite researchers monitoring and validating each recording individually, the process was not entirely foolproof : mispronounced sentences, sensors shifting from their initial positions, or more significant microphone malfunctions occasionally occurred. In instances where sensors were functional but not ideally positioned—such as when the participant's ear canal was too small for the rigid in-ear microphone to achieve proper acoustic sealing—we chose to retain samples where the bandwidth was slightly narrower than desired. This decision was made to enhance the robustness of our models against the effects of misplaced sensors.
571
-
572
- To address these occasional shortcomings and offer a high-quality dataset, we implemented a series of 3 automatic filters to retain only the best audio from the `speech_clean` subset. We preserved only those sentences where all sensors were in optimal recording condition, adhering to predefined criteria defined in [our paper](https://arxiv.org/abs/2407.11828):
573
-
574
-
575
- - The first filter uses a pre-trained ASR model run on the headset microphone data, which makes it possible to address discrepancies between the labeled transcription and the actual pronunciation, ensuring high-quality labels for the speech-to-phoneme task.
576
- - The second filter confirms that the sensor is functioning correctly by verifying that speech exhibits higher energy than silence, thereby identifying potentially unreliable recordings with low vocal energy levels or sensor malfunction.
577
- - The third filter detects sensitivity drift in the sensors, which can occur due to electronic malfunctions or mechanical blockages in the transducer.
578
- - If an audio clip passes all filters, it is not immediately added to the dataset. Instead, VAD-generated timestamps from [whisper-timestamped](https://github.com/linto-ai/whisper-timestamped) are used, extended by 0.3 seconds on both sides. This method helps remove mouse clicks at audio boundaries and ensures the capture of vocal segments without excluding valid speech portions (see the trimming sketch below).
579
-
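- A sketch of that trimming rule, assuming segment timestamps (in seconds) have already been produced by [whisper-timestamped](https://github.com/linto-ai/whisper-timestamped); only the 0.3 s padding logic is shown:
-
- ```python
- import numpy as np
-
- def trim_with_padding(audio: np.ndarray, segments: list, sample_rate: int, pad: float = 0.3) -> np.ndarray:
-     """Keep the voiced region spanned by the VAD segments, extended by `pad` seconds on both sides."""
-     start = max(0.0, min(seg["start"] for seg in segments) - pad)
-     end = min(len(audio) / sample_rate, max(seg["end"] for seg in segments) + pad)
-     return audio[int(start * sample_rate): int(end * sample_rate)]
-
- # segments would look like [{"start": 0.52, "end": 4.18}, ...] in whisper-timestamped output
- ```
-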
580
- ### Personal and Sensitive Information
581
-
582
- The VibraVox dataset does not contain any data that might be considered as personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.).
583
-
584
- The `speaker_id` values were generated using the Fernet encryption algorithm followed by the extraction of a subset of the encrypted id, guaranteeing strict anonymisation of the voice recordings while allowing the dataset maintainers to delete the corresponding data under the right to be forgotten (an illustrative sketch is given below).
585
-
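- An illustrative sketch of that anonymisation scheme (the real key, internal identifier format and slice are of course not disclosed; the names below are placeholders):
-
- ```python
- from cryptography.fernet import Fernet
-
- key = Fernet.generate_key()                              # kept by the dataset maintainers only
- token = Fernet(key).encrypt(b"internal-participant-id")  # Fernet token, decryptable with the key
- speaker_id = token.decode()[-11:-1]                      # a short slice becomes the public speaker_id
- print(speaker_id)
- ```
-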
586
- A [consent form](https://vibravox.cnam.fr/documentation/consent/index.html) has been signed by each participant in the VibraVox dataset. This consent form has been approved by the Cnam lawyer. All [Cnil](https://www.cnil.fr/en) requirements have been checked, including the right to be forgotten over a period of 50 years.
587
-
 
1
  ---
2
  license: cc-by-4.0
3
  ---
speech_clean/test-00000-of-00032.parquet → data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiN_00008590.rf64 RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f644a7583fa9ebbe77101ad6a0731328695669d036e4841b3fb9d665d0827eed
3
- size 513881500
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b13c8931e94fbf8fba3949eef5ebdc4b1ec314d7cf7b272c3e85e19deb8fd47b
3
+ size 10956504
speechless_noisy/train-00002-of-00050.parquet → data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiN_00008591.rf64 RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:381312272be553cec6f3b3e9dd930788f7ae20bd2bac4d25366600fb6fb6b5c1
- size 493149800
+ oid sha256:74c317a2d76a58942a811ce7539c27932ff5f01ada0618c7cc19ed5a478036fb
+ size 6730856
speech_clean/test-00001-of-00032.parquet → data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiN_00008592.rf64 RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f96320570617d51bcd3e3ac1c9861509095a94e38381ff894ca972a354defca2
- size 491291300
+ oid sha256:b7fab35cd9af2c17a150f9bde6a4b65203bce246b660c2894d4b635b7ca1d5d8
+ size 10655392
speech_clean/test-00002-of-00032.parquet → data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiN_00008593.rf64 RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3c331e0941f531a4b1d3853f3946e3905471f33cbd75f7f5bdfd6e1a603655b9
- size 505420552
+ oid sha256:1239976aa11c953638d08b0a08cc8c65bb0907d80afd2569b28427bc7ffaaa5e
+ size 7279404
speech_clean/test-00003-of-00032.parquet → data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiN_00008594.rf64 RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0823a18826a43d2aab2ab272734b71aabf03b2d2e905307f8b37660502763399
- size 509894049
+ oid sha256:1d36e3402a8dfa09a9a70c7a0d35e61aad62ee71e864fda42d8a8973ba02e0dc
+ size 12128500
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiN_00008595.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a3160471d1e26af6b02b07c1d45616897eb0390c3db4cf4ba136c1ddc6fc64f2
+ size 7440572
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008437.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d24179a15231e2795d0c6464c993f5691e3f7294186a473c366e093489f4a4ce
+ size 10795224
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008438.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:573aadc32ad85b729ba23d77ccd1c1ce82d2a79a4b14a7e05c53259b69959f84
+ size 8935016
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008440.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19a6edb4e0ed0e44813882b09f3d3a0f0c28d4eac91ca7d7a48628194f1aca9a
+ size 10010216
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008442.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b66170230b18c92ec806264b499c132c5b57a9a2e566d4e80ccaabf1c07dcc9e
+ size 9698464
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008443.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9212a70ad9b63fc3bd68eadfeba5c24e6f61d37090c8b5e655a65d282fb3f932
+ size 7537256
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008444.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc6efb1831fabcdbeb9e3097d516eb4c911b21eb4d97ec928a8f4a81f45505e4
+ size 9967404
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008445.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b8cadbb200f48aa55012065ed31d4f2b9a3787b92cb5c666d5aa48690b54de9
+ size 8838332
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008451.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f0acd8baec07839ba367e339f2211038c5c91d8ba98af48fc145f4c7fb9ef9f
+ size 10042584
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008453.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e2118390f8d223b7ed1789b4033e97e24ae3201d2f3bb2e15d114ddee7c3079
+ size 13827176
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008454.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae68a87f685755c9e724c6a38860843ad7f3d6f71dca65426fce4257b267d339
+ size 11225472
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008459.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2a05e5c6b7975f5c4bdb2f357a743e9e650f2f5e495013afe6a9b98376b89e0
+ size 8343656
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008460.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e607f9a89b801e898725672ece579667350130c52c8ff4a269b0ac11fbad14c9
+ size 8795408
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008462.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d224f772618c26e17bb7b8cacff19d959dc518701f60b164dbb45d2233f07d9a
+ size 9440472
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008463.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d5b37cd6211c4fe9cfd609b2f5f15cd75caf99bf295b0a4ea0a3b9b7b1a9fc3
+ size 8902816
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008464.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0cc842167dc4f31991c0c5d4e8e314f58ef22a2412c120aa971cfe332cd937cb
+ size 9978072
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008465.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2adfda4ec09b12f1f9ec6bdcef9541bb68554e3a47c36b9d171d97b22f3a858a
+ size 7773884
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008468.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed7b13fc0a7d9ee314dcfed06ef834fff1d6310a70faf13de7904fb64984406b
+ size 9945872
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008469.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:620823521baa7bdc489890183b1a7a483007c132de2d19aefd284fcc965a0d7e
+ size 7967336
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008470.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38ee5ba4947cb32e48060aaf319f3add5556e2e159a95ffd60ba1d831210e905
+ size 10107180
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008473.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f005d5e753362ba1dfa4784ef0a6961fb70f2733eb22ea14d82ae2733bc7a6f
+ size 8354492
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008474.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5092aaf4bb84bb0863a9d01cd2d3328554a80cb4093fc618297a63204969b31c
+ size 8343656
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008475.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d3dfcd5523ae1ee4c8e90a768e11c2576e2d12c1155053334715ac33241ea7bd
+ size 8902928
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008477.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7073026e3abc45a4e044ebfc65a85952dbadd5c6eb55307442771eef3c0f1b92
+ size 9795176
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008481.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc4660e609870673ea0fdd2b594df8fbc766725ce219d2b763e45bf0378577fa
+ size 8924348
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008486.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d25bf40432b67b3f9881854106567222d4986235751af6226d8f43beefa2174c
+ size 8128616
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008487.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f84f84b264120306fee2654a1987fd6cd541a3811ae432abce4c59bdf7f65e4
+ size 8289980
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008489.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24416ef2b66a1c36e4e2b44e7d9ec09277e61e696eb221ab72e4b616cb7c6b76
+ size 8677052
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008490.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ab6c3aee065a1d2c46a58b4bb90dbfeab1d61f62128d6b6301fd6a00453f0e4
+ size 9601752
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008492.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4fb88ec7693bd6adf5742075a27e6a591aa6c3cbec6dd16ce333b2c6b4718651
+ size 7752296
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008495.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7a92b12e278bab4372de93e72681a8a04a66ec2cb8113af3d5a16b11cd8af92
+ size 8741536
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008496.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed40605bee7e3ee91c68a1ce1c64df3b56413c2e489b74de5f91f0369d75211d
+ size 9257576
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008499.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7a9b82ea588e34c53fa00c9c0eb49fc9ea7d4794fdc8cd47278878b76c342ec
+ size 9945816
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008500.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db49fd76b48e11d87b4830aefeac149d36939bb08e2a989d8e94cf5475da17b6
+ size 10096344
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008502.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c01086a6eef355d522947e9c2b4398c80eec412f9f8e83b260f42d97e4d67f43
+ size 10741408
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008503.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c44869e3634dc26aa4a0f1a15f9e7a3e00dc6a32d17d3fdac5de4af32c012739
+ size 9988824
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008506.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cdb8d1f1e70fe8e9e3a414d551353ae4e812ac4dde70f4b29443b5ea035c0465
+ size 7601852
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008510.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0b867336320b588b8c16b29641fbfdc623077044ef20acd68f79ae5ef13b7d0
+ size 8999556
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008512.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd5067e64384f0f415f057b8d9937c4b426a01ba1f7805f7fc99d34dc3c4633a
+ size 9279192
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008513.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1e7849e7be9caa873669934c926a646a949581edba67fbaf57136c19747b60a
+ size 7967336
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008516.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1adfaa984405e147209e95345e5bfb3871a98db22b369a4221413d19846502b6
+ size 8601788
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008517.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d36f829e424e0940d20feeb14c64b8f24b4fd0570ba7f316532bc95326e4a0a
+ size 9451224
data/b'gAAAAABlHBrFOw6pu2xhadA5YPFMknZfHoXohCvei3Os8rG43e1a-bIfjc3mZxnSoquKkt_ebTQyyFKvH_dN3cxH_ioALk5RLaxl4HbAMuixR3e3VNJVhq0='/SiS_00008520.rf64 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b3b8b6349225a1e9087ea25bfdfe1d714f50c0dd49433dfc9852224059d9a1ac
+ size 8548028
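
Each diff entry above is a Git LFS pointer file (spec v1) rather than the audio object itself: it records the SHA-256 of the real `.rf64` file and its size in bytes. As a hedged convenience sketch (the file paths are illustrative and not part of this repository; requires Python 3.9+ for `str.removeprefix`), a downloaded object can be checked against its pointer like this:

```python
# Sketch: verify a downloaded object against a Git LFS pointer (spec v1).
# The pointer format (version / oid sha256:<hex> / size <bytes>) matches the
# entries above; the file paths here are purely illustrative.
import hashlib
import os

def parse_pointer(pointer_path: str) -> dict:
    fields = {}
    with open(pointer_path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

def verify(pointer_path: str, object_path: str) -> bool:
    fields = parse_pointer(pointer_path)
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    sha = hashlib.sha256()
    with open(object_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
    return (sha.hexdigest() == expected_oid
            and os.path.getsize(object_path) == expected_size)
```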