Datasets:
MuGeminorum
committed on
Commit
•
3f0849b
1
Parent(s):
c814e12
sync ms
Browse files- .gitignore +3 -0
- GZ_IsoTech.py +139 -0
- README.md +41 -18
- test.zip → data/record_chanyin1.jpg +2 -2
- train.zip → data/record_chanyin1.wav +2 -2
.gitignore
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
test.py
|
2 |
+
data/audio/*
|
3 |
+
data/mel/*
|
GZ_IsoTech.py
ADDED
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import random
|
3 |
+
import hashlib
|
4 |
+
import datasets
|
5 |
+
from datasets.tasks import AudioClassification
|
6 |
+
|
7 |
+
# Dataset name derived from this script's own filename ("GZ_IsoTech").
_DBNAME = os.path.basename(__file__).split(".")[0]

# ModelScope API endpoint that serves the packed data files for this dataset.
_DOMAIN = f"https://www.modelscope.cn/api/v1/datasets/ccmusic/{_DBNAME}/repo?Revision=master&FilePath=data"

# Human-facing dataset homepage on ModelScope.
_HOMEPAGE = f"https://www.modelscope.cn/datasets/ccmusic/{_DBNAME}"

# Class-label id -> Chinese name of the guzheng playing technique.
# The keys define the ClassLabel order; the values are emitted verbatim in
# the "cname" field, so they are runtime data and must not be altered.
_NAMES = {
    "vibrato": "颤音",
    "upward_portamento": "上滑音",
    "downward_portamento": "下滑音",
    "returning_portamento": "回滑音",
    "glissando": "刮奏, 花指",
    "tremolo": "摇指",
    "harmonics": "泛音",
    "plucks": "勾, 打, 抹, 托, ...",
}

# BibTeX citation surfaced through DatasetInfo.citation.
_CITATION = """\
@dataset{zhaorui_liu_2021_5676893,
  author       = {Monan Zhou, Shenyang Xu, Zhaorui Liu, Zhaowen Wang, Feng Yu, Wei Li and Baoqiang Han},
  title        = {CCMusic: an Open and Diverse Database for Chinese and General Music Information Retrieval Research},
  month        = {mar},
  year         = {2024},
  publisher    = {HuggingFace},
  version      = {1.2},
  url          = {https://huggingface.co/ccmusic-database}
}
"""

# Long-form description surfaced through DatasetInfo.description.
_DESCRIPTION = """\
The raw dataset comprises 2,824 audio clips showcasing various guzheng playing techniques. Specifically, 2,328 clips were sourced from virtual sound banks, while 496 clips were performed by a skilled professional guzheng artist. These recordings encompass a comprehensive range of tones inherent to the guzheng instrument. Categorization of the clips is based on the diverse playing techniques characteristic of the guzheng, the clips are divided into eight categories: Vibrato (chanyin), Upward Portamento (shanghuayin), Downward Portamento (xiahuayin), Returning Portamento (huihuayin), Glissando (guazou, huazhi), Tremolo (yaozhi), Harmonic (fanyin), Plucks (gou, da, mo, tuo…).

Due to the pre-existing split in the raw dataset, wherein the data has been partitioned approximately in a 4:1 ratio for training and testing sets, we uphold the original data division approach. In contrast to utilizing platform-specific automated splitting mechanisms, we directly employ the pre-split data for subsequent integration steps.
"""

# Download URLs for the audio clips and their pre-rendered mel spectrograms.
_URLS = {"audio": f"{_DOMAIN}/audio.zip", "mel": f"{_DOMAIN}/mel.zip"}
class GZ_IsoTech(datasets.GeneratorBasedBuilder):
    """Builder for the GZ_IsoTech guzheng playing-technique dataset.

    Each example pairs an audio clip with its pre-rendered mel-spectrogram
    image, an 8-way technique class label and the technique's Chinese name.
    The raw data ships pre-split into train/test directories, and that split
    is preserved as-is.
    """

    def _info(self):
        """Declare the dataset schema, supervision keys and metadata."""
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "audio": datasets.Audio(sampling_rate=22050),
                    "mel": datasets.Image(),
                    "label": datasets.features.ClassLabel(names=list(_NAMES.keys())),
                    "cname": datasets.Value("string"),
                }
            ),
            supervised_keys=("audio", "label"),
            homepage=_HOMEPAGE,
            license="mit",
            citation=_CITATION,
            description=_DESCRIPTION,
            task_templates=[
                AudioClassification(
                    task="audio-classification",
                    audio_column="audio",
                    label_column="label",
                )
            ],
        )

    def _str2md5(self, original_string):
        """Return the hexadecimal MD5 hash of ``original_string``.

        Parameters:
            original_string (str): The string to hash.
        Returns:
            str: The hexadecimal representation of the MD5 hash.
        """
        md5_obj = hashlib.md5()
        md5_obj.update(original_string.encode("utf-8"))
        return md5_obj.hexdigest()

    def _item_key(self, path):
        """Derive (split, item_id) for a data file.

        The id is the MD5 of "<split>/<class>/<stem>", so a .wav and its
        matching .jpg (same split, class directory and stem) share one id.
        """
        fname = os.path.basename(path)
        dirname = os.path.dirname(path)
        splt = os.path.basename(os.path.dirname(dirname))
        cls = f"{splt}/{os.path.basename(dirname)}/"
        # os.path.splitext is robust for stems that happen to contain the
        # substring ".wa"/".jp", which the previous split()-based stripping
        # mishandled.
        stem = os.path.splitext(fname)[0]
        return splt, self._str2md5(cls + stem)

    def _split_generators(self, dl_manager):
        """Download both archives, pair audio with mel images, return splits."""
        audio_files = dl_manager.download_and_extract(_URLS["audio"])
        mel_files = dl_manager.download_and_extract(_URLS["mel"])
        train_files, test_files = {}, {}

        # First pass: index every .wav clip by its derived item id.
        for path in dl_manager.iter_files([audio_files]):
            if os.path.basename(path).endswith(".wav"):
                splt, item_id = self._item_key(path)
                subset = train_files if splt == "train" else test_files
                subset[item_id] = {"audio": path}

        # Second pass: attach each .jpg mel image to its audio entry.
        # A mel image with no matching audio clip is skipped instead of
        # raising KeyError as the previous implementation did.
        for path in dl_manager.iter_files([mel_files]):
            if os.path.basename(path).endswith(".jpg"):
                splt, item_id = self._item_key(path)
                subset = train_files if splt == "train" else test_files
                if item_id in subset:
                    subset[item_id]["mel"] = path

        trainset = list(train_files.values())
        testset = list(test_files.values())
        # Shuffle within each split so examples are not grouped by class.
        random.shuffle(trainset)
        random.shuffle(testset)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": trainset},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"files": testset},
            ),
        ]

    def _generate_examples(self, files):
        """Yield (key, example) pairs; the label is the class directory name."""
        for i, path in enumerate(files):
            pt = os.path.basename(os.path.dirname(path["audio"]))
            yield i, {
                "audio": path["audio"],
                # .get() tolerates an audio clip without a rendered mel image
                # (previously a hard KeyError).
                "mel": path.get("mel"),
                "label": pt,
                "cname": _NAMES[pt],
            }
|
README.md
CHANGED
@@ -12,18 +12,19 @@ pretty_name: GZ_IsoTech Dataset
|
|
12 |
size_categories:
|
13 |
- n<1K
|
14 |
---
|
|
|
15 |
# Dataset Card for GZ_IsoTech Dataset
|
|
|
|
|
16 |
## Dataset Description
|
17 |
- **Homepage:** <https://ccmusic-database.github.io>
|
18 |
- **Repository:** <https://huggingface.co/datasets/ccmusic-database/Guzheng_Tech99>
|
19 |
- **Paper:** <https://doi.org/10.5281/zenodo.5676893>
|
20 |
-
- **Leaderboard:** <https://
|
21 |
- **Point of Contact:** <https://arxiv.org/abs/2209.08774>
|
22 |
|
23 |
### Dataset Summary
|
24 |
-
|
25 |
-
|
26 |
-
This database contains 2824 audio clips of guzheng playing techniques. Among them, 2328 pieces were collected from virtual sound banks, and 496 pieces were played and recorded by a professional guzheng performer. These clips cover almost all the tones in the range of guzheng and the most commonly used playing techniques in guzheng performance. According to the different playing techniques of guzheng, the clips are divided into 8 categories: Vibrato(chanyin), Upward Portamento(shanghuayin), Downward Portamento(xiahuayin), Returning Portamento(huihuayin), Glissando (guazou, huazhi), Tremolo(yaozhi), Harmonic(fanyin), Plucks(gou,da,mo,tuo…).
|
27 |
|
28 |
### Supported Tasks and Leaderboards
|
29 |
MIR, audio classification
|
@@ -31,19 +32,42 @@ MIR, audio classification
|
|
31 |
### Languages
|
32 |
Chinese, English
|
33 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
34 |
## Dataset Structure
|
|
|
|
|
|
|
|
|
|
|
35 |
### Data Instances
|
36 |
.zip(.flac, .csv)
|
37 |
|
38 |
### Data Fields
|
39 |
-
|
40 |
|
41 |
### Data Splits
|
42 |
train, valid, test
|
43 |
|
44 |
## Dataset Creation
|
45 |
### Curation Rationale
|
46 |
-
The Guzheng is a kind of traditional Chinese
|
47 |
|
48 |
### Source Data
|
49 |
#### Initial Data Collection and Normalization
|
@@ -54,7 +78,7 @@ Students from FD-LAMT
|
|
54 |
|
55 |
### Annotations
|
56 |
#### Annotation process
|
57 |
-
This database contains 2824 audio clips of guzheng playing techniques. Among them, 2328 pieces were collected from virtual sound banks, and 496 pieces were played and recorded by a professional guzheng performer.
|
58 |
|
59 |
#### Who are the annotators?
|
60 |
Students from FD-LAMT
|
@@ -64,7 +88,7 @@ None
|
|
64 |
|
65 |
## Considerations for Using the Data
|
66 |
### Social Impact of Dataset
|
67 |
-
Promoting the development of music AI industry
|
68 |
|
69 |
### Discussion of Biases
|
70 |
Only for Traditional Chinese Instruments
|
@@ -105,18 +129,17 @@ SOFTWARE.
|
|
105 |
```
|
106 |
|
107 |
### Citation Information
|
108 |
-
```
|
109 |
@dataset{zhaorui_liu_2021_5676893,
|
110 |
-
author = {
|
111 |
-
title = {
|
112 |
-
month = {
|
113 |
-
year = {
|
114 |
-
publisher = {
|
115 |
-
version = {1.
|
116 |
-
|
117 |
-
url = {https://doi.org/10.5281/zenodo.5676893}
|
118 |
}
|
119 |
```
|
120 |
|
121 |
### Contributions
|
122 |
-
Promoting the development of music AI industry
|
|
|
12 |
size_categories:
|
13 |
- n<1K
|
14 |
---
|
15 |
+
|
16 |
# Dataset Card for GZ_IsoTech Dataset
|
17 |
+
The raw dataset comprises 2,824 audio clips showcasing various guzheng playing techniques. Specifically, 2,328 clips were sourced from virtual sound banks, while 496 clips were performed by a skilled professional guzheng artist. These recordings encompass a comprehensive range of tones inherent to the guzheng instrument.
|
18 |
+
|
19 |
## Dataset Description
|
20 |
- **Homepage:** <https://ccmusic-database.github.io>
|
21 |
- **Repository:** <https://huggingface.co/datasets/ccmusic-database/Guzheng_Tech99>
|
22 |
- **Paper:** <https://doi.org/10.5281/zenodo.5676893>
|
23 |
+
- **Leaderboard:** <https://www.modelscope.cn/datasets/ccmusic/GZ_IsoTech>
|
24 |
- **Point of Contact:** <https://arxiv.org/abs/2209.08774>
|
25 |
|
26 |
### Dataset Summary
|
27 |
+
Due to the pre-existing split in the raw dataset, wherein the data has been partitioned approximately in a 4:1 ratio for training and testing sets, we uphold the original data division approach. In contrast to utilizing platform-specific automated splitting mechanisms, we directly employ the pre-split data for subsequent integration steps.
|
|
|
|
|
28 |
|
29 |
### Supported Tasks and Leaderboards
|
30 |
MIR, audio classification
|
|
|
32 |
### Languages
|
33 |
Chinese, English
|
34 |
|
35 |
+
## Usage
|
36 |
+
```python
|
37 |
+
from datasets import load_dataset
|
38 |
+
|
39 |
+
ds = load_dataset("ccmusic-database/GZ_IsoTech")
|
40 |
+
for item in ds["train"]:
|
41 |
+
print(item)
|
42 |
+
|
43 |
+
for item in ds["test"]:
|
44 |
+
print(item)
|
45 |
+
```
|
46 |
+
|
47 |
+
## Maintenance
|
48 |
+
```bash
|
49 |
+
GIT_LFS_SKIP_SMUDGE=1 git clone git@hf.co:datasets/ccmusic-database/GZ_IsoTech
|
50 |
+
cd GZ_IsoTech
|
51 |
+
```
|
52 |
+
|
53 |
## Dataset Structure
|
54 |
+
| audio(.wav, 22050Hz) | mel(.jpg, 22050Hz) | label | cname |
|
55 |
+
| :-----------------------------------------------: | :------------------------------------: | :-----: | :----: |
|
56 |
+
| <audio controls src="./data/record_chanyin1.wav"> | <img src="./data/record_chanyin1.jpg"> | 8-class | string |
|
57 |
+
| ... | ... | ... | ... |
|
58 |
+
|
59 |
### Data Instances
|
60 |
.zip(.flac, .csv)
|
61 |
|
62 |
### Data Fields
|
63 |
+
Categorization of the clips is based on the diverse playing techniques characteristic of the guzheng, the clips are divided into eight categories: Vibrato (chanyin), Upward Portamento (shanghuayin), Downward Portamento (xiahuayin), Returning Portamento (huihuayin), Glissando (guazou, huazhi), Tremolo (yaozhi), Harmonic (fanyin), Plucks (gou, da, mo, tuo…).
|
64 |
|
65 |
### Data Splits
|
66 |
train, valid, test
|
67 |
|
68 |
## Dataset Creation
|
69 |
### Curation Rationale
|
70 |
+
The Guzheng is a kind of traditional Chinese instrument with diverse playing techniques. Instrument playing techniques (IPT) play an important role in musical performance. However, most of the existing works for IPT detection show low efficiency for variable-length audio and do not assure generalization as they rely on a single sound bank for training and testing. In this study, we propose an end-to-end Guzheng playing technique detection system using Fully Convolutional Networks that can be applied to variable-length audio. Because each Guzheng playing technique is applied to a note, a dedicated onset detector is trained to divide an audio into several notes and its predictions are fused with frame-wise IPT predictions. During fusion, we add the IPT predictions frame by frame inside each note and get the IPT with the highest probability within each note as the final output of that note. We create a new dataset named GZ_IsoTech from multiple sound banks and real-world recordings for Guzheng performance analysis. Our approach achieves 87.97% in frame-level accuracy and 80.76% in note-level F1 score, outperforming existing works by a large margin, which indicates the effectiveness of our proposed method in IPT detection.
|
71 |
|
72 |
### Source Data
|
73 |
#### Initial Data Collection and Normalization
|
|
|
78 |
|
79 |
### Annotations
|
80 |
#### Annotation process
|
81 |
+
This database contains 2824 audio clips of guzheng playing techniques. Among them, 2328 pieces were collected from virtual sound banks, and 496 pieces were played and recorded by a professional guzheng performer.
|
82 |
|
83 |
#### Who are the annotators?
|
84 |
Students from FD-LAMT
|
|
|
88 |
|
89 |
## Considerations for Using the Data
|
90 |
### Social Impact of Dataset
|
91 |
+
Promoting the development of the music AI industry
|
92 |
|
93 |
### Discussion of Biases
|
94 |
Only for Traditional Chinese Instruments
|
|
|
129 |
```
|
130 |
|
131 |
### Citation Information
|
132 |
+
```bibtex
|
133 |
@dataset{zhaorui_liu_2021_5676893,
|
134 |
+
author = {Monan Zhou, Shenyang Xu, Zhaorui Liu, Zhaowen Wang, Feng Yu, Wei Li and Baoqiang Han},
|
135 |
+
title = {CCMusic: an Open and Diverse Database for Chinese and General Music Information Retrieval Research},
|
136 |
+
month = {mar},
|
137 |
+
year = {2024},
|
138 |
+
publisher = {HuggingFace},
|
139 |
+
version = {1.2},
|
140 |
+
url = {https://huggingface.co/ccmusic-database}
|
|
|
141 |
}
|
142 |
```
|
143 |
|
144 |
### Contributions
|
145 |
+
Promoting the development of the music AI industry
|
test.zip → data/record_chanyin1.jpg
RENAMED
File without changes
|
train.zip → data/record_chanyin1.wav
RENAMED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:76aa93fd89fbb3e957546362c4354953de5aabe9c8f7e5c626155c5a3de70725
|
3 |
+
size 67022
|