aburnazyan committed on
Commit
6c81428
1 Parent(s): 67cd8e7
Files changed (1) hide show
  1. hy_asr_grqaser.py +49 -47
hy_asr_grqaser.py CHANGED
@@ -1,51 +1,53 @@
1
  ##
2
  import os
3
  import pandas as pd
4
- from datasets import Dataset, DatasetDict, Features, Value, Audio
5
-
6
- ##
7
- def load_dataset_script(data_dir):
8
- print(f"data_dir: {data_dir}")
9
- """
10
- Load dataset script for custom audio-transcription dataset.
11
-
12
- :param data_dir: Directory where the data and metadata.csv are stored.
13
- :return: A Hugging Face Dataset object.
14
- """
15
- # Load metadata.csv
16
- metadata = pd.read_csv(os.path.join(data_dir, "metadata.csv"))
17
-
18
- # Create lists for audio files and transcriptions
19
- audio_files = []
20
- transcriptions = []
21
-
22
- # Iterate through the metadata and populate the lists
23
- for _, row in metadata.iterrows():
24
- audio_files.append({'path': os.path.join(data_dir, row['file_name'])})
25
- transcriptions.append(row['transcription'])
26
-
27
- # Define features of the dataset
28
- features = Features({
29
- 'audio': Audio(sampling_rate=16_000), # Adjust the sampling rate as needed
30
- 'sentence': Value('string')
31
- })
32
-
33
- # Create a dataset
34
- dataset = Dataset.from_dict({
35
- 'audio': audio_files,
36
- 'sentence': transcriptions
37
- }, features=features)
38
-
39
- # You can split the dataset here if needed, or return as a single dataset
40
- return DatasetDict({'train': dataset})
41
-
42
-
43
- ## Example usage
 
 
 
 
 
 
44
  # if __name__ == "__main__":
45
- # data_directory = "C:\\Projects\\aeneas\\hy_asr_grqaser"
46
- # dataset = load_dataset_script(data_directory)
47
- # print(dataset["train"][1])
48
- # from datasets import load_dataset
49
- # load_dataset("aburnazy/hy_asr_grqaser", data_dir=".")
50
-
51
-
 
1
  ##
2
  import os
3
  import pandas as pd
4
from datasets import (
    Audio,
    DatasetBuilder,
    DatasetInfo,
    Features,
    GeneratorBasedBuilder,
    Split,
    SplitGenerator,
    Value,
    Version,
)
5
+
6
+ class HyAsrGrqaser(DatasetBuilder):
7
+ """Armenian Audio-Transcription Dataset"""
8
+
9
+ VERSION = Version("1.0.0")
10
+
11
+ def _info(self):
12
+ return DatasetInfo(
13
+ description="This dataset contains Armenian speech and transcriptions.",
14
+ features=Features({
15
+ 'audio': Audio(sampling_rate=16_000), # Adjust the sampling rate as needed
16
+ 'sentence': Value('string')
17
+ }),
18
+ supervised_keys=("audio", "sentence"),
19
+ )
20
+
21
+ def _split_generators(self, dl_manager):
22
+ """Returns SplitGenerators."""
23
+ # Assuming the script is in the root of the project structure
24
+ data_dir = os.path.dirname(__file__)
25
+ metadata_path = os.path.join(os.path.dirname(__file__), "metadata.csv")
26
+ return [
27
+ SplitGenerator(
28
+ name=Split.TRAIN,
29
+ gen_kwargs={"data_dir": data_dir, "metadata_path": metadata_path}
30
+ ),
31
+ ]
32
+
33
+ def _generate_examples(self, data_dir, metadata_path):
34
+ """Yields examples."""
35
+ # Load metadata.csv
36
+ metadata = pd.read_csv(metadata_path)
37
+
38
+ # Generate examples
39
+ for idx, row in metadata.iterrows():
40
+ file_path = os.path.join(data_dir, row['file_name'])
41
+ transcription_path = os.path.join(data_dir, row['transcription_file'])
42
+ with open(transcription_path, 'r') as f:
43
+ transcription = f.read().strip()
44
+ yield idx, {
45
+ 'audio': {'path': file_path},
46
+ 'sentence': transcription
47
+ }
48
+
49
+ # Testing the dataset locally
50
  # if __name__ == "__main__":
51
+ # from datasets import load_dataset
52
+ # dataset = load_dataset("C:\\Projects\\aeneas\\hy_asr_grqaser\\hy_asr_grqaser.py")
53
+ # print(dataset["train"][0])