MuhammadQASIM111 committed
Commit 3aea4ea · verified · 1 Parent(s): 36ff1ce

Rename dataset.py to dataset_loader.py

Files changed (2):
  1. dataset.py +0 -55
  2. dataset_loader.py +9 -0
dataset.py DELETED
@@ -1,55 +0,0 @@
- import csv
- import datasets
-
- _CITATION = """\
- @dataset{qasim2025animalsounds,
- title = {Animal Sound Classification Dataset},
- author = {Muhammad Qasim},
- year = {2025},
- url = {https://huggingface.co/datasets/MuhammadQASIM111/Animal_Sound_Classification}
- }
- """
-
- _DESCRIPTION = """\
- A meticulously curated dataset of labeled animal sounds (dogs, cats, cows) for audio classification tasks. The dataset contains trimmed and cleaned audio clips with labels.
- """
-
- _HOMEPAGE = "https://huggingface.co/datasets/MuhammadQASIM111/Animal_Sound_Classification"
-
- _LICENSE = "mit"
-
- _URLS = {
-     "train": "ML1_features.csv"
- }
-
- class AnimalSoundClassification(datasets.GeneratorBasedBuilder):
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features({
-                 "audio": datasets.Value("string"),  # path to audio file or URL
-                 "label": datasets.ClassLabel(names=["dog", "cat", "cow"]),
-             }),
-             supervised_keys=("audio", "label"),
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         data_path = dl_manager.download_and_extract(_URLS["train"])
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"filepath": data_path},
-             ),
-         ]
-
-     def _generate_examples(self, filepath):
-         with open(filepath, encoding="utf-8") as csv_file:
-             reader = csv.DictReader(csv_file)
-             for id_, row in enumerate(reader):
-                 yield id_, {
-                     "audio": row["audio"],  # Make sure column name matches your CSV
-                     "label": row["label"].lower(),  # Lowercasing to match ClassLabel names
-                 }
dataset_loader.py ADDED
@@ -0,0 +1,9 @@
+ from datasets import load_dataset
+
+ def load_data():
+     dataset = load_dataset('csv', data_files='ML1_features.csv')
+     return dataset
+
+ # Example usage
+ dataset = load_data()
+ print(dataset)
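
Since the new loader goes through the generic csv builder, the audio and label columns come back as plain strings rather than the typed features the old script declared. A minimal sketch of restoring those types, assuming ML1_features.csv still has the audio (file path) and label columns the old script expected:

from datasets import Audio
from dataset_loader import load_data

dataset = load_data()  # DatasetDict with a single "train" split read from the CSV

# Decode the string audio paths into waveforms and encode the string labels
# into a ClassLabel, mirroring the features the removed dataset.py declared.
dataset = dataset.cast_column("audio", Audio())
dataset = dataset.class_encode_column("label")

print(dataset["train"].features)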