Datasets:
Formats:
parquet
Size:
1M - 10M
Tags:
indian speech
indian languages
synthetic speech
deepfake
audio deepfake detection
indian deepfake detection
License:
Update README.md
Browse files
README.md
CHANGED
|
@@ -258,31 +258,49 @@ For more details, please see the Table 1 and Section 3 of our paper: https://acl
|
|
| 258 |
## 📦 Access the Dataset
|
| 259 |
|
| 260 |
You can load data in a specific target language using the following code:
|
|
|
|
| 261 |
```python
|
| 262 |
|
| 263 |
import os
|
| 264 |
import soundfile as sf
|
| 265 |
from datasets import load_dataset
|
|
|
|
|
|
|
| 266 |
|
| 267 |
language = "Hindi" # Specify the target language here
|
| 268 |
|
| 269 |
# Load Dataset
|
| 270 |
dataset = load_dataset("vdivyasharma/IndicSynth", name=language, split="train")
|
| 271 |
|
| 272 |
-
#
|
| 273 |
output_dir = language
|
| 274 |
-
os.
|
|
|
|
|
|
|
|
|
|
|
|
|
| 275 |
|
| 276 |
# Loop through dataset and save each clip
|
| 277 |
-
for example in dataset:
|
| 278 |
audio_array = example["audio"]["array"]
|
| 279 |
sampling_rate = example["audio"]["sampling_rate"]
|
| 280 |
|
| 281 |
-
#
|
| 282 |
original_name = example.get("file") or example.get("path") or example["audio"]["path"].split("/")[-1]
|
| 283 |
|
| 284 |
-
# Save to
|
| 285 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 286 |
|
| 287 |
```
|
| 288 |
## License
|
|
|
|
| 258 |
## 📦 Access the Dataset
|
| 259 |
|
| 260 |
You can load data in a specific target language using the following code:
|
| 261 |
+
|
| 262 |
```python
|
| 263 |
|
| 264 |
"""Download one language split of IndicSynth and export it to disk.

Writes:
    <language>/audio/<clip>.wav   - one file per dataset example
    <language>/metadata.csv       - all non-audio columns plus a relative
                                    "path" column pointing at each clip
"""
import os

import pandas as pd
import soundfile as sf
from datasets import load_dataset
from tqdm import tqdm

language = "Hindi"  # Specify the target language here

# Load Dataset
dataset = load_dataset("vdivyasharma/IndicSynth", name=language, split="train")

# Create target directory structure
output_dir = language
audio_dir = os.path.join(output_dir, "audio")
os.makedirs(audio_dir, exist_ok=True)

# Store metadata rows here
metadata_rows = []

# Loop through dataset and save each clip
for example in tqdm(dataset):
    audio_array = example["audio"]["array"]
    sampling_rate = example["audio"]["sampling_rate"]

    # Get filename: prefer explicit columns, then fall back to the audio
    # feature's own path. basename() is cross-platform, unlike split("/").
    original_name = (
        example.get("file")
        or example.get("path")
        or os.path.basename(example["audio"]["path"])
    )

    # Save audio to the audio/ subfolder; keep the path relative so the
    # metadata CSV stays valid if the output folder is moved.
    audio_path = os.path.join("audio", original_name)
    sf.write(os.path.join(output_dir, audio_path), audio_array, sampling_rate)

    # Store metadata row (every column except the raw audio payload)
    row = {k: v for k, v in example.items() if k != "audio"}
    row["path"] = audio_path
    metadata_rows.append(row)

# Save metadata to CSV
df = pd.DataFrame(metadata_rows)
df.to_csv(os.path.join(output_dir, "metadata.csv"), index=False)
|
| 303 |
+
|
| 304 |
|
| 305 |
```
|
| 306 |
## License
|