aoxo committed on
Commit
97c8cd2
·
verified ·
1 Parent(s): 615e0b3

Upload processing.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. processing.py +108 -0
processing.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from datasets import Dataset, DatasetDict, load_dataset
3
+ from datasets.features import Audio
4
+ import pandas as pd
5
+
6
# Function to load your custom dataset
def load_custom_dataset(data_dir, sampling_rate=16_000):
    """Build a Hugging Face audio dataset from paired wav/transcription dirs.

    Expects `data_dir` to contain a 'wav' directory of .wav files and a
    'transcription' directory with a matching .txt file for each wav
    (same basename).

    Args:
        data_dir: Root directory holding the 'wav' and 'transcription' subdirs.
        sampling_rate: Rate the Audio feature decodes/resamples to on access
            (default 16 kHz, what Whisper models expect).

    Returns:
        datasets.Dataset with columns 'audio' (Audio feature) and 'text'.

    Raises:
        FileNotFoundError: if a wav file has no matching transcription file.
    """
    data = {
        "audio": [],
        "text": []
    }

    wav_dir = os.path.join(data_dir, 'wav')
    txt_dir = os.path.join(data_dir, 'transcription')

    # Sort for a deterministic example order: os.listdir order is arbitrary
    # and filesystem-dependent.
    for wav_file in sorted(os.listdir(wav_dir)):
        if not wav_file.endswith('.wav'):
            continue
        # Swap only the final extension; str.replace would also rewrite any
        # '.wav' substring occurring earlier in the filename.
        txt_file = wav_file[:-len('.wav')] + '.txt'
        wav_path = os.path.join(wav_dir, wav_file)
        txt_path = os.path.join(txt_dir, txt_file)

        # Read the transcription text (fails loudly if the pair is missing).
        with open(txt_path, 'r', encoding='utf-8') as f:
            transcription = f.read().strip()

        # Append to the dataset
        data["audio"].append(wav_path)
        data["text"].append(transcription)

    # Build via pandas, then convert to a Hugging Face dataset.
    df = pd.DataFrame(data)
    dataset = Dataset.from_pandas(df)

    # Define the audio feature (for .wav files): store paths, decode and
    # resample lazily at the requested rate when examples are accessed.
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    return dataset
41
+
42
# Load your custom dataset (expects ./wav and ./transcription next to this script)
custom_train_dataset = load_custom_dataset("./")

# Load Common Voice test set (Malayalam); trust_remote_code is required for
# this dataset's loading script.
common_voice_test = load_dataset("mozilla-foundation/common_voice_11_0", "ml", split="test", trust_remote_code=True)

# Keep only the columns used downstream.
# NOTE(review): the custom train split stores transcriptions under "text",
# but this keeps the Common Voice column name "sentence" — any shared
# preprocessing must handle both keys. Confirm downstream code does.
common_voice_test = common_voice_test.select_columns(["audio", "sentence"])

# Combine them into a DatasetDict
dataset_dict = DatasetDict({
    "train": custom_train_dataset,
    "test": common_voice_test
})

# Now you have the `dataset_dict` with your custom train set and the Common Voice test set
print(dataset_dict)
58
+
59
# Feature extractor: converts raw audio arrays into log-Mel spectrogram inputs.
from transformers import WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-small")

# Tokenizer: encodes transcription text into label ids (Malayalam, transcription task).
from transformers import WhisperTokenizer

tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")

# Processor: bundles the feature extractor and tokenizer under one object.
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")

# Sanity check: inspect one raw training example before preprocessing.
print(dataset_dict['train'][0])
72
+
73
import gc  # for garbage collection between batches

def prepare_dataset(batch):
    """Convert a batch of raw audio + transcriptions into Whisper inputs.

    Uses the module-level `feature_extractor` to compute log-Mel
    `input_features` and the module-level `tokenizer` to encode the
    transcription into `labels`.

    Args:
        batch: Batched-map dict with an "audio" column (each item a dict with
            "array" and "sampling_rate") and a transcription column named
            either "text" (custom train split) or "sentence" (Common Voice).

    Returns:
        The batch with "input_features" and "labels" columns added.
    """
    # Extract log-Mel features for each audio sample in the batch.
    # Note: the original per-sample `del` + `gc.collect()` here freed nothing
    # (batch["audio"] still held the arrays) and made extraction very slow;
    # a single collect per batch below keeps peak memory bounded instead.
    batch["input_features"] = [
        feature_extractor(item["array"], sampling_rate=item["sampling_rate"]).input_features[0]
        for item in batch["audio"]
    ]

    # Encode target text to label ids. The train split stores transcriptions
    # under "text" while the Common Voice test split keeps "sentence".
    text_column = "text" if "text" in batch else "sentence"
    batch["labels"] = [tokenizer(text).input_ids for text in batch[text_column]]

    # One collection per batch to release intermediate buffers promptly.
    gc.collect()

    return batch
99
+
100
# Apply the preprocessing split by split. The splits have different raw
# columns (train: ["audio", "text"]; test: ["audio", "sentence"]), so a
# single shared remove_columns list taken from "train" would fail on the
# test split — each split drops its own columns instead.
dataset_dict = DatasetDict({
    split_name: split.map(
        prepare_dataset,
        remove_columns=split.column_names,  # keep only the new model inputs
        batch_size=8,   # small batches to bound peak memory
        batched=True,   # prepare_dataset expects batched (list-valued) input
    )
    for split_name, split in dataset_dict.items()
})

# Persist the processed features so training can reload them without
# re-running audio feature extraction.
dataset_dict.save_to_disk("processed_dataset")