import os
from datasets import Dataset, DatasetDict, load_dataset
from datasets.features import Audio
import pandas as pd

# Function to load your custom dataset
def load_custom_dataset(data_dir):
    data = {
        "audio": [],
        "text": []
    }

    wav_dir = os.path.join(data_dir, 'wav')
    txt_dir = os.path.join(data_dir, 'transcription')

    # Filenames in 'wav' and 'transcription' are assumed to match (foo.wav <-> foo.txt)
    for wav_file in os.listdir(wav_dir):
        if wav_file.endswith('.wav'):
            txt_file = wav_file.replace('.wav', '.txt')
            wav_path = os.path.join(wav_dir, wav_file)
            txt_path = os.path.join(txt_dir, txt_file)

            # Read the transcription text
            with open(txt_path, 'r', encoding='utf-8') as f:
                transcription = f.read().strip()

            # Append to the dataset
            data["audio"].append(wav_path)
            data["text"].append(transcription)

    # Create a pandas dataframe
    df = pd.DataFrame(data)

    # Convert to a Hugging Face dataset
    dataset = Dataset.from_pandas(df)

    # Define the audio feature (for .wav files)
    dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))  # Adjust the sampling rate if needed

    return dataset

# Load your custom dataset
custom_train_dataset = load_custom_dataset("./")

# Load Common Voice test set (Malayalam)
common_voice_test = load_dataset("mozilla-foundation/common_voice_11_0", "ml", split="test", trust_remote_code=True)

# Align the test set with the custom train set: rename "sentence" to "text" and
# resample to 16 kHz so prepare_dataset below handles both splits the same way
common_voice_test = common_voice_test.select_columns(["audio", "sentence"])
common_voice_test = common_voice_test.rename_column("sentence", "text")
common_voice_test = common_voice_test.cast_column("audio", Audio(sampling_rate=16_000))

# Combine them into a DatasetDict
dataset_dict = DatasetDict({
    "train": custom_train_dataset,
    "test": common_voice_test
})

# Now you have the `dataset_dict` with your custom train set and the Common Voice test set
print(dataset_dict)

from transformers import WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-small")
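
# Optional sanity check: run the feature extractor on the first training example
# and confirm the log-Mel shape (whisper-small uses 80 mel bins x 3000 frames for
# its padded 30-second window).
sample = dataset_dict["train"][0]["audio"]
sample_features = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"]).input_features[0]
print(sample_features.shape)  # expected: (80, 3000)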

from transformers import WhisperTokenizer

tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")
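
# Optional sanity check: a Malayalam transcription should round-trip through the
# tokenizer unchanged once special tokens are stripped.
input_str = dataset_dict["train"][0]["text"]
label_ids = tokenizer(input_str).input_ids
decoded_with_special = tokenizer.decode(label_ids, skip_special_tokens=False)
decoded_str = tokenizer.decode(label_ids, skip_special_tokens=True)
print(f"Input:                 {input_str}")
print(f"Decoded w/ special:    {decoded_with_special}")
print(f"Decoded w/out special: {decoded_str}")
print(f"Round-trip OK:         {input_str == decoded_str}")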

from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")

print(dataset_dict['train'][0])

import gc  # for garbage collection

def prepare_dataset(batch):
    # Compute log-Mel input features for every audio file in the batch
    audio_arrays = [item["array"] for item in batch["audio"]]
    sampling_rates = [item["sampling_rate"] for item in batch["audio"]]

    features = []
    for audio, sr in zip(audio_arrays, sampling_rates):
        features.append(feature_extractor(audio, sampling_rate=sr).input_features[0])

    # Drop the decoded audio arrays and trigger garbage collection to free memory
    del audio_arrays, sampling_rates
    gc.collect()

    # Store features in the batch
    batch["input_features"] = features

    # Encode target text to label ids; padding="longest" pads each map batch to
    # its longest transcription and truncation caps labels at the tokenizer's
    # maximum length
    batch["labels"] = tokenizer(batch["text"], padding="longest", truncation=True).input_ids

    return batch

# Use Dataset.map to apply the function
dataset_dict = dataset_dict.map(
    prepare_dataset,
    remove_columns=dataset_dict.column_names["train"],
    batch_size=8,  # Process smaller batches
    batched=True,   # Enable batched processing
)

dataset_dict.save_to_disk("processed_dataset")
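
# The processed dataset can be reloaded in a later session without re-running the
# preprocessing above; a minimal sketch, assuming the "processed_dataset" directory
# written by save_to_disk is intact:
from datasets import load_from_disk

reloaded = load_from_disk("processed_dataset")
print(reloaded)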