aoxo committed on
Commit
1ddbd4f
1 Parent(s): 5c34293

Upload tokenizer_construct.py with huggingface_hub

Files changed (1)
  1. tokenizer_construct.py +185 -0
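
The commit message above says the file was pushed with the huggingface_hub client. As context, here is a minimal sketch of that kind of upload; the repo id and repo type below are placeholders, not details taken from this commit:

from huggingface_hub import HfApi

api = HfApi()  # assumes a token from `huggingface-cli login` or the HF_TOKEN env var
api.upload_file(
    path_or_fileobj="tokenizer_construct.py",
    path_in_repo="tokenizer_construct.py",
    repo_id="<namespace>/<repo>",   # placeholder, not the actual repo id
    repo_type="model",              # placeholder; "dataset" is equally possible
    commit_message="Upload tokenizer_construct.py with huggingface_hub",
)
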
tokenizer_construct.py ADDED
@@ -0,0 +1,185 @@
+ import os
+ import json
+ import librosa
+ from tokenizers import Tokenizer
+ from tokenizers.models import WordPiece
+ from tokenizers.pre_tokenizers import Whitespace
+ from tokenizers.trainers import WordPieceTrainer
+ import numpy as np
+
+ class MalayalamDatasetTokenizer:
+     def __init__(self, transcription_dir, wav_dir, vocab_size=16000):
+         """
+         Initialize tokenizer with directories for transcriptions and audio files
+
+         :param transcription_dir: Path to folder containing text transcriptions
+         :param wav_dir: Path to folder containing WAV audio files
+         :param vocab_size: Size of the vocabulary for text tokenization
+         """
+         self.transcription_dir = transcription_dir
+         self.wav_dir = wav_dir
+
+         # Initialize text tokenizer
+         self.text_tokenizer = self._create_text_tokenizer(vocab_size)
+
+         # Audio tokenization parameters
+         self.audio_tokenizer = {
+             "sample_rate": 16000,  # Standard for speech models
+             "n_mfcc": 13,          # Number of MFCCs to extract
+             "n_fft": 2048,         # FFT window size
+             "hop_length": 512      # Hop length between frames
+         }
+
+     def _create_text_tokenizer(self, vocab_size):
+         """
+         Create a WordPiece tokenizer for Malayalam text
+         """
+         tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
+         tokenizer.pre_tokenizer = Whitespace()
+
+         special_tokens = ["[PAD]", "[UNK]", "[CLS]", "[SEP]"]
+
+         # Keep the trainer so train_text_tokenizer() actually applies
+         # the requested vocab_size and special tokens
+         self.text_trainer = WordPieceTrainer(
+             vocab_size=vocab_size,
+             special_tokens=special_tokens
+         )
+
+         return tokenizer
+
+     def _get_matched_files(self):
+         """
+         Find matching transcription and audio files
+
+         :return: List of tuples (transcription_path, audio_path)
+         """
+         matched_files = []
+
+         # Get all transcription files
+         for trans_file in os.listdir(self.transcription_dir):
+             # Remove extension to match with audio file
+             base_name = os.path.splitext(trans_file)[0]
+
+             # Check for corresponding WAV file
+             wav_path = os.path.join(self.wav_dir, base_name + '.wav')
+             trans_path = os.path.join(self.transcription_dir, trans_file)
+
+             if os.path.exists(wav_path):
+                 matched_files.append((trans_path, wav_path))
+
+         return matched_files
+
+     def process_dataset(self):
+         """
+         Process entire dataset, tokenizing text and extracting audio features
+
+         :return: Processed dataset with tokenized text and audio features
+         """
+         dataset = []
+         matched_files = self._get_matched_files()
+
+         for trans_path, wav_path in matched_files:
+             # Read transcription
+             with open(trans_path, 'r', encoding='utf-8') as f:
+                 transcription = f.read().strip()
+
+             # Tokenize text
+             text_tokens = self.text_tokenizer.encode(transcription).ids
+
+             # Extract audio features
+             audio_features = self._extract_audio_features(wav_path)
+
+             dataset.append({
+                 'transcription': transcription,
+                 'text_tokens': text_tokens,
+                 'audio_features': audio_features,
+                 'audio_path': wav_path,
+                 'transcription_path': trans_path
+             })
+
+         return dataset
+
+     def _extract_audio_features(self, audio_path):
+         """
+         Extract MFCC features from audio file
+
+         :param audio_path: Path to WAV file
+         :return: Extracted audio features
+         """
+         # Load audio file at the configured sample rate
+         audio, sr = librosa.load(
+             audio_path,
+             sr=self.audio_tokenizer['sample_rate']
+         )
+
+         # Extract MFCCs
+         mfccs = librosa.feature.mfcc(
+             y=audio,
+             sr=sr,
+             n_mfcc=self.audio_tokenizer['n_mfcc'],
+             n_fft=self.audio_tokenizer['n_fft'],
+             hop_length=self.audio_tokenizer['hop_length']
+         )
+
+         return mfccs.T.tolist()
+
+     def train_text_tokenizer(self):
+         """
+         Train text tokenizer on all transcription files
+         """
+         # Collect all transcriptions
+         transcriptions = []
+         for trans_path, _ in self._get_matched_files():
+             with open(trans_path, 'r', encoding='utf-8') as f:
+                 transcriptions.append(f.read().strip())
+
+         # Train tokenizer with the configured WordPiece trainer
+         self.text_tokenizer.train_from_iterator(transcriptions, trainer=self.text_trainer)
+
+     def save_dataset(self, output_path):
+         """
+         Save processed dataset to JSON
+
+         :param output_path: Path to save processed dataset
+         """
+         dataset = self.process_dataset()
+
+         with open(output_path, 'w', encoding='utf-8') as f:
+             json.dump(dataset, f, ensure_ascii=False, indent=2)
+
+         print(f"Saved dataset to {output_path}")
+
+     def save_tokenizer(self, output_dir):
+         """
+         Save tokenizer configurations
+
+         :param output_dir: Directory to save tokenizer files
+         """
+         os.makedirs(output_dir, exist_ok=True)
+
+         # Save text tokenizer vocabulary
+         with open(os.path.join(output_dir, 'text_tokenizer.json'), 'w', encoding='utf-8') as f:
+             json.dump({
+                 'vocab': self.text_tokenizer.get_vocab(),
+                 'model_type': 'WordPiece'
+             }, f, ensure_ascii=False, indent=2)
+
+         # Save audio tokenizer configuration
+         with open(os.path.join(output_dir, 'audio_tokenizer.json'), 'w') as f:
+             json.dump(self.audio_tokenizer, f, indent=2)
+
+ # Example usage
+ if __name__ == "__main__":
+     # Initialize tokenizer
+     tokenizer = MalayalamDatasetTokenizer(
+         transcription_dir='transcription',
+         wav_dir='wav'
+     )
+
+     # Train text tokenizer
+     tokenizer.train_text_tokenizer()
+
+     # Process and save dataset
+     # tokenizer.save_dataset('malayalam_dataset.json')
+
+     # Save tokenizer configurations
+     tokenizer.save_tokenizer('malayalam_tokenizer')
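
As a side note, save_tokenizer() above stores only the WordPiece vocabulary plus the audio settings. If a fully reloadable text tokenizer is needed, the tokenizers library's own serialization could be used as well; a small sketch under that assumption (the file name is chosen arbitrarily):

import os
from tokenizers import Tokenizer

os.makedirs('malayalam_tokenizer', exist_ok=True)

# Serialize the trained tokenizer (run after train_text_tokenizer())
tokenizer.text_tokenizer.save('malayalam_tokenizer/text_tokenizer_full.json')

# Later, restore it without retraining
restored = Tokenizer.from_file('malayalam_tokenizer/text_tokenizer_full.json')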