Alignment-Lab-AI committed
Commit 5c61e32 • Parent(s): f3a62cc
Upload test3.py with huggingface_hub
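For context, uploads like this one are typically done with huggingface_hub's upload_file. A minimal sketch, assuming a placeholder repo_id (the actual target repo is not shown in this commit):

from huggingface_hub import HfApi

api = HfApi()  # uses the token stored by `huggingface-cli login` by default
api.upload_file(
    path_or_fileobj="test3.py",                 # local file to upload
    path_in_repo="test3.py",                    # destination path inside the repo
    repo_id="Alignment-Lab-AI/your-repo",       # placeholder, not taken from this commit
    commit_message="Upload test3.py with huggingface_hub",
)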
test3.py
ADDED
@@ -0,0 +1,214 @@
import argparse
import os
from helpers import *
from faster_whisper import WhisperModel
import whisperx
import torch
from pydub import AudioSegment
from nemo.collections.asr.models.msdd_models import NeuralDiarizer
import logging
import shutil
import srt

mtypes = {"cpu": "int8", "cuda": "float16"}

# Initialize parser
parser = argparse.ArgumentParser()
parser.add_argument(
    "-a", "--audio", help="name of the target audio file", required=True
)
parser.add_argument(
    "--no-stem",
    action="store_false",
    dest="stemming",
    default=True,
    help="Disables source separation. This helps with long files that don't contain a lot of music.",
)
parser.add_argument(
    "--suppress_numerals",
    action="store_true",
    dest="suppress_numerals",
    default=False,
    help="Suppresses numerical digits. This helps diarization accuracy but converts all digits into written text.",
)
parser.add_argument(
    "--whisper-model",
    dest="model_name",
    default="medium.en",
    help="name of the Whisper model to use",
)
parser.add_argument(
    "--batch-size",
    type=int,
    dest="batch_size",
    default=8,
    help="Batch size for batched inference; reduce if you run out of memory, or set to 0 for non-batched inference.",
)
parser.add_argument(
    "--language",
    type=str,
    default=None,
    choices=whisper_langs,
    help="Language spoken in the audio; specify None to perform language detection.",
)
parser.add_argument(
    "--device",
    dest="device",
    default="cuda" if torch.cuda.is_available() else "cpu",
    help="if you have a GPU use 'cuda', otherwise 'cpu'",
)
args = parser.parse_args()

if args.stemming:
    # Isolate vocals from the rest of the audio
    return_code = os.system(
        f'python3 -m demucs.separate -n htdemucs --two-stems=vocals "{args.audio}" -o "temp_outputs"'
    )
    if return_code != 0:
        logging.warning(
            "Source splitting failed, using original audio file. Use --no-stem argument to disable it."
        )
        vocal_target = args.audio
    else:
        vocal_target = os.path.join(
            "temp_outputs",
            "htdemucs",
            os.path.splitext(os.path.basename(args.audio))[0],
            "vocals.wav",
        )
else:
    vocal_target = args.audio

# Transcribe the audio file
if args.batch_size != 0:
    from transcription_helpers import transcribe_batched

    whisper_results, language = transcribe_batched(
        vocal_target,
        args.language,
        args.batch_size,
        args.model_name,
        mtypes[args.device],
        args.suppress_numerals,
        args.device,
    )
else:
    from transcription_helpers import transcribe

    whisper_results, language = transcribe(
        vocal_target,
        args.language,
        args.model_name,
        mtypes[args.device],
        args.suppress_numerals,
        args.device,
    )

if language in wav2vec2_langs:
    alignment_model, metadata = whisperx.load_align_model(
        language_code=language, device=args.device
    )
    result_aligned = whisperx.align(
        whisper_results, alignment_model, metadata, vocal_target, args.device
    )
    word_timestamps = filter_missing_timestamps(
        result_aligned["word_segments"],
        initial_timestamp=whisper_results[0].get("start"),
        final_timestamp=whisper_results[-1].get("end"),
    )
    # clear gpu vram
    del alignment_model
    torch.cuda.empty_cache()
else:
    assert (
        args.batch_size == 0  # TODO: add a better check for word timestamps existence
    ), (
        f"Unsupported language: {language}, set --batch-size to 0"
        " to generate word timestamps using whisper directly and fix this error."
    )
    word_timestamps = []
    for segment in whisper_results:
        # each word entry is indexed as (start, end, text, ...) per faster-whisper's word timestamps
        for word in segment["words"]:
            word_timestamps.append({"word": word[2], "start": word[0], "end": word[1]})


# convert audio to mono for NeMo compatibility
sound = AudioSegment.from_file(vocal_target).set_channels(1)
ROOT = os.getcwd()
temp_path = os.path.join(ROOT, "temp_outputs")
os.makedirs(temp_path, exist_ok=True)
sound.export(os.path.join(temp_path, "mono_file.wav"), format="wav")

# Initialize NeMo MSDD diarization model
msdd_model = NeuralDiarizer(cfg=create_config(temp_path)).to(args.device)
msdd_model.diarize()
del msdd_model
torch.cuda.empty_cache()

# Reading timestamps <> Speaker Labels mapping
speaker_ts = []
with open(os.path.join(temp_path, "pred_rttms", "mono_file.rttm"), "r") as f:
    lines = f.readlines()
    for line in lines:
        # each RTTM line carries the segment start (s), its duration (s), and a speaker
        # label such as "speaker_0"; convert start/end to milliseconds
        line_list = line.split(" ")
        s = int(float(line_list[5]) * 1000)
        e = s + int(float(line_list[8]) * 1000)
        speaker_ts.append([s, e, int(line_list[11].split("_")[-1])])

wsm = get_words_speaker_mapping(word_timestamps, speaker_ts, "start")
wsm = get_realigned_ws_mapping_with_punctuation(wsm)
ssm = get_sentences_speaker_mapping(wsm, speaker_ts)

# Create the autodiarization directory structure
autodiarization_dir = "autodiarization"
os.makedirs(autodiarization_dir, exist_ok=True)

# Get the base name of the audio file
base_name = os.path.splitext(os.path.basename(args.audio))[0]

# Create a subdirectory for the current audio file
audio_dir = os.path.join(autodiarization_dir, base_name)
os.makedirs(audio_dir, exist_ok=True)

# Create a dictionary to store speaker-specific metadata
speaker_metadata = {}

# Generate the SRT file
srt_file = f"{os.path.splitext(args.audio)[0]}.srt"
with open(srt_file, "w", encoding="utf-8") as f:
    write_srt(ssm, f)

# Read the generated SRT file
with open(srt_file, "r", encoding="utf-8") as f:
    srt_data = f.read()

# Parse the SRT data
srt_segments = list(srt.parse(srt_data))

# Process each segment in the SRT data
for segment in srt_segments:
    start_time = segment.start.total_seconds() * 1000
    end_time = segment.end.total_seconds() * 1000
    speaker_name, transcript = segment.content.split(": ", 1)

    # Extract the speaker ID from the speaker name
    speaker_id = int(speaker_name.split(" ")[-1])

    # Split the audio segment
    segment_audio = sound[start_time:end_time]
    segment_path = os.path.join(
        audio_dir, f"speaker_{speaker_id}", f"speaker_{speaker_id}_{segment.index:03d}.wav"
    )
    os.makedirs(os.path.dirname(segment_path), exist_ok=True)
    segment_audio.export(segment_path, format="wav")

    # Store the metadata for each speaker
    if speaker_name not in speaker_metadata:
        speaker_metadata[speaker_name] = []
    speaker_metadata[speaker_name].append(
        f"speaker_{speaker_id}_{segment.index:03d}|{speaker_name}|{transcript}"
    )

# Write the metadata.csv file for each speaker
for speaker_name, metadata in speaker_metadata.items():
    speaker_id = int(speaker_name.split(" ")[-1])
    speaker_dir = os.path.join(audio_dir, f"speaker_{speaker_id}")
    with open(os.path.join(speaker_dir, "metadata.csv"), "w", encoding="utf-8") as f:
        f.write("\n".join(metadata))

# Clean up temporary files
cleanup(temp_path)
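Usage sketch, inferred from the argparse flags above (the audio file name is illustrative, and the helpers and transcription_helpers modules plus demucs, whisperx and NeMo must be installed alongside the script):

python3 test3.py -a interview.wav --whisper-model medium.en --batch-size 8 --device cuda

The script writes a speaker-labeled .srt next to the input audio and, per speaker, wav clips with a metadata.csv under autodiarization/<base_name>/speaker_<id>/.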