dlxj commited on
Commit
8a3de9a
·
1 Parent(s): 29a80af

修复 nemo-toolkit 中的 SIGKILL 问题

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. convert_ja_tar.py +23 -0
  2. how_to_use_cv11.py +0 -30
  3. main.py +41 -0
  4. nemo/README.md +0 -26
  5. nemo/__init__.py +0 -28
  6. nemo/agents/__init__.py +0 -13
  7. nemo/agents/voice_agent/__init__.py +0 -13
  8. nemo/agents/voice_agent/pipecat/__init__.py +0 -18
  9. nemo/agents/voice_agent/pipecat/frames/__init__.py +0 -13
  10. nemo/agents/voice_agent/pipecat/frames/frames.py +0 -26
  11. nemo/agents/voice_agent/pipecat/processors/__init__.py +0 -13
  12. nemo/agents/voice_agent/pipecat/processors/frameworks/__init__.py +0 -13
  13. nemo/agents/voice_agent/pipecat/processors/frameworks/rtvi.py +0 -72
  14. nemo/agents/voice_agent/pipecat/services/__init__.py +0 -13
  15. nemo/agents/voice_agent/pipecat/services/nemo/__init__.py +0 -19
  16. nemo/agents/voice_agent/pipecat/services/nemo/audio_logger.py +0 -844
  17. nemo/agents/voice_agent/pipecat/services/nemo/diar.py +0 -360
  18. nemo/agents/voice_agent/pipecat/services/nemo/llm.py +0 -760
  19. nemo/agents/voice_agent/pipecat/services/nemo/streaming_asr.py +0 -319
  20. nemo/agents/voice_agent/pipecat/services/nemo/streaming_diar.py +0 -212
  21. nemo/agents/voice_agent/pipecat/services/nemo/stt.py +0 -316
  22. nemo/agents/voice_agent/pipecat/services/nemo/tts.py +0 -892
  23. nemo/agents/voice_agent/pipecat/services/nemo/turn_taking.py +0 -441
  24. nemo/agents/voice_agent/pipecat/services/nemo/utils.py +0 -197
  25. nemo/agents/voice_agent/pipecat/transports/__init__.py +0 -13
  26. nemo/agents/voice_agent/pipecat/transports/base_input.py +0 -58
  27. nemo/agents/voice_agent/pipecat/transports/base_transport.py +0 -20
  28. nemo/agents/voice_agent/pipecat/transports/network/__init__.py +0 -13
  29. nemo/agents/voice_agent/pipecat/transports/network/websocket_server.py +0 -304
  30. nemo/agents/voice_agent/pipecat/utils/__init__.py +0 -13
  31. nemo/agents/voice_agent/pipecat/utils/text/__init__.py +0 -13
  32. nemo/agents/voice_agent/pipecat/utils/text/simple_text_aggregator.py +0 -238
  33. nemo/agents/voice_agent/utils/__init__.py +0 -15
  34. nemo/agents/voice_agent/utils/config_manager.py +0 -312
  35. nemo/agents/voice_agent/utils/tool_calling/__init__.py +0 -13
  36. nemo/agents/voice_agent/utils/tool_calling/basic_tools.py +0 -72
  37. nemo/agents/voice_agent/utils/tool_calling/mixins.py +0 -104
  38. nemo/collections/__init__.py +0 -13
  39. nemo/collections/asr/README.md +0 -37
  40. nemo/collections/asr/__init__.py +0 -25
  41. nemo/collections/asr/data/__init__.py +0 -13
  42. nemo/collections/asr/data/audio_to_ctm_dataset.py +0 -95
  43. nemo/collections/asr/data/audio_to_diar_label.py +0 -562
  44. nemo/collections/asr/data/audio_to_diar_label_lhotse.py +0 -114
  45. nemo/collections/asr/data/audio_to_eou_label_lhotse.py +0 -524
  46. nemo/collections/asr/data/audio_to_label.py +0 -1422
  47. nemo/collections/asr/data/audio_to_label_dataset.py +0 -304
  48. nemo/collections/asr/data/audio_to_text.py +0 -1389
  49. nemo/collections/asr/data/audio_to_text_dali.py +0 -777
  50. nemo/collections/asr/data/audio_to_text_dataset.py +0 -997
convert_ja_tar.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import sys

# Make scripts/speech_recognition importable so convert_to_tarred_audio_dataset
# can be used without installing NeMo's scripts as a package.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "scripts", "speech_recognition")))

import convert_to_tarred_audio_dataset


def main(
    manifest_path="data/common_voice_11_0/ja/train/train_common_voice_11_0_manifest.json",
    target_dir="data/common_voice_11_0/ja/train_tarred_1bk",
    num_shards=1024,
):
    """Convert a Japanese Common Voice manifest into a tarred audio dataset.

    Args:
        manifest_path: Path to the input NeMo-style JSONL manifest.
        target_dir: Output directory that will receive the tar shards.
        num_shards: Number of tar shards to produce.
    """
    convert_to_tarred_audio_dataset.create_tar_datasets(
        manifest_path=manifest_path,
        target_dir=target_dir,
        num_shards=num_shards,
        max_duration=15.0,  # drop utterances longer than 15 s
        min_duration=1.0,  # drop utterances shorter than 1 s
        shuffle=True,
        shuffle_seed=1,  # fixed seed keeps shard layout reproducible
        sort_in_shards=True,
        workers=-1,  # presumably "use all CPU cores" — confirm against the script's CLI
    )


if __name__ == "__main__":
    main()
how_to_use_cv11.py DELETED
@@ -1,30 +0,0 @@
# How to load mozilla-foundation/common_voice_11_0 with Hugging Face `datasets`.
#
# Fix: the original snippet used `DataLoader` in the batched-sampler example
# before importing it (the import only appeared in the last section), which
# raised a NameError when run top-to-bottom. Imports are now hoisted.

from datasets import load_dataset
from torch.utils.data import DataLoader
from torch.utils.data.sampler import BatchSampler, RandomSampler

# Load the dataset (locally)

cv_11 = load_dataset("mozilla-foundation/common_voice_11_0", "hi", split="train")

# Stream the dataset

cv_11 = load_dataset("mozilla-foundation/common_voice_11_0", "hi", split="train", streaming=True)

print(next(iter(cv_11)))

# Create a PyTorch dataloader with a batched random sampler

cv_11 = load_dataset("mozilla-foundation/common_voice_11_0", "hi", split="train")
batch_sampler = BatchSampler(RandomSampler(cv_11), batch_size=32, drop_last=False)
dataloader = DataLoader(cv_11, batch_sampler=batch_sampler)

# Create a plain batched PyTorch dataloader

cv_11 = load_dataset("mozilla-foundation/common_voice_11_0", "hi", split="train")
dataloader = DataLoader(cv_11, batch_size=32)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
main.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
def clear_text():
    """Report rare characters in the training manifest.

    Counts per-character frequencies over the ``text`` field of every line in
    the manifest and prints the characters seen at most ``threshold`` times —
    candidates for removal from the vocabulary.

    NOTE(review): the original (Chinese) docstring said "seems unnecessary";
    kept as an offline data-inspection helper.
    """
    import json
    from collections import Counter

    from tqdm import tqdm

    # dev_manifest = f"{YOUR_DATA_ROOT}/validation/validation_mozilla-foundation_common_voice_11_0_manifest.json"
    # test_manifest = f"{YOUR_DATA_ROOT}/test/test_mozilla-foundation_common_voice_11_0_manifest.json"
    # Plain literal — the original used an f-string with no placeholders.
    train_manifest = "data/common_voice_11_0/ja/train/train_common_voice_11_0_manifest.json"

    def compute_char_counts(manifest):
        """Return a Counter of character frequencies in the manifest's text fields."""
        char_counts = Counter()
        with open(manifest, 'r') as fn_in:
            for line in tqdm(fn_in, desc="Compute counts.."):
                # json.loads tolerates the trailing newline; no strip needed.
                data = json.loads(line)
                # Whitespace is excluded: only characters inside tokens count.
                for word in data["text"].split():
                    char_counts.update(word)
        return char_counts

    char_counts = compute_char_counts(train_manifest)

    # Characters occurring no more than `threshold` times are treated as noise.
    threshold = 10
    trash_char_list = [char for char, count in char_counts.items() if count <= threshold]

    print(trash_char_list)


if __name__ == "__main__":
    # clear_text()
    pass
nemo/README.md DELETED
@@ -1,26 +0,0 @@
1
- NeMo (**Ne**ural **Mo**dules) is a toolkit for creating AI applications built around **neural modules**, conceptual blocks of neural networks that take *typed* inputs and produce *typed* outputs.
2
-
3
- ## **collections/**
4
- * **ASR** - Collection of modules and models for building speech recognition networks.
5
- * **TTS** - Collection of modules and models for building speech synthesis networks.
6
- * **Audio** - Collection of modules and models for building audio processing networks.
7
- * **SpeechLM2** - Collection of modules and models for building multimodal LLM.
8
-
9
- ## **core/**
10
- Provides fundamental APIs and utilities for NeMo modules, including:
11
- - **Classes** - Base classes for datasets, models, and losses.
12
- - **Config** - Configuration management utilities.
13
- - **Neural Types** - Typed inputs/outputs for module interaction.
14
- - **Optim** - Optimizers and learning rate schedulers.
15
-
16
- ## **lightning/**
17
- Integration with PyTorch Lightning for training and distributed execution:
18
- - **Strategies & Plugins** - Custom Lightning strategies.
19
- - **Fabric** - Lightweight wrapper for model training.
20
- - **Checkpointing & Logging** - Utilities for managing model states.
21
-
22
- ## **utils/**
23
- General utilities for debugging, distributed training, logging, and model management:
24
- - **callbacks/** - Hooks for training processes.
25
- - **loggers/** - Logging utilities for different backends.
26
- - **debugging & profiling** - Performance monitoring tools.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/__init__.py DELETED
@@ -1,28 +0,0 @@
1
- # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
-
16
- from nemo.package_info import (
17
- __contact_emails__,
18
- __contact_names__,
19
- __description__,
20
- __download_url__,
21
- __homepage__,
22
- __keywords__,
23
- __license__,
24
- __package_name__,
25
- __repository_url__,
26
- __shortversion__,
27
- __version__,
28
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/__init__.py DELETED
@@ -1,13 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/__init__.py DELETED
@@ -1,13 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/__init__.py DELETED
@@ -1,18 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
# Verify the optional `pipecat` dependency is available before this package's
# submodules (which all build on it) are imported.
try:
    import pipecat
except ImportError as e:
    # Chain the original exception so the real import failure stays visible
    # in the traceback instead of being swallowed by the friendly message.
    raise ImportError("pipecat is not installed. Please install it with `pip install pipecat-ai`.") from e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/frames/__init__.py DELETED
@@ -1,13 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/frames/frames.py DELETED
@@ -1,26 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
-
16
- from dataclasses import dataclass
17
- import numpy as np
18
- from pipecat.frames.frames import DataFrame
19
-
20
-
@dataclass
class DiarResultFrame(DataFrame):
    """Data frame carrying a diarization result for one audio stream."""

    # Diarization output — either an ndarray payload or an int sentinel
    # (exact semantics defined by the producing service; confirm at call site).
    diar_result: np.ndarray | int
    # Identifier of the audio stream this result belongs to.
    stream_id: str = "default"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/processors/__init__.py DELETED
@@ -1,13 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/processors/frameworks/__init__.py DELETED
@@ -1,13 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/processors/frameworks/rtvi.py DELETED
@@ -1,72 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
-
16
- from loguru import logger
17
- from pipecat.frames.frames import Frame, LLMFullResponseEndFrame, LLMFullResponseStartFrame, TTSTextFrame
18
- from pipecat.observers.base_observer import FramePushed
19
- from pipecat.processors.frameworks.rtvi import (
20
- RTVIBotLLMStartedMessage,
21
- RTVIBotLLMStoppedMessage,
22
- RTVIBotTranscriptionMessage,
23
- RTVIBotTTSTextMessage,
24
- )
25
- from pipecat.processors.frameworks.rtvi import RTVIObserver as _RTVIObserver
26
- from pipecat.processors.frameworks.rtvi import RTVIProcessor, RTVITextMessageData
27
- from pipecat.transports.base_output import BaseOutputTransport
28
-
29
-
class RTVIObserver(_RTVIObserver):
    """
    An observer that processes RTVI frames and pushes them to the transport.
    """

    def __init__(self, rtvi: RTVIProcessor, *args, **kwargs):
        super().__init__(rtvi, *args, **kwargs)

    async def on_push_frame(self, data: FramePushed):
        """Process a frame being pushed through the pipeline.

        Args:
            data: Frame push event data containing source, frame, direction, and timestamp.
        """
        frame: Frame = data.frame

        # Each frame is handled at most once.
        if frame.id in self._frames_seen:
            return

        # With bot-LLM messages enabled, the parent observer already emits
        # the started/stopped/TTS messages — defer to it entirely.
        if self._params.bot_llm_enabled:
            await super().on_push_frame(data)
            return

        if isinstance(frame, LLMFullResponseStartFrame):
            await self.send_rtvi_message(RTVIBotLLMStartedMessage())
            self._frames_seen.add(frame.id)
        elif isinstance(frame, LLMFullResponseEndFrame):
            await self.send_rtvi_message(RTVIBotLLMStoppedMessage())
            self._frames_seen.add(frame.id)
        elif isinstance(frame, TTSTextFrame) and isinstance(data.source, BaseOutputTransport):
            # Forward TTS text both as an RTVI TTS message and as a bot transcription.
            await self.send_rtvi_message(RTVIBotTTSTextMessage(data=RTVITextMessageData(text=frame.text)))
            await self._push_bot_transcription(frame.text)
            self._frames_seen.add(frame.id)
        else:
            await super().on_push_frame(data)

    async def _push_bot_transcription(self, text: str):
        """Push accumulated bot transcription as a message."""
        if len(text.strip()) > 0:
            logger.debug(f"Pushing bot transcription: `{text}`")
            await self.send_rtvi_message(RTVIBotTranscriptionMessage(data=RTVITextMessageData(text=text)))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/services/__init__.py DELETED
@@ -1,13 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/services/nemo/__init__.py DELETED
@@ -1,19 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- from .diar import NemoDiarService
16
- from .llm import HuggingFaceLLMService
17
- from .stt import NemoSTTService
18
- from .tts import NeMoFastPitchHiFiGANTTSService
19
- from .turn_taking import NeMoTurnTakingService
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/services/nemo/audio_logger.py DELETED
@@ -1,844 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import json
16
- import threading
17
- import wave
18
- from datetime import datetime
19
- from pathlib import Path
20
- from typing import Optional, Union
21
-
22
- import librosa
23
- import numpy as np
24
- from loguru import logger
25
- from pipecat.frames.frames import TranscriptionFrame
26
- from pipecat.observers.base_observer import BaseObserver, FramePushed
27
-
28
-
29
- class AudioLogger:
30
- """
31
- Utility class for logging audio data and transcriptions during voice agent interactions.
32
-
33
- This logger saves:
34
- - Audio files in WAV format
35
- - Transcriptions with metadata in JSON format
36
- - Session information and metadata
37
-
38
- File structure:
39
- log_dir/
40
- ├── session_YYYYMMDD_HHMMSS/
41
- │ ├── user/
42
- │ │ ├── 00001_HHMMSS.wav
43
- │ │ ├── 00001_HHMMSS.json
44
- │ │ ├── 00002_HHMMSS.wav
45
- │ │ └── 00002_HHMMSS.json
46
- │ ├── agent/
47
- │ │ ├── 00001_HHMMSS.wav
48
- │ │ ├── 00001_HHMMSS.json
49
- │ └── session_metadata.json
50
-
51
- Args:
52
- log_dir: Base directory for storing logs (default: "./audio_logs")
53
- session_id: Optional custom session ID. If None, auto-generated from timestamp
54
- enabled: Whether logging is enabled (default: True)
55
-
56
- # 12/19/2025 Note: Stereo conversation recording is implemented,
57
- # but -0.8 seconds offset needs to be applied to make the session sound synced.
58
- """
59
-
60
- def __init__(
61
- self,
62
- log_dir: Union[str, Path] = "./audio_logs",
63
- session_id: Optional[str] = None,
64
- enabled: bool = True,
65
- user_audio_sample_rate: int = 16000,
66
- pre_roll_time_sec: float = 0.8,
67
- round_precision: int = 2,
68
- ):
69
- self.enabled = enabled
70
- if not self.enabled:
71
- logger.info("[AudioLogger] AudioLogger is disabled")
72
- return
73
-
74
- self.log_dir = Path(log_dir)
75
-
76
- # Generate session ID if not provided
77
- self.session_start_time = datetime.now()
78
- if session_id is None:
79
- session_id = f"session_{self.session_start_time.strftime('%Y%m%d_%H%M%S')}"
80
- self.first_audio_timestamp = None
81
- self.session_id = session_id
82
- self.session_dir = self.log_dir / session_id
83
-
84
- # Create directories
85
- self.user_dir = self.session_dir / "user"
86
- self.agent_dir = self.session_dir / "agent"
87
-
88
- self.user_dir.mkdir(parents=True, exist_ok=True)
89
- self.agent_dir.mkdir(parents=True, exist_ok=True)
90
-
91
- # Counters for file naming (thread-safe)
92
- self._user_counter = 0
93
- self._agent_counter = 0
94
- self._turn_index = 0 # Turn index for conversation turns
95
- self._current_speaker = None # Track current speaker for turn transitions
96
- self._agent_turn_start_time = None # Captured when BotStartedSpeakingFrame is received
97
- self._lock = threading.Lock()
98
- self.staged_metadata = None
99
- self._staged_audio_data = None
100
- self._pre_roll_time_sec = pre_roll_time_sec
101
- self._round_precision = round_precision
102
-
103
- self.turn_audio_buffer = []
104
- self.continuous_user_audio_buffer = []
105
- self.turn_transcription_buffer = []
106
-
107
- # Stereo conversation recording (left=agent, right=user)
108
- self._stereo_conversation_filename = "conversation_stereo.wav"
109
- self._stereo_conversation_file = self.session_dir / self._stereo_conversation_filename
110
- self._stereo_sample_rate = user_audio_sample_rate # Use user audio sample rate (downsample agent audio)
111
- self._stereo_audio_buffer_left: list = [] # Agent audio (left channel)
112
- self._stereo_audio_buffer_right: list = [] # User audio (right channel)
113
-
114
- # Session metadata
115
- # agent_entries is a list of lists: each sublist contains segments for one turn
116
- # e.g., [[seg1, seg2, seg3], [seg4, seg5], ...] where each [] is a turn
117
- self.session_metadata = {
118
- "session_id": session_id,
119
- "start_time": self.session_start_time.isoformat(),
120
- "user_entries": [],
121
- "agent_entries": [], # List of turns, each turn is a list of segments
122
- }
123
-
124
- logger.info(f"[AudioLogger] AudioLogger initialized: {self.session_dir}")
125
-
126
- def append_continuous_user_audio(self, audio_data: bytes):
127
- """
128
- Append audio data to the continuous user audio buffer for stereo conversation.
129
-
130
- This method should be called for EVERY audio frame received from the user,
131
- regardless of VAD state, to record the complete conversation audio.
132
-
133
- Args:
134
- audio_data: Raw audio data as bytes
135
- """
136
- if not self.enabled:
137
- return
138
-
139
- self.continuous_user_audio_buffer.append(audio_data)
140
-
141
- def _resample_audio(
142
- self,
143
- audio_data: Union[bytes, np.ndarray],
144
- orig_sr: int,
145
- target_sr: int,
146
- ) -> np.ndarray:
147
- """
148
- Resample audio data to a target sample rate using librosa.
149
-
150
- Args:
151
- audio_data: Audio data as bytes (int16) or numpy array
152
- orig_sr: Original sample rate
153
- target_sr: Target sample rate
154
-
155
- Returns:
156
- Resampled audio as numpy array (float32)
157
- """
158
- # Convert bytes to numpy array if needed
159
- if isinstance(audio_data, bytes):
160
- audio_array = np.frombuffer(audio_data, dtype=np.int16).astype(np.float32) / 32768.0
161
- elif audio_data.dtype == np.int16:
162
- audio_array = audio_data.astype(np.float32) / 32768.0
163
- else:
164
- audio_array = audio_data.astype(np.float32)
165
-
166
- # Resample if needed
167
- if orig_sr != target_sr:
168
- audio_array = librosa.resample(audio_array, orig_sr=orig_sr, target_sr=target_sr)
169
-
170
- return audio_array
171
-
172
- def _append_to_stereo_conversation(
173
- self,
174
- audio_data: Union[bytes, np.ndarray],
175
- channel: str,
176
- start_time: float,
177
- sample_rate: int,
178
- ):
179
- """
180
- Append audio to the stereo conversation buffer at the correct time position.
181
-
182
- Args:
183
- audio_data: Audio data as bytes or numpy array
184
- channel: "left" for agent, "right" for user
185
- start_time: Start time in seconds from session start
186
- sample_rate: Sample rate of the input audio
187
- """
188
- if not self.enabled:
189
- return
190
-
191
- try:
192
- # Resample to stereo sample rate if needed
193
- audio_float = self._resample_audio(audio_data, sample_rate, self._stereo_sample_rate)
194
-
195
- # Calculate the sample position for this audio
196
- start_sample = int(start_time * self._stereo_sample_rate)
197
-
198
- # Get the appropriate buffer
199
- if channel == "left":
200
- buffer = self._stereo_audio_buffer_left
201
- else:
202
- buffer = self._stereo_audio_buffer_right
203
-
204
- # Extend buffer with zeros if needed to reach start position
205
- current_length = len(buffer)
206
- if start_sample > current_length:
207
- buffer.extend([0.0] * (start_sample - current_length))
208
-
209
- # Append or overwrite audio samples
210
- for i, sample in enumerate(audio_float):
211
- pos = start_sample + i
212
- if pos < len(buffer):
213
- # Mix with existing audio (in case of overlap)
214
- buffer[pos] = np.clip(buffer[pos] + sample, -1.0, 1.0)
215
- else:
216
- buffer.append(sample)
217
-
218
- logger.debug(
219
- f"[AudioLogger] Appended {len(audio_float)} samples to {channel} channel "
220
- f"at position {start_sample} (buffer now {len(buffer)} samples)"
221
- )
222
-
223
- except Exception as e:
224
- logger.error(f"[AudioLogger] Error appending to stereo conversation: {e}")
225
-
226
- def save_stereo_conversation(self):
227
- """
228
- Save the stereo conversation buffer to a WAV file.
229
- Left channel = Agent, Right channel = User.
230
-
231
- User audio comes from continuous_user_audio_buffer (not affected by VAD).
232
- """
233
- if not self.enabled:
234
- return
235
-
236
- if not self._stereo_audio_buffer_left and not self.continuous_user_audio_buffer:
237
- logger.warning("[AudioLogger] No stereo conversation audio to save")
238
- return
239
-
240
- try:
241
- # Build right channel (user) from continuous buffer
242
- # This is raw bytes at user sample rate, no resampling needed since stereo uses user sample rate
243
- if self.continuous_user_audio_buffer:
244
- continuous_audio_bytes = b"".join(self.continuous_user_audio_buffer)
245
- right_array = np.frombuffer(continuous_audio_bytes, dtype=np.int16).astype(np.float32) / 32768.0
246
- else:
247
- right_array = np.array([], dtype=np.float32)
248
-
249
- left_array = np.array(self._stereo_audio_buffer_left, dtype=np.float32)
250
-
251
- # Pad the shorter buffer with zeros
252
- max_length = max(len(left_array), len(right_array))
253
-
254
- # Pad to same length
255
- if len(left_array) < max_length:
256
- left_array = np.pad(left_array, (0, max_length - len(left_array)))
257
- if len(right_array) < max_length:
258
- right_array = np.pad(right_array, (0, max_length - len(right_array)))
259
-
260
- # Create stereo array (interleaved: L, R, L, R, ...)
261
- stereo_array = np.column_stack((left_array, right_array))
262
-
263
- # Convert to int16
264
- stereo_int16 = (stereo_array * 32767).astype(np.int16)
265
-
266
- # Save as WAV
267
- with wave.open(str(self._stereo_conversation_file), 'wb') as wav_file: # type: ignore[union-attr]
268
- wav_file.setnchannels(2) # Stereo
269
- wav_file.setsampwidth(2) # 16-bit
270
- wav_file.setframerate(self._stereo_sample_rate)
271
- wav_file.writeframes(stereo_int16.tobytes())
272
-
273
- duration_sec = max_length / self._stereo_sample_rate
274
- logger.info(
275
- f"[AudioLogger] Saved stereo conversation: {self._stereo_conversation_file} "
276
- f"({duration_sec:.2f} seconds, {max_length} samples)"
277
- )
278
-
279
- except Exception as e:
280
- logger.error(f"[AudioLogger] Error saving stereo conversation: {e}")
281
-
282
- def get_time_from_start_of_session(self, timestamp: datetime = None) -> float:
283
- """Get the time from the start of the session to the given datetime string."""
284
- # get the time difference in seconds.
285
- if self.first_audio_timestamp is None:
286
- raise ValueError("First audio timestamp is not set. Aborting time calculation.")
287
- time_diff = (timestamp if timestamp else datetime.now()) - self.first_audio_timestamp
288
- return time_diff.total_seconds()
289
-
290
- def _get_next_counter(self, speaker: str) -> int:
291
- """Get the next counter value for a speaker in a thread-safe manner."""
292
- with self._lock:
293
- if speaker == "user":
294
- self._user_counter += 1
295
- return self._user_counter
296
- else:
297
- self._agent_counter += 1
298
- return self._agent_counter
299
-
300
- def increment_turn_index(self, speaker: str = None) -> int:
301
- """
302
- Increment the turn index if the speaker has changed.
303
-
304
- Args:
305
- speaker: "user" or "agent". If provided, only increments
306
- if this is different from the current speaker.
307
- If None, always increments.
308
-
309
- Returns:
310
- The current turn index after any increment.
311
- """
312
- with self._lock:
313
- if speaker is None:
314
- # Always increment if no speaker specified
315
- self._turn_index += 1
316
- logger.debug(f"[AudioLogger] Turn index incremented to {self._turn_index}")
317
- elif speaker != self._current_speaker:
318
- # Only increment if speaker changed
319
- self._current_speaker = speaker
320
- self._turn_index += 1
321
- # Reset agent turn start time when speaker changes
322
- if speaker == "agent":
323
- self._agent_turn_start_time = None
324
- logger.debug(
325
- f"[AudioLogger] Speaker changed to {speaker}, turn index incremented to {self._turn_index}"
326
- )
327
- # else: same speaker, no increment
328
- return self._turn_index
329
-
330
- def set_agent_turn_start_time(self):
331
- """
332
- Set the start time for the current agent turn.
333
-
334
- This should be called when BotStartedSpeakingFrame is received,
335
- which indicates the audio is actually starting to play (not just generated).
336
- This provides more accurate timing than capturing time during TTS generation.
337
- """
338
- if not self.enabled:
339
- return
340
-
341
- # Only set if not already set for this turn
342
- if self._agent_turn_start_time is None:
343
- self._agent_turn_start_time = self.get_time_from_start_of_session()
344
- logger.debug(f"[AudioLogger] Agent turn start time set to {self._agent_turn_start_time:.3f}s")
345
-
346
- def _save_audio_wav(
347
- self,
348
- audio_data: Union[bytes, np.ndarray],
349
- file_path: Path,
350
- sample_rate: int,
351
- num_channels: int = 1,
352
- ):
353
- """
354
- Save audio data to a WAV file.
355
-
356
- Args:
357
- audio_data: Audio data as bytes or numpy array
358
- file_path: Path to save the WAV file
359
- sample_rate: Audio sample rate in Hz
360
- num_channels: Number of audio channels (default: 1)
361
- """
362
- try:
363
- # Convert audio data to bytes if it's a numpy array
364
- if isinstance(audio_data, np.ndarray):
365
- if audio_data.dtype in [np.float32, np.float64]:
366
- # Convert float [-1, 1] to int16 [-32768, 32767]
367
- audio_data = np.clip(audio_data, -1.0, 1.0)
368
- audio_data = (audio_data * 32767).astype(np.int16)
369
- elif audio_data.dtype != np.int16:
370
- audio_data = audio_data.astype(np.int16)
371
- audio_bytes = audio_data.tobytes()
372
- else:
373
- audio_bytes = audio_data
374
-
375
- # Write WAV file
376
- with wave.open(str(file_path), 'wb') as wav_file: # type: ignore[union-attr]
377
- wav_file.setnchannels(num_channels)
378
- wav_file.setsampwidth(2) # 16-bit audio
379
- wav_file.setframerate(sample_rate)
380
- wav_file.writeframes(audio_bytes)
381
-
382
- logger.debug(f"[AudioLogger] Saved audio to {file_path}")
383
- except Exception as e:
384
- logger.error(f"[AudioLogger] Error saving audio to {file_path}: {e}")
385
- raise
386
-
387
- def _save_metadata_json(self, metadata: dict, file_path: Path):
388
- """Save metadata to a JSON file."""
389
- try:
390
- with open(file_path, 'w', encoding='utf-8') as f:
391
- json.dump(metadata, f, indent=2, ensure_ascii=False)
392
- logger.debug(f"[AudioLogger] Saved metadata to {file_path}")
393
- except Exception as e:
394
- logger.error(f"[AudioLogger] Error saving metadata to {file_path}: {e}")
395
- raise
396
-
397
- def clear_user_audio_buffer(self):
398
- """
399
- Clear the user audio buffer if the user stopped speaking detected by VAD.
400
- """
401
- # Clear turn buffers if logging wasn't completed (e.g., no final transcription)
402
- if len(self.turn_audio_buffer) > 0 or len(self.turn_transcription_buffer) > 0:
403
- logger.debug(
404
- "[AudioLogger] Clearing turn audio and transcription buffers due to VAD user stopped speaking"
405
- )
406
- self.turn_audio_buffer = []
407
- self.turn_transcription_buffer = []
408
-
409
- def stage_user_audio(
410
- self,
411
- timestamp_now: datetime,
412
- transcription: str,
413
- sample_rate: int = 16000,
414
- num_channels: int = 1,
415
- is_first_frame: bool = False,
416
- is_backchannel: bool = False,
417
- additional_metadata: Optional[dict] = None,
418
- ) -> Optional[dict]:
419
- """
420
- Stage user audio metadata and transcription (from STT).
421
- This data will be saved when the turn is complete by `save_user_audio` method.
422
- Audio data is retrieved from continuous_user_audio_buffer based on timestamps.
423
-
424
- Args:
425
- timestamp_now: Timestamp when the audio was received
426
- transcription: Transcribed text
427
- sample_rate: Audio sample rate in Hz (default: 16000)
428
- num_channels: Number of audio channels (default: 1)
429
- is_first_frame: Whether this is the first frame of a turn (default: False)
430
- is_backchannel: Whether this is a backchannel utterance (default: False)
431
- additional_metadata: Additional metadata to include
432
-
433
- Returns:
434
- Dictionary with logged file paths, or None if logging is disabled
435
- """
436
- if not self.enabled:
437
- return None
438
-
439
- try:
440
- # Get counter and generate filenames
441
- counter = self._get_next_counter("user")
442
- # timestamp_now = datetime.now()
443
- base_name = f"{counter:05d}_{timestamp_now.strftime('%H%M%S')}"
444
-
445
- audio_file = self.user_dir / f"{base_name}.wav"
446
- metadata_file = self.user_dir / f"{base_name}.json"
447
-
448
- if is_first_frame or self.staged_metadata is None or "start_time" not in self.staged_metadata:
449
- raw_start_time = self.get_time_from_start_of_session(timestamp=timestamp_now)
450
- # Apply pre-roll: go back pre_roll_time_sec, but don't go before the last entry's end time
451
- pre_roll_start = raw_start_time - self._pre_roll_time_sec
452
- if self.session_metadata["user_entries"]:
453
- last_entry_end_time = self.session_metadata["user_entries"][-1]["end_time"]
454
- _start_time = max(pre_roll_start, last_entry_end_time)
455
- else:
456
- # No previous entries, just ensure we don't go negative
457
- _start_time = max(pre_roll_start, 0.0)
458
- else:
459
- # start_time is stored as float (seconds from session start), not ISO string
460
- _start_time = self.staged_metadata["start_time"]
461
-
462
- # Make end time into float (seconds from session start)
463
- _end_time = self.get_time_from_start_of_session(timestamp=datetime.now())
464
- audio_duration_sec = round(_end_time - _start_time, self._round_precision)
465
-
466
- # Prepare metadata (initialize if None to allow update)
467
- if self.staged_metadata is None:
468
- self.staged_metadata = {}
469
- self.staged_metadata.update(
470
- {
471
- "base_name": base_name,
472
- "counter": counter,
473
- "turn_index": self._turn_index,
474
- "speaker": "user",
475
- "timestamp": timestamp_now.isoformat(),
476
- "start_time": _start_time,
477
- "end_time": _end_time,
478
- "transcription": transcription,
479
- "audio_file": audio_file.name,
480
- "sample_rate": sample_rate,
481
- "num_channels": num_channels,
482
- "audio_duration_sec": audio_duration_sec,
483
- "is_backchannel": is_backchannel,
484
- }
485
- )
486
-
487
- if additional_metadata:
488
- self.staged_metadata.update(additional_metadata)
489
-
490
- return {
491
- "audio_file": str(audio_file),
492
- "metadata_file": str(metadata_file),
493
- "counter": counter,
494
- }
495
-
496
- except Exception as e:
497
- logger.error(f"Error logging user audio: {e}")
498
- return None
499
-
500
- def stage_turn_audio_and_transcription(
501
- self,
502
- timestamp_now: datetime,
503
- is_first_frame: bool = False,
504
- additional_metadata: Optional[dict] = None,
505
- ):
506
- """
507
- Stage the complete turn audio and accumulated transcriptions.
508
-
509
- This method is called when a final transcription is received.
510
- It joins all accumulated audio and transcription chunks and stages them together.
511
-
512
- Args:
513
- timestamp_now: Timestamp when the audio was received
514
- is_first_frame: Whether this is the first frame of a turn (default: False)
515
- additional_metadata: Additional metadata to include (e.g., model, backend info)
516
- """
517
- if not self.turn_audio_buffer or not self.turn_transcription_buffer:
518
- logger.debug("[AudioLogger] No audio or transcription to stage")
519
- return
520
-
521
- try:
522
- complete_transcription = "".join(self.turn_transcription_buffer)
523
-
524
- logger.debug(
525
- f"[AudioLogger] Staging a turn with: {len(self.turn_audio_buffer)} audio chunks, "
526
- f"{len(self.turn_transcription_buffer)} transcription chunks"
527
- )
528
-
529
- metadata = {
530
- "num_transcription_chunks": len(self.turn_transcription_buffer),
531
- "num_audio_chunks": len(self.turn_audio_buffer),
532
- }
533
- if additional_metadata:
534
- metadata.update(additional_metadata)
535
-
536
- self.stage_user_audio(
537
- timestamp_now=timestamp_now,
538
- transcription=complete_transcription,
539
- sample_rate=self._stereo_sample_rate,
540
- num_channels=1,
541
- is_first_frame=is_first_frame,
542
- additional_metadata=metadata,
543
- )
544
-
545
- logger.info(
546
- f"[AudioLogger] Staged the audio and transcription for turn: '{complete_transcription[:50]}...'"
547
- )
548
-
549
- except Exception as e:
550
- logger.warning(f"[AudioLogger] Failed to stage user audio: {e}")
551
-
552
- def save_user_audio(self, is_backchannel: bool = False, float_divisor: float = 32768.0):
553
- """Save the user audio to the disk.
554
-
555
- Args:
556
- is_backchannel: Whether this audio is a backchannel utterance (default: False)
557
- """
558
- # Safety check: ensure staged metadata exists and has required fields
559
- if self.staged_metadata is None or "base_name" not in self.staged_metadata:
560
- # This is expected - multiple TranscriptionFrames may be pushed but only one has audio staged
561
- logger.debug("[AudioLogger] No staged metadata to save (this is normal for multiple frame pushes)")
562
- return
563
-
564
- try:
565
- # Add backchannel metadata (only set if not already True to preserve turn-taking detection)
566
- if is_backchannel or not self.staged_metadata.get("is_backchannel", False):
567
- self.staged_metadata["is_backchannel"] = is_backchannel
568
-
569
- audio_file = self.user_dir / f"{self.staged_metadata['base_name']}.wav"
570
- metadata_file = self.user_dir / f"{self.staged_metadata['base_name']}.json"
571
-
572
- # Get the audio data from continuous user audio buffer
573
- stt, end = self.staged_metadata["start_time"], self.staged_metadata["end_time"]
574
- continuous_audio_bytes = b"".join(self.continuous_user_audio_buffer)
575
- full_audio_array = np.frombuffer(continuous_audio_bytes, dtype=np.int16).astype(np.float32) / float_divisor
576
- start_idx = int(stt * self._stereo_sample_rate)
577
- end_idx = int(end * self._stereo_sample_rate)
578
- staged_audio_data = full_audio_array[start_idx:end_idx]
579
-
580
- self._save_audio_wav(
581
- audio_data=staged_audio_data,
582
- file_path=audio_file,
583
- sample_rate=self.staged_metadata["sample_rate"],
584
- )
585
-
586
- self._save_metadata_json(metadata=self.staged_metadata, file_path=metadata_file)
587
- backchannel_label = " [BACKCHANNEL]" if is_backchannel else ""
588
- transcription_preview = self.staged_metadata['transcription'][:50]
589
- ellipsis = '...' if len(self.staged_metadata['transcription']) > 50 else ''
590
- logger.info(
591
- f"[AudioLogger] Saved user audio #{self.staged_metadata['counter']}"
592
- f"{backchannel_label}: '{transcription_preview}{ellipsis}'"
593
- )
594
-
595
- # Note: User audio for stereo conversation is handled via continuous_user_audio_buffer
596
- # which is populated in append_continuous_user_audio() (not affected by VAD)
597
-
598
- # Update session metadata
599
- with self._lock:
600
- self.session_metadata["user_entries"].append(self.staged_metadata)
601
- self._save_session_metadata()
602
-
603
- self.clear_user_audio_buffer()
604
-
605
- # Clear staged data after successful save
606
- self.staged_metadata = None
607
- self._staged_audio_data = None
608
- except Exception as e:
609
- logger.error(f"[AudioLogger] Error saving user audio: {e}")
610
- raise
611
-
612
- def log_agent_audio(
613
- self,
614
- audio_data: Union[bytes, np.ndarray],
615
- text: str,
616
- sample_rate: int = 22050,
617
- num_channels: int = 1,
618
- additional_metadata: Optional[dict] = None,
619
- tts_generation_time: Optional[float] = None,
620
- ) -> Optional[dict]:
621
- """
622
- Log agent audio and text (from TTS).
623
-
624
- Args:
625
- audio_data: Generated audio data as bytes or numpy array
626
- text: Input text that was synthesized
627
- sample_rate: Audio sample rate in Hz (default: 22050)
628
- num_channels: Number of audio channels (default: 1)
629
- additional_metadata: Additional metadata to include
630
- tts_generation_time: Time when TTS generation started (seconds from session start).
631
- Used to calculate actual start_time for first segment of a turn.
632
-
633
- Returns:
634
- Dictionary with logged file paths, or None if logging is disabled
635
- """
636
- if not self.enabled:
637
- return None
638
-
639
- try:
640
- # Get counter and generate filenames
641
- counter = self._get_next_counter("agent")
642
- timestamp_now = datetime.now()
643
- base_name = f"{counter:05d}_{timestamp_now.strftime('%H%M%S')}"
644
-
645
- audio_file = self.agent_dir / f"{base_name}.wav"
646
- metadata_file = self.agent_dir / f"{base_name}.json"
647
-
648
- # Save audio
649
- self._save_audio_wav(audio_data, audio_file, sample_rate, num_channels)
650
-
651
- # Calculate audio duration
652
- audio_duration_sec = (
653
- len(audio_data) / (sample_rate * num_channels * 2)
654
- if isinstance(audio_data, bytes)
655
- else len(audio_data) / sample_rate
656
- )
657
-
658
- # Determine start_time based on previous segment in the same turn
659
- # If this is the first segment of the turn, use tts_generation_time
660
- # Otherwise, use the previous segment's end_time for sequential playback
661
- start_time = None
662
- with self._lock:
663
- agent_entries = self.session_metadata["agent_entries"]
664
- # agent_entries is a list of turns, each turn is a list of segments
665
- if agent_entries and agent_entries[-1]: # If there's a current turn with segments
666
- last_segment = agent_entries[-1][-1] # Last segment of last turn
667
- if last_segment["turn_index"] == self._turn_index:
668
- # Same turn - start after previous segment ends
669
- start_time = last_segment["end_time"]
670
-
671
- if start_time is None:
672
- # First segment of the turn - use agent_turn_start_time (from BotStartedSpeakingFrame)
673
- # This is more accurate than tts_generation_time as it reflects actual playback start
674
- if self._agent_turn_start_time is not None:
675
- start_time = self._agent_turn_start_time
676
- elif tts_generation_time is not None:
677
- # Fallback to tts_generation_time if agent_turn_start_time not set
678
- start_time = tts_generation_time
679
- else:
680
- start_time = self.get_time_from_start_of_session(timestamp=timestamp_now)
681
-
682
- end_time = start_time + audio_duration_sec
683
-
684
- # Prepare metadata
685
- # cutoff_time is None by default (no interruption)
686
- # It will be set by set_agent_cutoff_time() if TTS is interrupted
687
- metadata = {
688
- "base_name": base_name,
689
- "counter": counter,
690
- "turn_index": self._turn_index,
691
- "speaker": "agent",
692
- "timestamp": timestamp_now.isoformat(),
693
- "start_time": round(start_time, self._round_precision),
694
- "end_time": round(end_time, self._round_precision),
695
- "cutoff_time": None, # None means not interrupted; float if interrupted
696
- "text": text,
697
- "audio_file": audio_file.name,
698
- "sample_rate": sample_rate,
699
- "num_channels": num_channels,
700
- "audio_duration_sec": round(audio_duration_sec, self._round_precision),
701
- }
702
-
703
- if additional_metadata:
704
- metadata.update(additional_metadata)
705
-
706
- # Save metadata
707
- self._save_metadata_json(metadata, metadata_file)
708
-
709
- # Append to stereo conversation (left channel = agent)
710
- self._append_to_stereo_conversation(
711
- audio_data=audio_data,
712
- channel="left",
713
- start_time=start_time,
714
- sample_rate=sample_rate,
715
- )
716
-
717
- # Update session metadata
718
- # agent_entries is a list of turns, each turn is a list of segments
719
- with self._lock:
720
- agent_entries = self.session_metadata["agent_entries"]
721
- # Check if we need to start a new turn or append to existing turn
722
- if not agent_entries or agent_entries[-1][-1]["turn_index"] != self._turn_index:
723
- # Start a new turn (new sublist)
724
- agent_entries.append([metadata])
725
- else:
726
- # Append to current turn
727
- agent_entries[-1].append(metadata)
728
- self._save_session_metadata()
729
-
730
- logger.info(f"[AudioLogger] Logged agent audio #{counter}: '{text[:50]}{'...' if len(text) > 50 else ''}'")
731
-
732
- return {
733
- "audio_file": str(audio_file),
734
- "metadata_file": str(metadata_file),
735
- "counter": counter,
736
- }
737
-
738
- except Exception as e:
739
- logger.error(f"[AudioLogger] Error logging agent audio: {e}")
740
- return None
741
-
742
- def set_agent_cutoff_time(self, cutoff_time: Optional[float] = None):
743
- """
744
- Set the cutoff time for the most recent agent audio entry.
745
-
746
- This method should be called when TTS is interrupted by user speech.
747
- The cutoff_time represents when the agent audio was actually cut off,
748
- which may be earlier than the natural end_time.
749
-
750
- Args:
751
- cutoff_time: The cutoff time in seconds from session start.
752
- If None, uses current time from session start.
753
- """
754
- if not self.enabled:
755
- return
756
-
757
- if cutoff_time is None:
758
- cutoff_time = self.get_time_from_start_of_session()
759
-
760
- with self._lock:
761
- agent_entries = self.session_metadata["agent_entries"]
762
- if not agent_entries or not agent_entries[-1]:
763
- logger.warning("[AudioLogger] No agent entries to set cutoff time")
764
- return
765
-
766
- # Get the current turn (last sublist) and update ALL segments in it
767
- current_turn = agent_entries[-1]
768
- turn_index = current_turn[0]["turn_index"]
769
-
770
- # Update cutoff_time for ALL segments in the current turn
771
- for segment in current_turn:
772
- segment["cutoff_time"] = cutoff_time
773
- # Also update individual JSON files
774
- try:
775
- metadata_file = self.agent_dir / f"{segment['base_name']}.json"
776
- self._save_metadata_json(segment, metadata_file)
777
- except Exception as e:
778
- logger.error(f"[AudioLogger] Error updating agent cutoff time for segment: {e}")
779
-
780
- # Truncate the stereo buffer (left channel = agent) at the cutoff point
781
- cutoff_sample = int(cutoff_time * self._stereo_sample_rate)
782
- if cutoff_sample < len(self._stereo_audio_buffer_left):
783
- # Zero out agent audio after cutoff point
784
- for i in range(cutoff_sample, len(self._stereo_audio_buffer_left)):
785
- self._stereo_audio_buffer_left[i] = 0.0
786
- logger.debug(
787
- f"[AudioLogger] Truncated agent stereo buffer at sample {cutoff_sample} "
788
- f"(cutoff_time={cutoff_time:.3f}s)"
789
- )
790
-
791
- logger.info(
792
- f"[AudioLogger] Set cutoff_time={cutoff_time:.3f}s for turn {turn_index} "
793
- f"({len(current_turn)} segments)"
794
- )
795
-
796
- # Save updated session metadata
797
- self._save_session_metadata()
798
-
799
- def _save_session_metadata(self):
800
- """Save the session metadata to disk."""
801
- if not self.enabled:
802
- return
803
-
804
- try:
805
- metadata_file = self.session_dir / "session_metadata.json"
806
- self.session_metadata["last_updated"] = datetime.now().isoformat()
807
- self._save_metadata_json(self.session_metadata, metadata_file)
808
- except Exception as e:
809
- logger.error(f"[AudioLogger] Error saving session metadata: {e}")
810
-
811
- def finalize_session(self):
812
- """Finalize the session and save final metadata."""
813
- if not self.enabled:
814
- return
815
-
816
- # Save stereo conversation before finalizing
817
- self.save_stereo_conversation()
818
-
819
- self.session_metadata["end_time"] = datetime.now().isoformat()
820
- self.session_metadata["total_user_entries"] = self._user_counter
821
- self.session_metadata["total_agent_segments"] = self._agent_counter
822
- self.session_metadata["total_agent_turns"] = len(self.session_metadata["agent_entries"])
823
- self._save_session_metadata()
824
- logger.info(
825
- f"[AudioLogger] Session finalized: {self.session_id} "
826
- f"(User: {self._user_counter}, Agent: {self._agent_counter} segments in "
827
- f"{len(self.session_metadata['agent_entries'])} turns)"
828
- )
829
-
830
-
831
- class RTVIAudioLoggerObserver(BaseObserver):
832
- """Observer that triggers audio logging when TranscriptionFrame is pushed."""
833
-
834
- def __init__(self, audio_logger: AudioLogger):
835
- super().__init__()
836
- self._audio_logger = audio_logger
837
-
838
- async def on_push_frame(self, data: FramePushed):
839
- """Handle frame push events and save user audio on TranscriptionFrame."""
840
- frame = data.frame
841
- if isinstance(frame, TranscriptionFrame) and self._audio_logger:
842
- self._audio_logger.save_user_audio()
843
- # Call parent class's on_push_frame method
844
- await super().on_push_frame(data)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/services/nemo/diar.py DELETED
@@ -1,360 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
-
16
- import asyncio
17
- from typing import AsyncGenerator, Optional
18
-
19
- import numpy as np
20
- from loguru import logger
21
- from pipecat.frames.frames import (
22
- CancelFrame,
23
- EndFrame,
24
- ErrorFrame,
25
- Frame,
26
- StartFrame,
27
- VADUserStartedSpeakingFrame,
28
- VADUserStoppedSpeakingFrame,
29
- )
30
- from pipecat.processors.frame_processor import FrameDirection
31
- from pipecat.services.stt_service import STTService
32
- from pipecat.transcriptions.language import Language
33
- from pipecat.utils.time import time_now_iso8601
34
- from pipecat.utils.tracing.service_decorators import traced_stt
35
- from pydantic import BaseModel
36
-
37
- from nemo.agents.voice_agent.pipecat.frames.frames import DiarResultFrame
38
- from nemo.agents.voice_agent.pipecat.services.nemo.streaming_diar import DiarizationConfig, NeMoStreamingDiarService
39
-
40
-
41
- class NeMoDiarInputParams(BaseModel):
42
- threshold: Optional[float] = (
43
- 0.4 # threshold value used to determine if a speaker exists or not, setting it to a lower value will increase the sensitivity of the diarization model
44
- )
45
- language: Optional[Language] = Language.EN_US
46
- frame_len_in_secs: Optional[float] = 0.08 # 80ms for FastConformer model
47
- config_path: Optional[str] = None # path to the Niva ASR config file
48
- raw_audio_frame_len_in_secs: Optional[float] = 0.016 # 16ms for websocket transport
49
- buffer_size: Optional[int] = (
50
- 30 # number of audio frames to buffer, 1 frame is 16ms, streaming Sortformer was trained with 6*0.08=0.48s chunks
51
- )
52
-
53
-
54
- class NemoDiarService(STTService):
55
- def __init__(
56
- self,
57
- *,
58
- model: Optional[str] = "",
59
- device: Optional[str] = "cuda:0",
60
- sample_rate: Optional[int] = 16000,
61
- params: Optional[NeMoDiarInputParams] = None,
62
- use_vad: bool = True,
63
- audio_passthrough: bool = True,
64
- backend: Optional[str] = "legacy",
65
- enabled: bool = True,
66
- **kwargs,
67
- ):
68
- super().__init__(audio_passthrough=audio_passthrough, **kwargs)
69
-
70
- self._enabled = enabled
71
- self._queue = asyncio.Queue()
72
- self._response_queue = asyncio.Queue() # Add response queue
73
- self._processing_task = None # Add processing task
74
- self._response_task = None # Add response task
75
- self._device = device
76
- self._sample_rate = sample_rate
77
- self._audio_passthrough = audio_passthrough
78
- params.buffer_size = params.frame_len_in_secs // params.raw_audio_frame_len_in_secs
79
- self._params = params
80
- self._model_name = model
81
- self._use_vad = use_vad
82
- self._backend = backend
83
- if not params:
84
- raise ValueError("params is required")
85
-
86
- self._load_model()
87
-
88
- self._vad_user_speaking = False
89
- self._audio_buffer = []
90
- self._current_speaker_id = None
91
- self._processing_running = False
92
-
93
- if not self._use_vad:
94
- self._vad_user_speaking = True
95
-
96
- def _load_model(self):
97
- if not self._enabled or not self._model_name:
98
- self._model = None
99
- self._enabled = False
100
- return
101
-
102
- if self._backend == "legacy":
103
- cfg = DiarizationConfig()
104
- cfg.device = self._device
105
- self._model = NeMoStreamingDiarService(
106
- cfg, self._model_name, frame_len_in_secs=self._params.frame_len_in_secs, sample_rate=self.sample_rate
107
- )
108
- else:
109
- raise ValueError(f"Invalid backend: {self._backend}")
110
- logger.info(f"Diarization service initialized on device: {self._device}")
111
-
112
- def can_generate_metrics(self) -> bool:
113
- """Indicates whether this service can generate metrics.
114
-
115
- Returns:
116
- bool: True, as this service supports metric generation.
117
- """
118
- return True
119
-
120
- async def start(self, frame: StartFrame):
121
- """Handle service start."""
122
- await super().start(frame)
123
-
124
- # Initialize the model if not already done
125
- if not hasattr(self, "_model"):
126
- self._load_model()
127
-
128
- # Start background processing task
129
- if not self._processing_task:
130
- self._processing_task = self.create_task(self._processing_task_handler())
131
-
132
- # Start response handling task
133
- if not self._response_task:
134
- self._response_task = self.create_task(self._response_task_handler())
135
-
136
- async def stop(self, frame: EndFrame):
137
- """Handle service stop."""
138
- await super().stop(frame)
139
- await self._stop_tasks()
140
-
141
- async def cancel(self, frame: CancelFrame):
142
- """Handle service cancellation."""
143
- await super().cancel(frame)
144
- await self._stop_tasks()
145
-
146
- async def _stop_tasks(self):
147
- """Stop background processing tasks."""
148
- await self._queue.put(None) # Signal to stop processing
149
- if self._processing_task:
150
- await self.cancel_task(self._processing_task)
151
- self._processing_task = None
152
-
153
- if self._response_task:
154
- await self.cancel_task(self._response_task)
155
- self._response_task = None
156
-
157
- def _diarization_processor(self):
158
- """Background processor that handles diarization calls."""
159
- try:
160
- while self._processing_running:
161
- try:
162
- # Get audio from queue - blocking call that will be interrupted by cancellation
163
- future = asyncio.run_coroutine_threadsafe(self._queue.get(), self.get_event_loop())
164
- audio = future.result()
165
-
166
- if audio is None: # Stop signal
167
- logger.debug("Received stop signal in background processor")
168
- break
169
-
170
- # Process diarization
171
- diar_result = self._model.diarize(audio)
172
-
173
- # Send result back to async loop
174
- asyncio.run_coroutine_threadsafe(self._response_queue.put(diar_result), self.get_event_loop())
175
-
176
- except Exception as e:
177
- logger.error(f"Error in background diarization processor: {e}")
178
- # Send error back to async loop
179
- asyncio.run_coroutine_threadsafe(self._response_queue.put(('error', e)), self.get_event_loop())
180
-
181
- except Exception as e:
182
- logger.error(f"Background diarization processor fatal error: {e}")
183
- finally:
184
- logger.debug("Background diarization processor stopped")
185
-
186
- async def _processing_task_handler(self):
187
- """Handler for background processing task."""
188
- try:
189
- self._processing_running = True
190
- logger.debug("Starting background processing task")
191
- await asyncio.to_thread(self._diarization_processor)
192
- except asyncio.CancelledError:
193
- logger.debug("Background processing task cancelled")
194
- self._processing_running = False
195
- raise
196
- finally:
197
- self._processing_running = False
198
-
199
- async def _handle_diarization_result(self, diar_result):
200
- """Handle diarization result from background processing."""
201
- try:
202
- if diar_result is None:
203
- return
204
- dominant_speaker_id = self._get_dominant_speaker_id(diar_result)
205
- # logger.debug(f"Dominant speaker ID: {dominant_speaker_id}")
206
- if dominant_speaker_id is not None and dominant_speaker_id != self._current_speaker_id:
207
- self._current_speaker_id = dominant_speaker_id
208
- logger.debug(f"Pushing DiarResultFrame with speaker {dominant_speaker_id}")
209
- await self.push_frame(DiarResultFrame(dominant_speaker_id, stream_id="default"))
210
- except Exception as e:
211
- logger.error(f"Error handling diarization result: {e}")
212
- await self.push_frame(
213
- ErrorFrame(
214
- str(e),
215
- time_now_iso8601(),
216
- )
217
- )
218
-
219
- async def _response_task_handler(self):
220
- """Handler for processing diarization results."""
221
- logger.debug("Response task handler started")
222
- try:
223
- while True:
224
- try:
225
- result = await self._response_queue.get()
226
-
227
- if isinstance(result, tuple) and result[0] == 'error':
228
- # Handle error from background processing
229
- error = result[1]
230
- logger.error(f"Error in NeMo Diarization processing: {error}")
231
- await self.push_frame(
232
- ErrorFrame(
233
- str(error),
234
- time_now_iso8601(),
235
- )
236
- )
237
- else:
238
- # Handle successful diarization result
239
- await self._handle_diarization_result(result)
240
-
241
- except Exception as e:
242
- logger.error(f"Error in response task handler: {e}")
243
- except asyncio.CancelledError:
244
- logger.debug("Response task handler cancelled")
245
- raise
246
-
247
async def run_stt(self, audio: bytes) -> AsyncGenerator[Frame, None]:
    """Buffer audio while the user is speaking; flush full buffers to the worker queue.

    Args:
        audio: Raw audio bytes to transcribe

    Yields:
        Frame: Always yields None here; results are produced asynchronously.
    """
    if self._enabled and self._vad_user_speaking:
        self._audio_buffer.append(audio)
        if len(self._audio_buffer) >= self._params.buffer_size:
            await self.start_ttfb_metrics()
            await self.start_processing_metrics()
            merged = b"".join(self._audio_buffer)
            self._audio_buffer = []
            # Queue audio for background processing
            await self._queue.put(merged)
    yield None
266
-
267
@traced_stt
async def _handle_transcription(self, transcript: str, is_final: bool, language: Optional[str] = None):
    """Handle a transcription result.

    Args:
        transcript: The transcribed text
        is_final: Whether this is a final transcription
        language: The language of the transcription
    """
    # Intentional no-op: the method exists so the @traced_stt decorator wraps
    # the call; subclasses can override for service-specific handling.
    # NOTE(review): @traced_stt is defined elsewhere — confirm its semantics.
    pass  # Base implementation - can be extended for specific handling needs
277
-
278
async def set_language(self, language: Language):
    """Switch the service's recognition language.

    Args:
        language: New language for recognition
    """
    if not self._params:
        # No params yet: build a fresh set carrying only the language.
        self._params = NeMoDiarInputParams(language=language)
    else:
        self._params.language = language

    logger.info(f"Switching STT language to: {language}")
290
-
291
async def set_model(self, model: str):
    """Update the service's model.

    Args:
        model: New model name/path to use
    """
    await super().set_model(model)
    self._model_name = model
    # Reload so the change takes effect immediately.
    # NOTE(review): _load_model is defined elsewhere in this class —
    # presumably it rebuilds the diarization model from self._model_name.
    self._load_model()
300
-
301
async def process_frame(self, frame: Frame, direction: FrameDirection):
    """Track VAD speaking state and reset buffers on speech boundaries.

    Args:
        frame: The incoming pipeline frame.
        direction: Direction the frame is travelling in the pipeline.
    """
    if not self._enabled:
        # if diarization is disabled, just pass the frame through
        await self.push_frame(frame, direction)
        return

    await super().process_frame(frame, direction)

    if isinstance(frame, VADUserStartedSpeakingFrame):
        # Speech started: begin collecting a fresh audio buffer.
        self._vad_user_speaking = True
        self._audio_buffer = []
        logger.debug("VADUserStartedSpeakingFrame received")
        return

    if isinstance(frame, VADUserStoppedSpeakingFrame):
        # Speech ended: clear speaker tracking and any buffered audio.
        self._vad_user_speaking = False
        logger.debug("VADUserStoppedSpeakingFrame received")
        self._current_speaker_id = None
        self._audio_buffer = []
325
-
326
def reset(self):
    """Clear speaker/audio state and reset the underlying diarization model."""
    # Forget any previously detected speaker and buffered audio.
    self._audio_buffer = []
    self._current_speaker_id = None
    self._vad_user_speaking = False
    # Clear streaming state held inside the model itself.
    self._model.reset_state()
332
-
333
- def _get_dominant_speaker_id(self, spk_pred: np.ndarray):
334
- spk_pred = (spk_pred > self._params.threshold).astype(int)
335
- dominant_speaker_id = None
336
- if spk_pred.sum() > 0:
337
- # get the dominant speaker id
338
- # Filter to only keep frames that have any speaker probability > 0.0
339
- valid_frame_mask = spk_pred.sum(axis=1) > 0
340
-
341
- # Filter diar_result to only keep valid frames
342
- filtered_diar_result = spk_pred[valid_frame_mask] # ndarray of shape [num_valid_frames, num_speakers]
343
-
344
- # Get the primary speaker for each valid frame
345
- primary_spk = np.argmax(filtered_diar_result, axis=1) # ndarray of shape [num_valid_frames]
346
- # logger.debug(f"Primary speaker for valid frames: {primary_spk}")
347
-
348
- # count the number of different speakers in the primary speaker sequence
349
- num_speakers = len(np.unique(primary_spk))
350
- # logger.debug(f"Number of different speakers: {num_speakers}")
351
-
352
- # If there are multiple speakers, get the dominant one
353
- if num_speakers > 1:
354
- # Count occurrences of each speaker
355
- speaker_counts = np.bincount(primary_spk)
356
- dominant_speaker_id = np.argmax(speaker_counts)
357
- else:
358
- # Only one speaker, return that speaker ID
359
- dominant_speaker_id = primary_spk[0]
360
- return dominant_speaker_id
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/services/nemo/llm.py DELETED
@@ -1,760 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import asyncio
16
- import os
17
- import socket
18
- import subprocess
19
- import time
20
- import uuid
21
- from threading import Thread
22
- from typing import AsyncGenerator, List, Mapping, Optional
23
-
24
- import psutil
25
- import requests
26
- from jinja2.exceptions import TemplateError
27
- from loguru import logger
28
- from omegaconf import DictConfig, OmegaConf
29
- from openai import APITimeoutError, AsyncStream, BadRequestError
30
- from openai.types.chat import ChatCompletionChunk, ChatCompletionMessageParam
31
- from pipecat.adapters.services.open_ai_adapter import OpenAILLMInvocationParams
32
- from pipecat.frames.frames import (
33
- CancelFrame,
34
- EndFrame,
35
- LLMFullResponseEndFrame,
36
- LLMFullResponseStartFrame,
37
- LLMTextFrame,
38
- )
39
- from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext
40
- from pipecat.services.openai.llm import OpenAILLMService
41
- from transformers import AsyncTextIteratorStreamer, AutoModelForCausalLM, AutoTokenizer
42
- from vllm.config import ModelConfig as vllmModelConfig
43
-
44
# Fallback kwargs passed to HF `model.generate` when the caller supplies none.
DEFAULT_GENERATION_KWARGS = {
    "max_new_tokens": 256,
    "temperature": 0.7,
    "top_p": 0.9,
    "do_sample": True,
}
50
-
51
-
52
class LLMUtilsMixin:
    """Utils for local LLM services."""

    def _maybe_add_user_message(self, messages: List[ChatCompletionMessageParam]) -> List[ChatCompletionMessageParam]:
        """
        Some LLMs like "nvidia/Llama-3.1-Nemotron-Nano-8B-v1" requires a user turn after the system prompt,
        this function is used to add a dummy user turn if the system prompt is followed by an assistant turn.
        Mutates and returns the same list.
        """
        starts_with_system = len(messages) >= 1 and messages[0]["role"] == "system"
        if starts_with_system and len(messages) == 1:
            # Lone system prompt: append a dummy user turn.
            messages.append({"role": "user", "content": "Hi"})
        elif starts_with_system and messages[1]["role"] == "assistant":
            # System prompt followed directly by assistant: insert a dummy user turn.
            messages.insert(1, {"role": "user", "content": "Hi"})
        return messages

    def _maybe_merge_consecutive_user_turns(
        self, messages: List[ChatCompletionMessageParam]
    ) -> List[ChatCompletionMessageParam]:
        """
        Merge consecutive user turns into a single turn,
        since some LLMs like "nvidia/Llama-3.1-Nemotron-Nano-8B-v1" do not support consecutive user turns.
        Returns a new list; user contents are joined with "; ".
        """
        if not messages:
            return messages

        merged: List[ChatCompletionMessageParam] = []
        pending = ""  # accumulated text from consecutive user turns
        for msg in messages:
            if msg["role"] == "user":
                pending = msg["content"] if not pending else pending + "; " + msg["content"]
                continue
            # Non-user turn: flush any accumulated user content first.
            if pending:
                merged.append({"role": "user", "content": pending})
                pending = ""
            merged.append(msg)

        # Flush a trailing run of user turns.
        if pending:
            merged.append({"role": "user", "content": pending})

        return merged
99
-
100
-
101
class HuggingFaceLLMLocalService(LLMUtilsMixin):
    """
    HuggingFace LLM local service.

    Loads a causal LM and its tokenizer via `transformers` and streams
    generated text as OpenAI-style `ChatCompletionChunk` objects.
    """

    def __init__(
        self,
        model: str = "meta-llama/Meta-Llama-3-8B-Instruct",
        device: str = "cuda:0",
        dtype: str = "bfloat16",
        thinking_budget: int = 0,
        generation_kwargs: dict = None,
        apply_chat_template_kwargs: dict = None,
    ):
        """Load tokenizer and model, and normalize generation/template kwargs.

        Args:
            model: HF hub id or local path of the causal LM.
            device: Device string forwarded to `device_map`.
            dtype: Torch dtype name used when loading weights.
            thinking_budget: Stored on the instance; not used in this class.
            generation_kwargs: Overrides for `model.generate`; empty/None falls
                back to DEFAULT_GENERATION_KWARGS (aliased, not copied).
            apply_chat_template_kwargs: Extra kwargs for the tokenizer's
                `apply_chat_template`; any `tokenize` key is stripped so the
                template always returns text.
        """
        self.device = device
        self.dtype = dtype
        self.thinking_budget = thinking_budget
        self.tokenizer = AutoTokenizer.from_pretrained(model)
        self.model = AutoModelForCausalLM.from_pretrained(
            model, device_map=device, dtype=dtype, trust_remote_code=True
        )  # type: AutoModelForCausalLM

        self.generation_kwargs = generation_kwargs if generation_kwargs else DEFAULT_GENERATION_KWARGS
        logger.debug(f"LLM generation kwargs: {self.generation_kwargs}")

        self.apply_chat_template_kwargs = apply_chat_template_kwargs if apply_chat_template_kwargs else {}
        if "tokenize" in self.apply_chat_template_kwargs:
            if self.apply_chat_template_kwargs["tokenize"] is not False:
                logger.warning(
                    f"Found `tokenize=True` in apply_chat_template_kwargs={self.apply_chat_template_kwargs},"
                    "it will be ignored and forced to `False`"
                )
            # Remove the key unconditionally; _apply_chat_template passes tokenize=False itself.
            self.apply_chat_template_kwargs.pop("tokenize")

        logger.debug(f"LLM apply_chat_template kwargs: {self.apply_chat_template_kwargs}")

    def _apply_chat_template(self, messages: List[ChatCompletionMessageParam]) -> str:
        """
        Apply the chat template to the messages.
        """
        return self.tokenizer.apply_chat_template(messages, tokenize=False, **self.apply_chat_template_kwargs)

    def _get_prompt_from_messages(self, messages: List[ChatCompletionMessageParam]) -> str:
        """
        Get the formatted prompt from the conversation history messages.
        This function also tries to fix the messages if the LLM cannot handle consecutive turns of the same role,
        or requires a user turn after the system prompt.
        """
        try:
            prompt = self._apply_chat_template(messages)
            return prompt
        except TemplateError as e:
            logger.warning(f"Got TemplateError: {e}.")

        logger.debug(f"Input LLM messages: {messages}")
        if len(messages) > 1 and messages[0]["role"] == "system" and messages[1]["role"] == "assistant":
            logger.warning("Trying to fix by adding dummy user message after system prompt...")
            try:
                messages = self._maybe_add_user_message(messages)
                logger.debug(f"LLM messages after adding dummy user message: {messages}")
                prompt = self._apply_chat_template(messages)
                return prompt
            except TemplateError as e:
                logger.warning(f"Got TemplateError: {e}. Trying to fix by merging consecutive turns if possible.")

        try:
            new_messages = self._maybe_merge_consecutive_user_turns(messages)
            logger.debug(f"LLM messages after merging consecutive user turns: {new_messages}")
            prompt = self._apply_chat_template(new_messages)
            # Update the messages in place if successful
            messages.clear()
            messages.extend(new_messages)
            return prompt
        except Exception as e:
            logger.warning(f"Got Exception: {e}, messages: {messages}")
            raise e

    async def generate_stream(
        self, messages: List[ChatCompletionMessageParam], **kwargs
    ) -> AsyncGenerator[ChatCompletionChunk, None]:
        """
        Generate a stream of chat completion chunks from the messages.
        """
        # Convert messages to prompt format
        prompt = self._get_prompt_from_messages(messages)

        logger.debug(f"LLM prompt: {prompt}")

        inputs = self.tokenizer(prompt, add_special_tokens=False, return_tensors="pt").to(self.device)

        # Generate with streaming
        streamer = AsyncTextIteratorStreamer(self.tokenizer, skip_prompt=True, skip_special_tokens=True)
        generation_kwargs = {
            **inputs,
            "streamer": streamer,
            **self.generation_kwargs,
        }

        # Start generation in background
        # NOTE(review): the thread is never joined; it ends when generate() returns.
        thread = Thread(
            target=self.model.generate,
            kwargs=generation_kwargs,
        )
        thread.start()

        # Stream the output
        async for text in streamer:
            # logger.debug(f"Streamer yielded text: {text}")
            chunk = ChatCompletionChunk(
                id="hf-" + str(uuid.uuid4()),
                choices=[{"delta": {"content": text}, "finish_reason": None, "index": 0}],
                created=int(time.time()),
                model=self.model.config._name_or_path,
                object="chat.completion.chunk",
            )
            yield chunk
217
-
218
-
219
class HuggingFaceLLMService(OpenAILLMService):
    """
    LLM service that hosts a HuggingFace model.

    Wraps HuggingFaceLLMLocalService behind the OpenAILLMService interface so
    the pipecat pipeline can consume locally generated completions.
    """

    def __init__(
        self,
        *,
        model: str = "google/gemma-7b-it",
        device: str = "cuda",
        dtype: str = "bfloat16",
        thinking_budget: int = 0,
        generation_kwargs: dict = None,
        apply_chat_template_kwargs: dict = None,
        **kwargs,
    ):
        # Stash construction parameters before super().__init__, which calls
        # create_client() and therefore needs them already set.
        self._model_name = model
        self._device = device
        self._dtype = dtype
        self._thinking_budget = thinking_budget
        self._generation_kwargs = generation_kwargs if generation_kwargs is not None else DEFAULT_GENERATION_KWARGS
        self._apply_chat_template_kwargs = apply_chat_template_kwargs if apply_chat_template_kwargs is not None else {}
        super().__init__(model=model, **kwargs)

    def create_client(self, api_key=None, base_url=None, **kwargs):
        """
        Create a HuggingFaceLLMLocalService client.

        api_key/base_url are accepted for interface compatibility but unused
        since the model runs in-process.
        """
        return HuggingFaceLLMLocalService(
            model=self._model_name,
            device=self._device,
            dtype=self._dtype,
            thinking_budget=self._thinking_budget,
            generation_kwargs=self._generation_kwargs,
            apply_chat_template_kwargs=self._apply_chat_template_kwargs,
        )

    async def _process_context(self, context: OpenAILLMContext):
        """Process a context through the LLM and push text frames.

        Args:
            context (OpenAILLMContext): The context to process, containing messages
                and other information needed for the LLM interaction.
        """
        await self.push_frame(LLMFullResponseStartFrame())
        cumulative_text = ""
        try:
            await self.start_ttfb_metrics()
            messages = context.get_messages()
            async for chunk in self._client.generate_stream(messages):
                if chunk.choices[0].delta.content:
                    # First content chunk ends the time-to-first-byte window.
                    await self.stop_ttfb_metrics()
                    text = chunk.choices[0].delta.content
                    cumulative_text += text
                    frame = LLMTextFrame(text)
                    await self.push_frame(frame)
        except Exception as e:
            # NOTE(review): loguru's logger.error has no exc_info kwarg like
            # stdlib logging; logger.exception would capture the traceback.
            logger.error(f"Error in _process_context: {e}", exc_info=True)
            raise
        finally:
            # Normalize whitespace purely for the emptiness check/log below.
            cumulative_text = " ".join(cumulative_text.split()).strip()
            if not cumulative_text:
                logger.warning(f"LLM response is empty for context: {context}")
            await self.push_frame(LLMFullResponseEndFrame())

    async def get_chat_completions(
        self, params_from_context: OpenAILLMInvocationParams
    ) -> AsyncGenerator[ChatCompletionChunk, None]:
        """Create a streaming chat completion using HuggingFace model.

        Args:
            params_from_context: Invocation params built from the LLM context;
                only the "messages" entry is used here.

        Returns:
            AsyncGenerator[ChatCompletionChunk]: A streaming response of chat completion
            chunks that can be processed asynchronously.
        """
        messages = params_from_context["messages"]

        return self._client.generate_stream(messages)
302
-
303
-
304
class VLLMService(OpenAILLMService, LLMUtilsMixin):
    """
    LLM service that hosts a vLLM server.

    Talks to an OpenAI-compatible vLLM endpoint; can optionally launch the
    vLLM server itself as a subprocess on construction.
    """

    def __init__(
        self,
        *,
        model: str,
        device: str = "cuda",
        api_key="None",
        base_url="http://localhost:8000/v1",
        organization="None",
        project="None",
        default_headers: Optional[Mapping[str, str]] = None,
        params: Optional[OpenAILLMService.InputParams] = None,
        thinking_budget: int = 0,
        start_vllm_on_init: bool = False,
        vllm_server_params: Optional[str] = None,
        vllm_server_max_wait_time: int = 3600,  # 1 hour max wait time
        vllm_server_check_interval: int = 5,  # check server every 5 seconds
        **kwargs,
    ):
        """Initialize the service, optionally spawning a local vLLM server.

        Args:
            model: Model id the server should serve / the client requests.
            device: Device selector; mapped to CUDA_VISIBLE_DEVICES for a
                spawned server.
            api_key / organization / project: Forwarded to the OpenAI client.
            base_url: OpenAI-compatible endpoint; its port is reused when a
                server is spawned.
            default_headers: Extra HTTP headers for the client.
            params: Generation parameters for OpenAILLMService.
            thinking_budget: Stored; not yet applied (see TODO below).
            start_vllm_on_init: If True, launch `vllm serve` before connecting.
            vllm_server_params: Extra CLI args for `vllm serve`, as one string.
            vllm_server_max_wait_time: Seconds to wait for server readiness.
            vllm_server_check_interval: Seconds between readiness polls.
        """
        self._device = device
        self._vllm_server_max_wait_time = vllm_server_max_wait_time
        self._vllm_server_check_interval = vllm_server_check_interval
        if start_vllm_on_init:
            # May replace base_url if the requested port was taken.
            base_url = self._start_vllm_server(model, vllm_server_params, base_url)

        super().__init__(
            model=model,
            api_key=api_key,
            base_url=base_url,
            organization=organization,
            project=project,
            default_headers=default_headers,
            params=params,
            **kwargs,
        )
        self._thinking_budget = thinking_budget
        self._vllm_server_params = vllm_server_params
        self._start_vllm_on_init = start_vllm_on_init

        # TODO: handle thinking budget
        logger.info(
            f"VLLMService initialized with model: {model}, api_key: {api_key}, base_url: {base_url},"
            f"params: {params}, thinking_budget: {thinking_budget}"
        )

    def _start_vllm_server(
        self, model: str, vllm_server_params: Optional[str] = None, base_url: Optional[str] = None
    ) -> str:
        """
        Start a vllm server and return the base url.

        Reuses an already-running compatible server when possible; otherwise
        spawns `vllm serve` and blocks until it answers /v1/models or the
        configured max wait time elapses.
        """

        requested_port = None
        # If base_url is provided, extract port from it
        if base_url:
            try:
                # Extract port from base_url like "http://localhost:8003/v1"
                from urllib.parse import urlparse

                parsed_url = urlparse(base_url)
                if parsed_url.port:
                    requested_port = parsed_url.port
            except Exception as e:
                logger.warning(
                    f"Could not parse port from base_url {base_url}: {e}, using port from vllm_server_params"
                )

        # Parse port from vllm_server_params, default to 8000
        # NOTE: a --port in vllm_server_params overrides the base_url port.
        if vllm_server_params:
            params_list = vllm_server_params.split()
            for i, param in enumerate(params_list):
                if param == "--port" and i + 1 < len(params_list):
                    try:
                        param_port = int(params_list[i + 1])
                        if requested_port is None:
                            requested_port = param_port
                        else:
                            if param_port != requested_port:
                                logger.warning(
                                    f"Port {param_port} from vllm_server_params is different from base_url port"
                                    f"{requested_port}, using new port {param_port}"
                                )
                                requested_port = param_port
                        break
                    except ValueError:
                        logger.warning(f"Invalid port number: {params_list[i + 1]}, using default 8000")

        if requested_port is None:
            # try to use default port
            requested_port = 8000

        def find_available_port(start_port: int) -> int:
            """Find an available port starting from start_port"""
            for port in range(start_port, start_port + 100):  # Try up to 100 ports
                try:
                    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                        s.bind(('localhost', port))
                        return port
                except OSError:
                    continue
            raise RuntimeError(f"Could not find an available port starting from {start_port}")

        def get_pid_on_port(port: int) -> Optional[int]:
            # Return the PID of the process listening on `port`, if visible.
            for conn in psutil.net_connections(kind="inet"):
                if conn.laddr.port == port and conn.status == psutil.CONN_LISTEN:
                    return conn.pid
            return None

        def check_server_model(port: int, verbose: bool = False) -> tuple[bool, str]:
            """Check if server is running on port and return (is_running, model_name)"""
            try:
                response = requests.get(f"http://localhost:{port}/v1/models", timeout=5)
                if response.status_code == 200:
                    # get the PID for the server process
                    pid = get_pid_on_port(port)
                    if pid is not None and verbose:
                        logger.warning(
                            f"Found vLLM server process (PID: {pid}) on port {port}, you can use `lsof -i :{port}`"
                            "to find the process and kill it if you want to start a new server."
                        )
                    models_data = response.json()
                    if "data" in models_data and models_data["data"]:
                        served_model = models_data["data"][0].get("id", "")
                        return True, served_model
                    return True, ""
                return False, ""
            except (requests.exceptions.RequestException, requests.exceptions.Timeout):
                return False, ""

        # First, check if vLLM server is already running on the requested port
        is_running, served_model = check_server_model(requested_port, verbose=True)
        if is_running:
            if served_model == model:
                final_base_url = f"http://localhost:{requested_port}/v1"
                logger.info(f"vLLM server is already running at {final_base_url} with the correct model: {model}")
                return final_base_url
            else:
                logger.warning(
                    f"vLLM server on port {requested_port} is serving model '{served_model}' but we need '{model}'."
                    "Finding new port..."
                )

        # Find an available port for our model
        port = find_available_port(requested_port)
        if port != requested_port:
            logger.info(f"Using port {port} instead of requested port {requested_port}")

        final_base_url = f"http://localhost:{port}/v1"

        # Check if there's already a vLLM process running on the same port and model
        # (it may be mid-startup and not answering HTTP yet).
        for proc in psutil.process_iter(['pid', 'name', 'cmdline']):
            try:
                if proc.info['cmdline'] and any('vllm' in arg and 'serve' in arg for arg in proc.info['cmdline']):
                    # Check if this process is using the same port and model
                    cmdline_str = ' '.join(proc.info['cmdline'])
                    if f"--port {port}" in cmdline_str:
                        # Extract the model from the command line
                        cmdline_parts = proc.info['cmdline']
                        model_index = -1
                        for i, arg in enumerate(cmdline_parts):
                            if arg == "serve" and i + 1 < len(cmdline_parts):
                                model_index = i + 1
                                break

                        if model_index != -1 and model_index < len(cmdline_parts):
                            running_model = cmdline_parts[model_index]
                            if running_model == model:
                                logger.info(
                                    f"Found existing vLLM server process (PID: {proc.info['pid']}) on port {port}"
                                    f"serving model {model}"
                                )
                                # Wait a bit and check if it's responding
                                time.sleep(2)
                                is_running, served_model = check_server_model(port)
                                if is_running and served_model == model:
                                    logger.info(
                                        f"Existing vLLM server is responding at {final_base_url} with correct model"
                                    )
                                    return final_base_url
                                else:
                                    logger.warning(
                                        f"Existing vLLM process found on port {port} but not responding correctly,"
                                        "will start new server"
                                    )
                            else:
                                logger.info(
                                    f"Found vLLM process on port {port} but serving different model '{running_model}'"
                                    f"(need '{model}'). Will start new server."
                                )
            except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
                continue

        # Build the command with the determined port
        cmd_parts = ["vllm", "serve", model]

        # Parse and modify vllm_server_params to use the correct port
        if vllm_server_params:
            # parse the vllm_server_params and add the port to the command
            params_list = vllm_server_params.split()
            modified_params = []
            i = 0
            while i < len(params_list):
                if params_list[i] == "--port" and i + 1 < len(params_list):
                    # Replace the port with our determined port
                    modified_params.extend(["--port", str(port)])
                    i += 2  # Skip the original port value
                else:
                    modified_params.append(params_list[i])
                    i += 1
            cmd_parts.extend(modified_params)
        else:
            # Add port if vllm_server_params is not provided
            cmd_parts.extend(["--port", str(port)])

        logger.info(f"Starting vLLM server with command: {' '.join(cmd_parts)}")
        logger.warning("It will take a while to download the model if it's not already downloaded.")
        # Set up environment variables for device configuration
        env = os.environ.copy()
        if self._device and self._device != "cpu":
            # Extract CUDA device number if it's in format "cuda:0", "cuda:1", etc.
            if self._device.startswith("cuda:"):
                device_id = self._device.split(":")[1]
                env["CUDA_VISIBLE_DEVICES"] = device_id
                logger.info(f"Setting CUDA_VISIBLE_DEVICES={device_id}")
            elif self._device == "cuda":
                # Use default CUDA device (don't set CUDA_VISIBLE_DEVICES)
                logger.info("Using default CUDA device")
            else:
                # For other device strings, try to extract device number
                logger.warning(f"Unknown device format: {self._device}, using as-is")
                env["CUDA_VISIBLE_DEVICES"] = self._device
        elif self._device == "cpu":
            env["CUDA_VISIBLE_DEVICES"] = ""
            logger.info("Setting CUDA_VISIBLE_DEVICES='' to use CPU")

        try:
            # Start the vLLM server process with environment variables
            process = subprocess.Popen(
                cmd_parts,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
                env=env,
                preexec_fn=os.setsid if os.name != 'nt' else None,  # Create new process group
            )

            # Store the process for potential cleanup later
            self._vllm_process = process

            # Wait for server to start up
            max_wait_time = self._vllm_server_max_wait_time
            check_interval = self._vllm_server_check_interval
            waited_time = 0

            logger.info(f"Waiting for vLLM server to start on port {port}...")
            while waited_time < max_wait_time:
                is_running, served_model = check_server_model(port)
                if is_running and served_model == model:
                    logger.info(f"vLLM server started successfully at {final_base_url} serving model: {model}")
                    return final_base_url
                elif is_running and served_model != model:
                    logger.warning(
                        f"vLLM server started but serving wrong model '{served_model}' instead of '{model}'."
                        "Continuing to wait..."
                    )

                # Check if process is still running
                if process.poll() is not None:
                    # Process has terminated
                    stdout, stderr = process.communicate()
                    logger.error(f"vLLM server process terminated unexpectedly. stdout: {stdout}, stderr: {stderr}")
                    raise RuntimeError(f"Failed to start vLLM server: {stderr}")

                time.sleep(check_interval)
                waited_time += check_interval
                logger.debug(f"Still waiting for vLLM server on port {port}... ({waited_time}s)")

            # If we get here, server didn't start in time
            logger.error(f"vLLM server failed to start within {max_wait_time} seconds on port {port}")
            process.terminate()
            raise RuntimeError(f"vLLM server failed to start within {max_wait_time} seconds on port {port}")

        except FileNotFoundError:
            logger.error("vLLM not found. Please install vLLM: pip install vllm")
            raise RuntimeError("vLLM not found. Please install vLLM: pip install vllm")
        except Exception as e:
            logger.error(f"Failed to start vLLM server: {e}")
            self._stop_vllm_server()
            raise e

    def _stop_vllm_server(self):
        """Stop the vLLM server process if it's running."""
        if hasattr(self, '_vllm_process') and self._vllm_process:
            # NOTE(review): terminate() signals only the Popen process; children
            # in the os.setsid process group are not signalled here — confirm
            # shutdown covers the whole group.
            logger.info(f"Stopping vLLM server process {self._vllm_process.pid}")
            self._vllm_process.terminate()

    async def stop(self, frame: EndFrame):
        """Stop the LLM service.

        Args:
            frame: The end frame.
        """
        await super().stop(frame)
        self._stop_vllm_server()

    async def cancel(self, frame: CancelFrame):
        """Cancel the LLM service.

        Args:
            frame: The cancel frame.
        """
        await super().cancel(frame)
        self._stop_vllm_server()

    async def get_chat_completions(
        self, params_from_context: OpenAILLMInvocationParams
    ) -> AsyncStream[ChatCompletionChunk]:
        """Get streaming chat completions from OpenAI API.

        Args:
            params_from_context: Invocation params built from the LLM context;
                provides both the request params and the raw messages.

        Returns:
            Async stream of chat completion chunks.
        """

        params = self.build_chat_completion_params(params_from_context)
        messages = params_from_context["messages"]
        if self._retry_on_timeout:
            try:
                chunks = await asyncio.wait_for(
                    self._get_response_from_client(messages, params), timeout=self._retry_timeout_secs
                )
                return chunks
            except (APITimeoutError, asyncio.TimeoutError):
                # Retry, this time without a timeout so we get a response
                logger.debug(f"{self}: Retrying chat completion due to timeout")
                chunks = await self._get_response_from_client(messages, params)
                return chunks
        else:
            chunks = await self._get_response_from_client(messages, params)
            return chunks

    async def _get_response_from_client(
        self, messages: List[ChatCompletionMessageParam], params: dict
    ) -> AsyncStream[ChatCompletionChunk]:
        """Get a response from the client.

        On BadRequestError, repairs the message list (dummy user turn after the
        system prompt, merged consecutive user turns) and retries once.
        """

        try:
            chunks = await self._client.chat.completions.create(**params)
        except BadRequestError as e:
            logger.error(f"Error in _get_response_from_client: {e}, trying to fix...")
            logger.debug(f"LLM messages before fixing: {messages}")
            messages = self._maybe_add_user_message(messages)
            messages = self._maybe_merge_consecutive_user_turns(messages)
            logger.debug(f"LLM messages after fixing: {messages}")
            params["messages"] = messages
            chunks = await self._client.chat.completions.create(**params)

        return chunks
669
-
670
-
671
def get_llm_service_from_config(config: DictConfig) -> OpenAILLMService:
    """Get an LLM service from the configuration.

    Reads `config.type` ("hf", "vllm", or "auto") and builds the matching
    service. "auto" probes vLLM model support and falls back to HuggingFace.
    """
    backend = config.type

    logger.info(f"Initializing LLM service from config: {config}")

    # If backend is "auto", try to detect the best backend
    if backend == "auto":
        model_name = config.get("model")
        if not model_name:
            raise ValueError("Model name is required for LLM")

        try:
            # Constructing a vLLM ModelConfig succeeds only for supported models.
            _ = vllmModelConfig(model_name, trust_remote_code=True)
            backend = "vllm"
            logger.info(f"Auto-detected vLLM as the best backend for model {model_name}")
        except Exception as e:
            # NOTE(review): adjacent f-string fragments here concatenate without
            # a separating space in the logged message.
            logger.info(
                f"The LLM doesn't seem to be supported by vLLM yet (error: {e}), using HuggingFace as the backend"
                f"for model: {model_name}. If you are sure that the LLM is supported by vLLM, you can set `type: vllm`"
                "in the config file to force using vLLM."
            )
            backend = "hf"

    # "auto" has been resolved above, so only "hf"/"vllm" can reach the branches below.
    assert backend in [
        "hf",
        "vllm",
        "auto",
    ], f"Invalid backend: {backend}, only `hf`, `vllm`, and `auto` are supported."

    if backend == "hf":
        llm_model = config.model
        llm_device = config.device
        llm_dtype = config.dtype
        llm_generation_kwargs = config.get("generation_kwargs", {})
        if llm_generation_kwargs is not None:
            # Convert OmegaConf containers to plain dicts for transformers.
            llm_generation_kwargs = OmegaConf.to_container(llm_generation_kwargs, resolve=True)
        llm_apply_chat_template_kwargs = config.get("apply_chat_template_kwargs", None)
        if llm_apply_chat_template_kwargs is not None:
            llm_apply_chat_template_kwargs = OmegaConf.to_container(llm_apply_chat_template_kwargs, resolve=True)
        llm_thinking_budget = config.get("thinking_budget", 0)
        return HuggingFaceLLMService(
            model=llm_model,
            device=llm_device,
            dtype=llm_dtype,
            generation_kwargs=llm_generation_kwargs,
            apply_chat_template_kwargs=llm_apply_chat_template_kwargs,
            thinking_budget=llm_thinking_budget,
        )
    elif backend == "vllm":
        llm_model = config.get("model", "vllm_server")
        llm_api_key = config.get("api_key", "None")
        llm_base_url = config.get("base_url", "http://localhost:8000/v1")
        llm_organization = config.get("organization", "None")
        llm_project = config.get("project", "None")
        llm_default_headers = config.get("default_headers", None)
        llm_params = config.get("vllm_generation_params", None)
        llm_dtype = config.dtype
        vllm_server_params = config.get("vllm_server_params", None)
        if vllm_server_params is not None:
            if "dtype" not in vllm_server_params:
                # Propagate the configured dtype to the spawned server.
                vllm_server_params = f"--dtype {llm_dtype} {vllm_server_params}"
                logger.info(f"Adding dtype {llm_dtype} to vllm_server_params: {vllm_server_params}")
        if llm_params is not None:
            # cast into OpenAILLMService.InputParams object
            llm_params = OmegaConf.to_container(llm_params, resolve=True)
            extra = llm_params.get("extra", None)
            # ensure extra is a dictionary
            if extra is None:
                llm_params["extra"] = {}
            elif not isinstance(extra, dict):
                raise ValueError(f"extra must be a dictionary, got {type(extra)}")
            llm_params = OpenAILLMService.InputParams(**llm_params)
        else:
            llm_params = OpenAILLMService.InputParams()
        llm_thinking_budget = config.get("thinking_budget", 0)
        return VLLMService(
            model=llm_model,
            api_key=llm_api_key,
            base_url=llm_base_url,
            organization=llm_organization,
            project=llm_project,
            default_headers=llm_default_headers,
            params=llm_params,
            thinking_budget=llm_thinking_budget,
            start_vllm_on_init=config.get("start_vllm_on_init", False),
            vllm_server_params=vllm_server_params,
        )
    else:
        raise ValueError(f"Invalid LLM backend: {backend}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/services/nemo/streaming_asr.py DELETED
@@ -1,319 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- # NOTE: This file will be deprecated in the future, as the new inference pipeline will replace it.
15
-
16
- import math
17
- import time
18
- from dataclasses import dataclass
19
- from typing import List, Optional
20
-
21
- import numpy as np
22
- import torch
23
- from omegaconf import open_dict
24
-
25
- import nemo.collections.asr as nemo_asr
26
- from nemo.agents.voice_agent.pipecat.services.nemo.utils import CacheFeatureBufferer
27
- from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis
28
- from nemo.collections.common.tokenizers.sentencepiece_tokenizer import SentencePieceTokenizer
29
-
30
-
31
- @dataclass
32
- class ASRResult:
33
- text: str
34
- is_final: bool
35
- eou_prob: Optional[float] = None
36
- eob_prob: Optional[float] = None
37
- eou_latency: Optional[float] = None
38
- eob_latency: Optional[float] = None
39
- processing_time: Optional[float] = None
40
-
41
-
42
- class NemoStreamingASRService:
43
- def __init__(
44
- self,
45
- model: str = "nvidia/parakeet_realtime_eou_120m-v1",
46
- att_context_size: List[int] = [70, 1],
47
- device: str = "cuda",
48
- eou_string: str = "<EOU>",
49
- eob_string: str = "<EOB>",
50
- decoder_type: str = None,
51
- chunk_size: int = -1,
52
- shift_size: int = -1,
53
- left_chunks: int = 2,
54
- sample_rate: int = 16000,
55
- frame_len_in_secs: float = 0.08,
56
- use_amp: bool = False,
57
- chunk_size_in_secs: float = 0.08,
58
- ):
59
- self.model = model
60
- self.eou_string = eou_string
61
- self.eob_string = eob_string
62
- self.device = device
63
- self.att_context_size = att_context_size
64
- self.decoder_type = decoder_type
65
- self.chunk_size = chunk_size
66
- self.shift_size = shift_size
67
- self.left_chunks = left_chunks
68
- self.asr_model = self._load_model(model)
69
- self.tokenizer: SentencePieceTokenizer = self.asr_model.tokenizer
70
- self.use_amp = use_amp
71
- self.pad_and_drop_preencoded = False
72
- self.blank_id = self.get_blank_id()
73
- self.chunk_size_in_secs = chunk_size_in_secs
74
-
75
- assert len(self.att_context_size) == 2, "Att context size must be a list of two integers"
76
- assert (
77
- self.att_context_size[0] >= 0
78
- ), f"Left att context size must be greater than 0: {self.att_context_size[0]}"
79
- assert (
80
- self.att_context_size[1] >= 0
81
- ), f"Right att context size must be greater than 0: {self.att_context_size[1]}"
82
-
83
- window_stride_in_secs = self.asr_model.cfg.preprocessor.window_stride
84
- model_stride = self.asr_model.cfg.encoder.subsampling_factor
85
- self.model_chunk_size = self.asr_model.encoder.streaming_cfg.chunk_size
86
- if isinstance(self.model_chunk_size, list):
87
- self.model_chunk_size = self.model_chunk_size[1]
88
- self.pre_encode_cache_size = self.asr_model.encoder.streaming_cfg.pre_encode_cache_size
89
- if isinstance(self.pre_encode_cache_size, list):
90
- self.pre_encode_cache_size = self.pre_encode_cache_size[1]
91
- self.pre_encode_cache_size_in_secs = self.pre_encode_cache_size * window_stride_in_secs
92
-
93
- self.tokens_per_frame = math.ceil(np.trunc(self.chunk_size_in_secs / window_stride_in_secs) / model_stride)
94
- # overwrite the encoder streaming params with proper shift size for cache aware streaming
95
- self.asr_model.encoder.setup_streaming_params(
96
- chunk_size=self.model_chunk_size // model_stride, shift_size=self.tokens_per_frame
97
- )
98
-
99
- model_chunk_size_in_secs = self.model_chunk_size * window_stride_in_secs
100
-
101
- self.buffer_size_in_secs = self.pre_encode_cache_size_in_secs + model_chunk_size_in_secs
102
-
103
- self._audio_buffer = CacheFeatureBufferer(
104
- sample_rate=sample_rate,
105
- buffer_size_in_secs=self.buffer_size_in_secs,
106
- chunk_size_in_secs=self.chunk_size_in_secs,
107
- preprocessor_cfg=self.asr_model.cfg.preprocessor,
108
- device=self.device,
109
- )
110
- self._reset_cache()
111
- self._previous_hypotheses = self._get_blank_hypothesis()
112
- self._last_transcript_timestamp = time.time()
113
- print(f"NemoStreamingASRService initialized with model `{model}` on device `{self.device}`")
114
-
115
- def _reset_cache(self):
116
- (
117
- self._cache_last_channel, # [17, B, 70, 512]
118
- self._cache_last_time, # [17, B, 512, 8]
119
- self._cache_last_channel_len, # B
120
- ) = self.asr_model.encoder.get_initial_cache_state(
121
- 1
122
- ) # batch size is 1
123
-
124
- def _get_blank_hypothesis(self) -> List[Hypothesis]:
125
- blank_hypothesis = Hypothesis(score=0.0, y_sequence=[], dec_state=None, timestamp=[], last_token=None)
126
- return [blank_hypothesis]
127
-
128
- @property
129
- def drop_extra_pre_encoded(self):
130
- return self.asr_model.encoder.streaming_cfg.drop_extra_pre_encoded
131
-
132
- def get_blank_id(self):
133
- return len(self.tokenizer.vocab)
134
-
135
- def get_text_from_tokens(self, tokens: List[int]) -> str:
136
- sep = "\u2581" # '▁'
137
- tokens = [int(t) for t in tokens if t != self.blank_id]
138
- if tokens:
139
- pieces = self.tokenizer.ids_to_tokens(tokens)
140
- text = "".join([p.replace(sep, ' ') if p.startswith(sep) else p for p in pieces])
141
- else:
142
- text = ""
143
- return text
144
-
145
- def _load_model(self, model: str):
146
- if model.endswith(".nemo"):
147
- asr_model = nemo_asr.models.ASRModel.restore_from(model, map_location=torch.device(self.device))
148
- else:
149
- asr_model = nemo_asr.models.ASRModel.from_pretrained(model, map_location=torch.device(self.device))
150
-
151
- if self.decoder_type is not None and hasattr(asr_model, "cur_decoder"):
152
- asr_model.change_decoding_strategy(decoder_type=self.decoder_type)
153
- elif isinstance(asr_model, nemo_asr.models.EncDecCTCModel):
154
- self.decoder_type = "ctc"
155
- elif isinstance(asr_model, nemo_asr.models.EncDecRNNTModel):
156
- self.decoder_type = "rnnt"
157
- else:
158
- raise ValueError("Decoder type not supported for this model.")
159
-
160
- if self.att_context_size is not None:
161
- if hasattr(asr_model.encoder, "set_default_att_context_size"):
162
- asr_model.encoder.set_default_att_context_size(att_context_size=self.att_context_size)
163
- else:
164
- raise ValueError("Model does not support multiple lookaheads.")
165
- else:
166
- self.att_context_size = asr_model.cfg.encoder.att_context_size
167
-
168
- decoding_cfg = asr_model.cfg.decoding
169
- with open_dict(decoding_cfg):
170
- decoding_cfg.strategy = "greedy"
171
- decoding_cfg.compute_timestamps = False
172
- decoding_cfg.preserve_alignments = True
173
- if hasattr(asr_model, 'joint'): # if an RNNT model
174
- decoding_cfg.greedy.max_symbols = 10
175
- decoding_cfg.fused_batch_size = -1
176
- asr_model.change_decoding_strategy(decoding_cfg)
177
-
178
- if hasattr(asr_model.encoder, "set_default_att_context_size"):
179
- asr_model.encoder.set_default_att_context_size(att_context_size=self.att_context_size)
180
-
181
- # chunk_size is set automatically for models trained for streaming.
182
- # For models trained for offline mode with full context, we need to pass the chunk_size explicitly.
183
- if self.chunk_size > 0:
184
- if self.shift_size < 0:
185
- shift_size = self.chunk_size
186
- else:
187
- shift_size = self.shift_size
188
- asr_model.encoder.setup_streaming_params(
189
- chunk_size=self.chunk_size, left_chunks=self.left_chunks, shift_size=shift_size
190
- )
191
-
192
- asr_model.eval()
193
- return asr_model
194
-
195
- def _get_best_hypothesis(self, encoded, encoded_len, partial_hypotheses=None):
196
- if self.decoder_type == "ctc":
197
- best_hyp = self.asr_model.decoding.ctc_decoder_predictions_tensor(
198
- encoded,
199
- encoded_len,
200
- return_hypotheses=True,
201
- )
202
- elif self.decoder_type == "rnnt":
203
- best_hyp = self.asr_model.decoding.rnnt_decoder_predictions_tensor(
204
- encoded, encoded_len, return_hypotheses=True, partial_hypotheses=partial_hypotheses
205
- )
206
- else:
207
- raise ValueError("Decoder type not supported for this model.")
208
- return best_hyp
209
-
210
- def _get_tokens_and_probs_from_alignments(self, alignments):
211
- tokens = []
212
- probs = []
213
- if self.decoder_type == "ctc":
214
- all_logits = alignments[0]
215
- all_tokens = alignments[1]
216
- for i in range(len(all_tokens)):
217
- token_id = int(all_tokens[i])
218
- if token_id != self.blank_id:
219
- tokens.append(token_id)
220
- logits = all_logits[i] # shape (vocab_size,)
221
- probs_i = torch.softmax(logits, dim=-1)[token_id].item()
222
- probs.append(probs_i)
223
- elif self.decoder_type == "rnnt":
224
- for t in range(len(alignments)):
225
- for u in range(len(alignments[t])):
226
- logits, token_id = alignments[t][u] # (logits, token_id)
227
- token_id = int(token_id)
228
- if token_id != self.blank_id:
229
- tokens.append(token_id)
230
- probs_i = torch.softmax(logits, dim=-1)[token_id].item()
231
- probs.append(probs_i)
232
- else:
233
- raise ValueError("Decoder type not supported for this model.")
234
-
235
- return tokens, probs
236
-
237
- def transcribe(self, audio: bytes, stream_id: str = "default") -> ASRResult:
238
- start_time = time.time()
239
-
240
- # Convert bytes to numpy array
241
- audio_array = np.frombuffer(audio, dtype=np.int16).astype(np.float32) / 32768.0
242
-
243
- self._audio_buffer.update(audio_array)
244
-
245
- features = self._audio_buffer.get_feature_buffer()
246
- feature_lengths = torch.tensor([features.shape[1]], device=self.device)
247
- features = features.unsqueeze(0) # Add batch dimension
248
-
249
- with torch.no_grad():
250
- (
251
- encoded,
252
- encoded_len,
253
- cache_last_channel,
254
- cache_last_time,
255
- cache_last_channel_len,
256
- ) = self.asr_model.encoder.cache_aware_stream_step(
257
- processed_signal=features,
258
- processed_signal_length=feature_lengths,
259
- cache_last_channel=self._cache_last_channel,
260
- cache_last_time=self._cache_last_time,
261
- cache_last_channel_len=self._cache_last_channel_len,
262
- keep_all_outputs=False,
263
- drop_extra_pre_encoded=self.drop_extra_pre_encoded,
264
- )
265
-
266
- best_hyp = self._get_best_hypothesis(encoded, encoded_len, partial_hypotheses=self._previous_hypotheses)
267
-
268
- self._previous_hypotheses = best_hyp
269
- self._cache_last_channel = cache_last_channel
270
- self._cache_last_time = cache_last_time
271
- self._cache_last_channel_len = cache_last_channel_len
272
-
273
- tokens, probs = self._get_tokens_and_probs_from_alignments(best_hyp[0].alignments)
274
-
275
- text = self.get_text_from_tokens(tokens)
276
-
277
- is_final = False
278
- eou_latency = None
279
- eob_latency = None
280
- eou_prob = None
281
- eob_prob = None
282
- current_timestamp = time.time()
283
- if self.eou_string in text or self.eob_string in text:
284
- is_final = True
285
- if self.eou_string in text:
286
- eou_latency = (
287
- current_timestamp - self._last_transcript_timestamp if text.strip() == self.eou_string else 0.0
288
- )
289
- eou_prob = self.get_eou_probability(tokens, probs, self.eou_string)
290
- if self.eob_string in text:
291
- eob_latency = (
292
- current_timestamp - self._last_transcript_timestamp if text.strip() == self.eob_string else 0.0
293
- )
294
- eob_prob = self.get_eou_probability(tokens, probs, self.eob_string)
295
- self.reset_state(stream_id=stream_id)
296
- if text.strip():
297
- self._last_transcript_timestamp = current_timestamp
298
-
299
- processing_time = time.time() - start_time
300
- return ASRResult(
301
- text=text,
302
- is_final=is_final,
303
- eou_latency=eou_latency,
304
- eob_latency=eob_latency,
305
- eou_prob=eou_prob,
306
- eob_prob=eob_prob,
307
- processing_time=processing_time,
308
- )
309
-
310
- def reset_state(self, stream_id: str = "default"):
311
- self._audio_buffer.reset()
312
- self._reset_cache()
313
- self._previous_hypotheses = self._get_blank_hypothesis()
314
- self._last_transcript_timestamp = time.time()
315
-
316
- def get_eou_probability(self, tokens: List[int], probs: List[float], eou_string: str = "<EOU>") -> float:
317
- text_tokens = self.tokenizer.ids_to_tokens(tokens)
318
- eou_index = text_tokens.index(eou_string)
319
- return probs[eou_index]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/services/nemo/streaming_diar.py DELETED
@@ -1,212 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- # NOTE: This file will be deprecated in the future, as the new inference pipeline will replace it.
15
-
16
- from dataclasses import dataclass
17
- from typing import Tuple
18
-
19
- import numpy as np
20
- import torch
21
- from torch import Tensor
22
-
23
- from nemo.agents.voice_agent.pipecat.services.nemo.utils import CacheFeatureBufferer
24
- from nemo.collections.asr.models import SortformerEncLabelModel
25
-
26
- from nemo.collections.asr.modules.sortformer_modules import StreamingSortformerState
27
-
28
-
29
- @dataclass
30
- class DiarizationConfig:
31
- """Diarization configuration parameters for inference."""
32
-
33
- model_path: str = "nvidia/diar_streaming_sortformer_4spk-v2"
34
- device: str = "cuda"
35
-
36
- log: bool = False # If True, log will be printed
37
- max_num_speakers: int = 4
38
- spkcache_len: int = 188
39
- spkcache_refresh_rate: int = 144
40
- fifo_len: int = 188
41
- chunk_len: int = 6
42
- chunk_left_context: int = 1
43
- chunk_right_context: int = 7
44
-
45
-
46
- class NeMoStreamingDiarService:
47
- def __init__(
48
- self,
49
- cfg: DiarizationConfig,
50
- model: str,
51
- frame_len_in_secs: float = 0.08,
52
- sample_rate: int = 16000,
53
- left_offset: int = 8,
54
- right_offset: int = 8,
55
- use_amp: bool = False,
56
- compute_dtype: torch.dtype = torch.float32,
57
- ):
58
- self.model = model
59
- self.cfg = cfg
60
- self.cfg.model_path = model
61
- self.diarizer = self.build_diarizer()
62
- self.device = cfg.device
63
- self.use_amp = use_amp
64
- self.compute_dtype = compute_dtype
65
- self.frame_len_in_secs = frame_len_in_secs
66
- self.left_offset = left_offset
67
- self.right_offset = right_offset
68
- self.chunk_size = self.cfg.chunk_len
69
- self.buffer_size_in_secs = (
70
- self.cfg.chunk_len * self.frame_len_in_secs + (self.left_offset + self.right_offset) * 0.01
71
- )
72
- self.max_num_speakers = self.cfg.max_num_speakers
73
-
74
- self.feature_bufferer = CacheFeatureBufferer(
75
- sample_rate=sample_rate,
76
- buffer_size_in_secs=self.buffer_size_in_secs,
77
- chunk_size_in_secs=self.cfg.chunk_len * self.frame_len_in_secs,
78
- preprocessor_cfg=self.diarizer.cfg.preprocessor,
79
- device=self.device,
80
- )
81
- self.streaming_state = self.init_streaming_state(batch_size=1)
82
- self.total_preds = torch.zeros((1, 0, self.max_num_speakers), device=self.diarizer.device)
83
-
84
- print(f"NeMoStreamingDiarService initialized with model `{model}` on device `{self.device}`")
85
-
86
- def build_diarizer(self):
87
- if self.cfg.model_path.endswith(".nemo"):
88
- diar_model = SortformerEncLabelModel.restore_from(self.cfg.model_path, map_location=self.cfg.device)
89
- else:
90
- diar_model = SortformerEncLabelModel.from_pretrained(self.cfg.model_path, map_location=self.cfg.device)
91
-
92
- # Steaming mode setup
93
- diar_model.sortformer_modules.chunk_len = self.cfg.chunk_len
94
- diar_model.sortformer_modules.spkcache_len = self.cfg.spkcache_len
95
- diar_model.sortformer_modules.chunk_left_context = self.cfg.chunk_left_context
96
- diar_model.sortformer_modules.chunk_right_context = self.cfg.chunk_right_context
97
- diar_model.sortformer_modules.fifo_len = self.cfg.fifo_len
98
- diar_model.sortformer_modules.log = self.cfg.log
99
- diar_model.sortformer_modules.spkcache_refresh_rate = self.cfg.spkcache_refresh_rate
100
- diar_model.eval()
101
-
102
- return diar_model
103
-
104
- def print_diar_result(self, diar_result: np.ndarray):
105
- for t in range(diar_result.shape[0]):
106
- spk_probs = ""
107
- for s in range(diar_result.shape[1]):
108
- spk_probs += f"{diar_result[t, s]:.2f} "
109
- print(f"Time {t}: {spk_probs}")
110
-
111
- def diarize(self, audio: bytes, stream_id: str = "default") -> str:
112
-
113
- audio_array = np.frombuffer(audio, dtype=np.int16).astype(np.float32) / 32768.0
114
-
115
- self.feature_bufferer.update(audio_array)
116
-
117
- features = self.feature_bufferer.get_feature_buffer()
118
- feature_buffers = features.unsqueeze(0) # add batch dimension
119
- feature_buffers = feature_buffers.transpose(1, 2) # [batch, feature, time] -> [batch, time, feature]
120
- feature_buffer_lens = torch.tensor([feature_buffers.shape[1]], device=self.device)
121
- self.streaming_state, chunk_preds = self.stream_step(
122
- processed_signal=feature_buffers,
123
- processed_signal_length=feature_buffer_lens,
124
- streaming_state=self.streaming_state,
125
- total_preds=self.total_preds,
126
- left_offset=self.left_offset,
127
- right_offset=self.right_offset,
128
- )
129
- self.total_preds = chunk_preds
130
- diar_result = chunk_preds[:, -self.chunk_size :, :].clone().cpu().numpy()
131
- return diar_result[0] # tensor of shape [6, 4]
132
-
133
- def reset_state(self, stream_id: str = "default"):
134
- self.feature_bufferer.reset()
135
- self.streaming_state = self.init_streaming_state(batch_size=1)
136
- self.total_preds = torch.zeros((1, 0, self.max_num_speakers), device=self.diarizer.device)
137
-
138
- def init_streaming_state(self, batch_size: int = 1) -> StreamingSortformerState:
139
- """
140
- Initialize the streaming state for the diarization model.
141
-
142
- Args:
143
- batch_size: The batch size to use.
144
-
145
- Returns:
146
- SortformerStreamingState: The initialized streaming state.
147
- """
148
- # Use the model's init_streaming_state method but convert to SortformerStreamingState format
149
- nemo_state = self.diarizer.sortformer_modules.init_streaming_state(
150
- batch_size=batch_size, async_streaming=self.diarizer.async_streaming, device=self.device
151
- )
152
-
153
- return nemo_state
154
-
155
- def stream_step(
156
- self,
157
- processed_signal: Tensor,
158
- processed_signal_length: Tensor,
159
- streaming_state: StreamingSortformerState,
160
- total_preds: Tensor,
161
- left_offset: int = 0,
162
- right_offset: int = 0,
163
- ) -> Tuple[StreamingSortformerState, Tensor]:
164
- """
165
- Execute a single streaming step for diarization.
166
-
167
- Args:
168
- processed_signal: The processed audio signal.
169
- processed_signal_length: The length of the processed signal.
170
- streaming_state: The current streaming state.
171
- total_preds: The total predictions so far.
172
- left_offset: The left offset for the current chunk.
173
- right_offset: The right offset for the current chunk.
174
-
175
- Returns:
176
- Tuple[SortformerStreamingState, Tensor]: The updated streaming state and predictions.
177
- """
178
- # Move tensors to correct device
179
- if processed_signal.device != self.device:
180
- processed_signal = processed_signal.to(self.device)
181
-
182
- if processed_signal_length.device != self.device:
183
- processed_signal_length = processed_signal_length.to(self.device)
184
-
185
- if total_preds is not None and total_preds.device != self.device:
186
- total_preds = total_preds.to(self.device)
187
-
188
- with (
189
- torch.amp.autocast(device_type=self.device, dtype=self.compute_dtype, enabled=self.use_amp),
190
- torch.inference_mode(),
191
- torch.no_grad(),
192
- ):
193
- try:
194
- # Call the model's forward_streaming_step method
195
- streaming_state, diar_pred_out_stream = self.diarizer.forward_streaming_step(
196
- processed_signal=processed_signal,
197
- processed_signal_length=processed_signal_length,
198
- streaming_state=streaming_state,
199
- total_preds=total_preds,
200
- left_offset=left_offset,
201
- right_offset=right_offset,
202
- )
203
- except Exception as e:
204
- print(f"Error in diarizer streaming step: {e}")
205
- # print the stack trace
206
- import traceback
207
-
208
- traceback.print_exc()
209
- # Return the existing state and preds if there's an error
210
- return streaming_state, total_preds
211
-
212
- return streaming_state, diar_pred_out_stream
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/services/nemo/stt.py DELETED
@@ -1,316 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import asyncio
16
- from datetime import datetime
17
- from typing import AsyncGenerator, List, Optional
18
-
19
- from loguru import logger
20
- from pipecat.frames.frames import (
21
- CancelFrame,
22
- EndFrame,
23
- ErrorFrame,
24
- Frame,
25
- InterimTranscriptionFrame,
26
- StartFrame,
27
- TranscriptionFrame,
28
- VADUserStartedSpeakingFrame,
29
- VADUserStoppedSpeakingFrame,
30
- )
31
- from pipecat.processors.frame_processor import FrameDirection
32
- from pipecat.services.stt_service import STTService
33
- from pipecat.transcriptions.language import Language
34
- from pipecat.utils.time import time_now_iso8601
35
- from pipecat.utils.tracing.service_decorators import traced_stt
36
- from pydantic import BaseModel
37
-
38
- from nemo.agents.voice_agent.pipecat.services.nemo.audio_logger import AudioLogger
39
- from nemo.agents.voice_agent.pipecat.services.nemo.streaming_asr import NemoStreamingASRService
40
-
41
- ASR_EOU_MODELS = ["nvidia/parakeet_realtime_eou_120m-v1"]
42
-
43
- try:
44
- # disable nemo logging
45
- from nemo.utils import logging
46
-
47
- level = logging.getEffectiveLevel()
48
- logging.setLevel(logging.CRITICAL)
49
-
50
-
51
- except ModuleNotFoundError as e:
52
- logger.error(f"Exception: {e}")
53
- logger.error('In order to use NVIDIA NeMo STT, you need to `pip install "nemo_toolkit[all]"`.')
54
- raise Exception(f"Missing module: {e}")
55
-
56
-
57
- class NeMoSTTInputParams(BaseModel):
58
- """Input parameters for NeMo STT service."""
59
-
60
- language: Optional[Language] = Language.EN_US
61
- att_context_size: Optional[List] = [70, 1]
62
- frame_len_in_secs: Optional[float] = 0.08 # 80ms for FastConformer model
63
- config_path: Optional[str] = None # path to the Niva ASR config file
64
- raw_audio_frame_len_in_secs: Optional[float] = 0.016 # 16ms for websocket transport
65
- buffer_size: Optional[int] = 5 # number of audio frames to buffer, 1 frame is 16ms
66
-
67
-
68
- class NemoSTTService(STTService):
69
- """NeMo Speech-to-Text service for Pipecat integration."""
70
-
71
- def __init__(
72
- self,
73
- *,
74
- model: Optional[str] = "nnvidia/parakeet_realtime_eou_120m-v1",
75
- device: Optional[str] = "cuda:0",
76
- sample_rate: Optional[int] = 16000,
77
- params: Optional[NeMoSTTInputParams] = None,
78
- has_turn_taking: Optional[bool] = None, # if None, it will be set by the model name
79
- backend: Optional[str] = "legacy",
80
- decoder_type: Optional[str] = "rnnt",
81
- audio_logger: Optional[AudioLogger] = None,
82
- **kwargs,
83
- ):
84
- super().__init__(**kwargs)
85
- self._queue = asyncio.Queue()
86
- self._sample_rate = sample_rate
87
- self._params = params or NeMoSTTInputParams()
88
- self._model_name = model
89
- if has_turn_taking is None:
90
- has_turn_taking = True if model in ASR_EOU_MODELS else False
91
- logger.info(f"Setting has_turn_taking to `{has_turn_taking}` based on model name: `{model}`")
92
- self._has_turn_taking = has_turn_taking
93
- self._backend = backend
94
- self._decoder_type = decoder_type
95
- self._audio_logger = audio_logger
96
- self._is_vad_active = False
97
- logger.info(f"NeMoSTTInputParams: {self._params}")
98
-
99
- self._device = device
100
-
101
- self._load_model()
102
-
103
- self.audio_buffer = []
104
- self.user_is_speaking = False
105
-
106
- def _load_model(self):
107
- if self._backend == "legacy":
108
- self._model = NemoStreamingASRService(
109
- self._model_name,
110
- self._params.att_context_size,
111
- device=self._device,
112
- decoder_type=self._decoder_type,
113
- frame_len_in_secs=self._params.frame_len_in_secs,
114
- )
115
- else:
116
- raise ValueError(f"Invalid ASR backend: {self._backend}")
117
-
118
- def can_generate_metrics(self) -> bool:
119
- """Indicates whether this service can generate metrics.
120
-
121
- Returns:
122
- bool: True, as this service supports metric generation.
123
- """
124
- return True
125
-
126
- async def start(self, frame: StartFrame):
127
- """Handle service start.
128
-
129
- Args:
130
- frame: StartFrame containing initial configuration
131
- """
132
- await super().start(frame)
133
-
134
- # Initialize the model if not already done
135
- if not hasattr(self, "_model"):
136
- self._load_model()
137
-
138
- async def stop(self, frame: EndFrame):
139
- """Handle service stop.
140
-
141
- Args:
142
- frame: EndFrame that triggered this method
143
- """
144
- await super().stop(frame)
145
- # Clear any internal state if needed
146
- await self._queue.put(None) # Signal to stop processing
147
-
148
- async def cancel(self, frame: CancelFrame):
149
- """Handle service cancellation.
150
-
151
- Args:
152
- frame: CancelFrame that triggered this method
153
- """
154
- await super().cancel(frame)
155
- # Clear any internal state
156
- await self._queue.put(None) # Signal to stop processing
157
- self._queue = asyncio.Queue() # Reset the queue
158
-
159
- async def run_stt(self, audio: bytes) -> AsyncGenerator[Frame, None]:
160
- """Process audio data and generate transcription frames.
161
-
162
- Args:
163
- audio: Raw audio bytes to transcribe
164
-
165
- Yields:
166
- Frame: Transcription frames containing the results
167
- """
168
- timestamp_now = datetime.now()
169
- await self.start_ttfb_metrics()
170
- await self.start_processing_metrics()
171
- if self._audio_logger is not None and self._audio_logger.first_audio_timestamp is None:
172
- self._audio_logger.first_audio_timestamp = timestamp_now
173
-
174
- try:
175
- is_final = False
176
- user_has_finished = False
177
- transcription = None
178
- self.audio_buffer.append(audio)
179
- if len(self.audio_buffer) >= self._params.buffer_size:
180
- audio = b"".join(self.audio_buffer)
181
- self.audio_buffer = []
182
-
183
- # Append to continuous user audio buffer for stereo conversation recording
184
- if self._audio_logger is not None:
185
- self._audio_logger.append_continuous_user_audio(audio)
186
-
187
- asr_result = self._model.transcribe(audio)
188
- transcription = asr_result.text
189
- is_final = asr_result.is_final
190
- if self._audio_logger is not None:
191
- if self._is_vad_active:
192
- is_first_frame = False
193
- self._audio_logger.turn_audio_buffer.append(audio)
194
- # Accumulate transcriptions for turn-based logging
195
- if transcription:
196
- self._audio_logger.turn_transcription_buffer.append(transcription)
197
- self._audio_logger.stage_turn_audio_and_transcription(
198
- timestamp_now=timestamp_now,
199
- is_first_frame=is_first_frame,
200
- additional_metadata={
201
- "model": self._model_name,
202
- "backend": self._backend,
203
- },
204
- )
205
- eou_latency = asr_result.eou_latency
206
- eob_latency = asr_result.eob_latency
207
- eou_prob = asr_result.eou_prob
208
- eob_prob = asr_result.eob_prob
209
- if eou_latency is not None:
210
- logger.debug(
211
- f"EOU latency: {eou_latency: .4f} seconds. EOU probability: {eou_prob: .2f}."
212
- f"Processing time: {asr_result.processing_time: .4f} seconds."
213
- )
214
- user_has_finished = True
215
- if eob_latency is not None:
216
- logger.debug(
217
- f"EOB latency: {eob_latency: .4f} seconds. EOB probability: {eob_prob: .2f}."
218
- f"Processing time: {asr_result.processing_time: .4f} seconds."
219
- )
220
- user_has_finished = True
221
- await self.stop_ttfb_metrics()
222
- await self.stop_processing_metrics()
223
-
224
- if transcription:
225
- logger.debug(f"Transcription (is_final={is_final}): `{transcription}`")
226
- self.user_is_speaking = True if not user_has_finished else False
227
-
228
- # Get the language from params or default to EN_US
229
- language = self._params.language if self._params else Language.EN_US
230
-
231
- # Create and push the transcription frame
232
- if self._has_turn_taking:
233
- # if turn taking is enabled, we push interim transcription frames
234
- # and let the turn taking service handle the final transcription
235
- frame_type = InterimTranscriptionFrame
236
- else:
237
- # otherwise, we use the is_final flag to determine the frame type
238
- frame_type = TranscriptionFrame if is_final else InterimTranscriptionFrame
239
- await self.push_frame(
240
- frame_type(
241
- transcription,
242
- "", # No speaker ID in this implementation
243
- time_now_iso8601(),
244
- language,
245
- result={"text": transcription},
246
- )
247
- )
248
-
249
- # Handle the transcription
250
- await self._handle_transcription(
251
- transcript=transcription,
252
- is_final=is_final,
253
- language=language,
254
- )
255
-
256
- yield None
257
-
258
- except Exception as e:
259
- logger.error(f"Error in NeMo STT processing: {e}")
260
- await self.push_frame(
261
- ErrorFrame(
262
- str(e),
263
- time_now_iso8601(),
264
- )
265
- )
266
- yield None
267
-
268
- @traced_stt
269
- async def _handle_transcription(self, transcript: str, is_final: bool, language: Optional[str] = None):
270
- """Handle a transcription result.
271
-
272
- Args:
273
- transcript: The transcribed text
274
- is_final: Whether this is a final transcription
275
- language: The language of the transcription
276
- """
277
- pass # Base implementation - can be extended for specific handling needs
278
-
279
- async def set_language(self, language: Language):
280
- """Update the service's recognition language.
281
-
282
- Args:
283
- language: New language for recognition
284
- """
285
- if self._params:
286
- self._params.language = language
287
- else:
288
- self._params = NeMoSTTInputParams(language=language)
289
-
290
- logger.info(f"Switching STT language to: {language}")
291
-
292
- async def set_model(self, model: str):
293
- """Update the service's model.
294
-
295
- Args:
296
- model: New model name/path to use
297
- """
298
- await super().set_model(model)
299
- self._model_name = model
300
- self._load_model()
301
-
302
- async def process_frame(self, frame: Frame, direction: FrameDirection):
303
- """Process incoming frames and handle VAD events."""
304
- if isinstance(frame, VADUserStoppedSpeakingFrame) and isinstance(self._model, NemoStreamingASRService):
305
- # manualy reset the state of the model when end of utterance is detected by VAD
306
- logger.debug("Resetting state of the model due to VADUserStoppedSpeakingFrame")
307
- if self.user_is_speaking:
308
- logger.debug(
309
- "[EOU missing] STT failed to detect end of utterance before VAD detected user stopped speaking"
310
- )
311
- self._model.reset_state()
312
- self._is_vad_active = False
313
- elif isinstance(frame, VADUserStartedSpeakingFrame):
314
- self._is_vad_active = True
315
-
316
- await super().process_frame(frame, direction)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/services/nemo/tts.py DELETED
@@ -1,892 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import asyncio
16
- import inspect
17
- import uuid
18
- from collections.abc import AsyncGenerator
19
- from datetime import datetime
20
- from typing import Iterator, List, Optional
21
-
22
- import numpy as np
23
- import torch
24
- from loguru import logger
25
- from omegaconf import DictConfig, OmegaConf
26
- from pipecat.frames.frames import (
27
- CancelFrame,
28
- EndFrame,
29
- ErrorFrame,
30
- Frame,
31
- LLMTextFrame,
32
- StartFrame,
33
- TTSAudioRawFrame,
34
- TTSStartedFrame,
35
- TTSStoppedFrame,
36
- )
37
- from pipecat.services.llm_service import FunctionCallParams
38
- from pipecat.services.tts_service import TTSService
39
-
40
- from nemo.agents.voice_agent.pipecat.services.nemo.audio_logger import AudioLogger
41
- from nemo.agents.voice_agent.pipecat.utils.text.simple_text_aggregator import SimpleSegmentedTextAggregator
42
- from nemo.agents.voice_agent.utils.tool_calling.mixins import ToolCallingMixin
43
- from nemo.collections.tts.models import FastPitchModel, HifiGanModel
44
-
45
-
46
- class BaseNemoTTSService(TTSService, ToolCallingMixin):
47
- """Text-to-Speech service using Nemo TTS models.
48
-
49
- This service works with any TTS model that exposes a generate(text) method
50
- that returns audio data. The TTS generation runs in a dedicated background thread to
51
- avoid blocking the main asyncio event loop, following the same pattern as NemoDiarService.
52
-
53
- Args:
54
- model: TTS model instance with a generate(text) method
55
- sample_rate: Audio sample rate in Hz (defaults to 22050)
56
- **kwargs: Additional arguments passed to TTSService
57
- """
58
-
59
- def __init__(
60
- self,
61
- *,
62
- model,
63
- device: str = "cuda",
64
- sample_rate: int = 22050,
65
- think_tokens: Optional[List[str]] = None,
66
- audio_logger: Optional[AudioLogger] = None,
67
- ignore_strings: Optional[List[str]] = None,
68
- **kwargs,
69
- ):
70
- super().__init__(sample_rate=sample_rate, **kwargs)
71
- logger.info(f"Initializing TTS service with model: {model} and device: {device}")
72
- self._model_name = model
73
- self._device = device
74
- self._model = self._setup_model()
75
- self._think_tokens = think_tokens
76
- self._audio_logger = audio_logger
77
- if think_tokens is not None:
78
- assert (
79
- isinstance(think_tokens, list) and len(think_tokens) == 2
80
- ), f"think_tokens must be a list of two strings, but got type {type(think_tokens)}: {think_tokens}"
81
- self._ignore_strings = set(ignore_strings) if ignore_strings is not None else None
82
- # Background processing infrastructure - no response handler needed
83
- self._tts_queue = asyncio.Queue()
84
- self._processing_task = None
85
- self._processing_running = False
86
-
87
- # Track pending requests with their response queues
88
- self._pending_requests = {}
89
- self._have_seen_think_tokens = False
90
-
91
- def reset(self):
92
- """Reset the TTS service."""
93
- self._text_aggregator.reset()
94
-
95
- def setup_tool_calling(self):
96
- """
97
- Setup the tool calling mixin by registering all available tools.
98
- """
99
- pass # No tools by default
100
-
101
- def _setup_model(self):
102
- raise NotImplementedError("Subclass must implement _setup_model")
103
-
104
- def _generate_audio(self, text: str) -> Iterator[np.ndarray]:
105
- raise NotImplementedError("Subclass must implement _generate_audio")
106
-
107
- def can_generate_metrics(self) -> bool:
108
- """If the TTS service can generate metrics."""
109
- return True
110
-
111
- async def start(self, frame: StartFrame):
112
- """Handle service start."""
113
- await super().start(frame)
114
-
115
- # Initialize the model if not already done
116
- if not hasattr(self, "_model") or self._model is None:
117
- self._model = self._setup_model()
118
-
119
- # Only start background processing task - no response handler needed
120
- if not self._processing_task:
121
- self._processing_task = self.create_task(self._processing_task_handler())
122
-
123
- async def stop(self, frame: EndFrame):
124
- """Handle service stop."""
125
- await super().stop(frame)
126
- await self._stop_tasks()
127
-
128
- async def cancel(self, frame: CancelFrame):
129
- """Handle service cancellation."""
130
- await super().cancel(frame)
131
- await self._stop_tasks()
132
-
133
- async def _stop_tasks(self):
134
- """Stop background processing tasks."""
135
- self._processing_running = False
136
- await self._tts_queue.put(None) # Signal to stop processing
137
-
138
- if self._processing_task:
139
- await self.cancel_task(self._processing_task)
140
- self._processing_task = None
141
-
142
- def _tts_processor(self):
143
- """Background processor that handles TTS generation calls."""
144
- try:
145
- while self._processing_running:
146
- try:
147
- future = asyncio.run_coroutine_threadsafe(self._tts_queue.get(), self.get_event_loop())
148
- request = future.result()
149
-
150
- if request is None: # Stop signal
151
- logger.debug("Received stop signal in TTS background processor")
152
- break
153
-
154
- text, request_id = request
155
- logger.debug(f"Processing TTS request for text: [{text}]")
156
-
157
- # Get the response queue for this request
158
- response_queue = None
159
- future = asyncio.run_coroutine_threadsafe(
160
- self._get_response_queue(request_id), self.get_event_loop()
161
- )
162
- response_queue = future.result()
163
-
164
- if response_queue is None:
165
- logger.warning(f"No response queue found for request {request_id}")
166
- continue
167
-
168
- # Process TTS generation
169
- try:
170
- audio_result = self._generate_audio(text)
171
-
172
- # Send result directly to the waiting request
173
- asyncio.run_coroutine_threadsafe(
174
- response_queue.put(('success', audio_result)), self.get_event_loop()
175
- )
176
- except Exception as e:
177
- logger.error(f"Error in TTS generation: {e}")
178
- # Send error directly to the waiting request
179
- asyncio.run_coroutine_threadsafe(response_queue.put(('error', e)), self.get_event_loop())
180
-
181
- except Exception as e:
182
- logger.error(f"Error in background TTS processor: {e}")
183
-
184
- except Exception as e:
185
- logger.error(f"Background TTS processor fatal error: {e}")
186
- finally:
187
- logger.debug("Background TTS processor stopped")
188
-
189
- async def _get_response_queue(self, request_id: str):
190
- """Get the response queue for a specific request."""
191
- return self._pending_requests.get(request_id)
192
-
193
- async def _processing_task_handler(self):
194
- """Handler for background processing task."""
195
- try:
196
- self._processing_running = True
197
- logger.debug("Starting background TTS processing task")
198
- await asyncio.to_thread(self._tts_processor)
199
- except asyncio.CancelledError:
200
- logger.debug("Background TTS processing task cancelled")
201
- self._processing_running = False
202
- raise
203
- finally:
204
- self._processing_running = False
205
-
206
- def _handle_think_tokens(self, text: str) -> Optional[str]:
207
- """
208
- Handle the thinking tokens for TTS.
209
- If the thinking tokens are not provided, return the text as it is.
210
- Otherwise:
211
- If both thinking tokens appear in the text, return the text after the end of thinking tokens.
212
- If the LLM is thinking, return None.
213
- If the LLM is done thinking, return the text after the end of thinking tokens.
214
- If the LLM starts thinking, return the text before the start of thinking tokens.
215
- If the LLM is not thinking, return the text as is.
216
- """
217
- if not self._think_tokens or not text:
218
- return text
219
- elif self._think_tokens[0] in text and self._think_tokens[1] in text:
220
- # LLM finishes thinking in one chunk or outputs dummy thinking tokens
221
- logger.debug(f"LLM finishes thinking: {text}")
222
- idx = text.index(self._think_tokens[1])
223
- # only return the text after the end of thinking tokens
224
- text = text[idx + len(self._think_tokens[1]) :]
225
- self._have_seen_think_tokens = False
226
- logger.debug(f"Returning text after thinking: {text}")
227
- return text
228
- elif self._have_seen_think_tokens:
229
- # LLM is thinking
230
- if self._think_tokens[1] not in text:
231
- logger.debug(f"LLM is still thinking: {text}")
232
- # LLM is still thinking
233
- return None
234
- else:
235
- # LLM is done thinking
236
- logger.debug(f"LLM is done thinking: {text}")
237
- idx = text.index(self._think_tokens[1])
238
- # only return the text after the end of thinking tokens
239
- text = text[idx + len(self._think_tokens[1]) :]
240
- self._have_seen_think_tokens = False
241
- logger.debug(f"Returning text after thinking: {text}")
242
- return text
243
- elif self._think_tokens[0] in text:
244
- # LLM now starts thinking
245
- logger.debug(f"LLM starts thinking: {text}")
246
- self._have_seen_think_tokens = True
247
- # return text before the start of thinking tokens
248
- idx = text.index(self._think_tokens[0])
249
- text = text[:idx]
250
- logger.debug(f"Returning text before thinking: {text}")
251
- return text
252
- else:
253
- # LLM is not thinking
254
- return text
255
-
256
- def _drop_special_tokens(self, text: str) -> Optional[str]:
257
- """
258
- Drop the special tokens from the text.
259
- """
260
- if self._ignore_strings is None:
261
- return text
262
- for ignore_string in self._ignore_strings:
263
- if ignore_string in text:
264
- logger.debug(f"Dropping string `{ignore_string}` from text: `{text}`")
265
- text = text.replace(ignore_string, "")
266
- return text
267
-
268
- async def run_tts(self, text: str) -> AsyncGenerator[Frame, None]:
269
- """Generate speech from text using the Nemo TTS model."""
270
-
271
- if self._think_tokens is not None:
272
- text = self._handle_think_tokens(text)
273
-
274
- if not text:
275
- yield None
276
- return
277
-
278
- if self._ignore_strings is not None:
279
- text = self._drop_special_tokens(text)
280
-
281
- logger.debug(f"{self}: Generating TTS [{text}]")
282
-
283
- try:
284
- await self.start_ttfb_metrics()
285
- yield TTSStartedFrame()
286
-
287
- # Increment turn index at the start of agent speaking (only if speaker changed)
288
- if self._audio_logger is not None:
289
- self._audio_logger.increment_turn_index(speaker="agent")
290
-
291
- # Generate unique request ID
292
-
293
- request_id = str(uuid.uuid4())
294
-
295
- # Create response queue for this specific request
296
- request_queue = asyncio.Queue()
297
- self._pending_requests[request_id] = request_queue
298
-
299
- try:
300
- # Queue the TTS request for background processing
301
- await self._tts_queue.put((text, request_id))
302
-
303
- # Wait for the result directly from our request queue
304
- result = await request_queue.get()
305
- status, data = result
306
-
307
- if status == 'error':
308
- logger.error(f"{self} TTS generation error: {data}")
309
- yield ErrorFrame(error=f"TTS generation error: {str(data)}")
310
- return
311
-
312
- audio_result = data
313
- if audio_result is None:
314
- logger.error(f"{self} TTS model returned None for text: [{text}]")
315
- yield ErrorFrame(error="TTS generation failed - no audio returned")
316
- return
317
-
318
- await self.start_tts_usage_metrics(text)
319
-
320
- # Collect all audio for logging
321
- all_audio_bytes = b""
322
- # Capture the start time when TTS begins (not when it ends)
323
- if self._audio_logger is not None and self._audio_logger.first_audio_timestamp is None:
324
- self._audio_logger.first_audio_timestamp = datetime.now()
325
-
326
- # Process the audio result (same as before)
327
- if (
328
- inspect.isgenerator(audio_result)
329
- or hasattr(audio_result, '__iter__')
330
- and hasattr(audio_result, '__next__')
331
- ):
332
- # Handle generator case
333
- first_chunk = True
334
- for audio_chunk in audio_result:
335
- if first_chunk:
336
- await self.stop_ttfb_metrics()
337
- first_chunk = False
338
- # Capture start time on first chunk
339
- if self._audio_logger is not None:
340
- tts_start_time = self._audio_logger.get_time_from_start_of_session()
341
-
342
- if audio_chunk is None:
343
- break
344
-
345
- audio_bytes = self._convert_to_bytes(audio_chunk)
346
- all_audio_bytes += audio_bytes
347
- chunk_size = self.chunk_size
348
- for i in range(0, len(audio_bytes), chunk_size):
349
- audio_chunk_bytes = audio_bytes[i : i + chunk_size]
350
- if not audio_chunk_bytes:
351
- break
352
-
353
- frame = TTSAudioRawFrame(
354
- audio=audio_chunk_bytes, sample_rate=self.sample_rate, num_channels=1
355
- )
356
- yield frame
357
- else:
358
- # Handle single result case
359
- await self.stop_ttfb_metrics()
360
- # Capture start time for single result
361
- if self._audio_logger is not None:
362
- tts_start_time = self._audio_logger.get_time_from_start_of_session()
363
- audio_bytes = self._convert_to_bytes(audio_result)
364
- all_audio_bytes = audio_bytes
365
-
366
- chunk_size = self.chunk_size
367
- for i in range(0, len(audio_bytes), chunk_size):
368
- chunk = audio_bytes[i : i + chunk_size]
369
- if not chunk:
370
- break
371
-
372
- frame = TTSAudioRawFrame(audio=chunk, sample_rate=self.sample_rate, num_channels=1)
373
- yield frame
374
-
375
- # Log the complete audio if logger is available
376
- if self._audio_logger is not None and all_audio_bytes:
377
- try:
378
- self._audio_logger.log_agent_audio(
379
- audio_data=all_audio_bytes,
380
- text=text,
381
- sample_rate=self.sample_rate,
382
- num_channels=1,
383
- additional_metadata={
384
- "model": self._model_name,
385
- },
386
- tts_generation_time=tts_start_time,
387
- )
388
- except Exception as e:
389
- logger.warning(f"Failed to log agent audio: {e}")
390
-
391
- yield TTSStoppedFrame()
392
-
393
- finally:
394
- # Clean up the pending request
395
- if request_id in self._pending_requests:
396
- del self._pending_requests[request_id]
397
-
398
- except Exception as e:
399
- logger.exception(f"{self} error generating TTS: {e}")
400
- error_message = f"TTS generation error: {str(e)}"
401
- yield ErrorFrame(error=error_message)
402
-
403
- def _convert_to_bytes(self, audio_data) -> bytes:
404
- """Convert various audio data formats to bytes."""
405
- if isinstance(audio_data, (bytes, bytearray)):
406
- return bytes(audio_data)
407
-
408
- if isinstance(audio_data, np.ndarray):
409
- # Ensure it's in the right format (16-bit PCM)
410
- if audio_data.dtype in [np.float32, np.float64]:
411
- # Convert float [-1, 1] to int16 [-32768, 32767]
412
- audio_data = np.clip(audio_data, -1.0, 1.0) # Ensure values are in range
413
- audio_data = (audio_data * 32767).astype(np.int16)
414
- elif audio_data.dtype != np.int16:
415
- # Convert other integer types to int16
416
- audio_data = audio_data.astype(np.int16)
417
- return audio_data.tobytes()
418
- elif hasattr(audio_data, 'tobytes'):
419
- return audio_data.tobytes()
420
- else:
421
- return bytes(audio_data)
422
-
423
-
424
- class NeMoFastPitchHiFiGANTTSService(BaseNemoTTSService):
425
- """Text-to-Speech service using NeMo FastPitch-Hifigan model.
426
-
427
- More info: https://huggingface.co/nvidia/tts_en_fastpitch
428
-
429
- Args:
430
- fastpitch_model: FastPitch model name
431
- hifigan_model: Hifigan model name
432
- device: Device to run on (default: 'cuda')
433
- **kwargs: Additional arguments passed to BaseNemoTTSService
434
- """
435
-
436
- def __init__(
437
- self,
438
- fastpitch_model: str = "nvidia/tts_en_fastpitch",
439
- hifigan_model: str = "nvidia/tts_hifigan",
440
- device: str = "cuda",
441
- **kwargs,
442
- ):
443
- model_name = f"{fastpitch_model}+{hifigan_model}"
444
- self._fastpitch_model_name = fastpitch_model
445
- self._hifigan_model_name = hifigan_model
446
- super().__init__(model=model_name, device=device, **kwargs)
447
- self.setup_tool_calling()
448
-
449
- def _setup_model(self):
450
- logger.info(
451
- f"Loading FastPitch model={self._fastpitch_model_name} and HiFiGAN model={self._hifigan_model_name}"
452
- )
453
- self._fastpitch_model = self._setup_fastpitch_model(self._fastpitch_model_name)
454
- self._hifigan_model = self._setup_hifigan_model(self._hifigan_model_name)
455
- return self._fastpitch_model, self._hifigan_model
456
-
457
- def _setup_fastpitch_model(self, model_name: str):
458
- if model_name.endswith(".nemo"):
459
- fastpitch_model = FastPitchModel.restore_from(model_name, map_location=torch.device(self._device))
460
- else:
461
- fastpitch_model = FastPitchModel.from_pretrained(model_name, map_location=torch.device(self._device))
462
- fastpitch_model.eval()
463
- return fastpitch_model
464
-
465
- def _setup_hifigan_model(self, model_name: str):
466
- if model_name.endswith(".nemo"):
467
- hifigan_model = HifiGanModel.restore_from(model_name, map_location=torch.device(self._device))
468
- else:
469
- hifigan_model = HifiGanModel.from_pretrained(model_name, map_location=torch.device(self._device))
470
- hifigan_model.eval()
471
- return hifigan_model
472
-
473
- def _generate_audio(self, text: str) -> Iterator[np.ndarray]:
474
- with torch.no_grad():
475
- parsed = self._fastpitch_model.parse(text)
476
- spectrogram = self._fastpitch_model.generate_spectrogram(tokens=parsed)
477
- audio = self._hifigan_model.convert_spectrogram_to_audio(spec=spectrogram)
478
- audio = audio.detach().view(-1).cpu().numpy()
479
- yield audio
480
-
481
-
482
- class KokoroTTSService(BaseNemoTTSService):
483
- """Text-to-Speech service using Kokoro-82M model.
484
-
485
- Kokoro is an open-weight TTS model with 82 million parameters.
486
- More info: https://huggingface.co/hexgrad/Kokoro-82M
487
-
488
- Args:
489
- lang_code: Language code for the model (default: 'a' for American English)
490
- voice: Voice to use (default: 'af_heart')
491
- device: Device to run on (default: 'cuda')
492
- sample_rate: Audio sample rate in Hz (default: 24000 for Kokoro)
493
- download_all: Download all models for different languages (default: True)
494
- cache_models: Cache models on GPU for faster switching between languages (default: True)
495
- **kwargs: Additional arguments passed to BaseNemoTTSService
496
- """
497
-
498
- def __init__(
499
- self,
500
- model: str = "hexgrad/Kokoro-82M",
501
- lang_code: str = "a",
502
- voice: str = "af_heart",
503
- device: str = "cuda",
504
- sample_rate: int = 24000,
505
- speed: float = 1.0,
506
- download_all: bool = True,
507
- cache_models: bool = True,
508
- **kwargs,
509
- ):
510
- self._lang_code = lang_code
511
- self._voice = voice
512
- self._speed = speed
513
- assert speed > 0, "Speed must be greater than 0"
514
- self._original_speed = speed
515
- self._original_voice = voice
516
- self._gender = 'female' if voice[1] == 'f' else 'male'
517
- self._original_gender = self._gender
518
- self._original_lang_code = self._lang_code
519
- if download_all:
520
- self._model_maps = self._download_all_models(
521
- lang_code=["a", "b"], device=device, repo_id=model, cache_models=cache_models
522
- )
523
- else:
524
- self._model_maps = {}
525
- super().__init__(model=model, device=device, sample_rate=sample_rate, **kwargs)
526
- self.setup_tool_calling()
527
-
528
- def _setup_model(self, lang_code: Optional[str] = None, voice: Optional[str] = None):
529
- """Initialize the Kokoro pipeline."""
530
- try:
531
- from kokoro import KPipeline
532
- except ImportError:
533
- raise ImportError(
534
- "kokoro package is required for KokoroTTSService. Install it with: `pip install kokoro>=0.9.2`"
535
- )
536
- if lang_code is None:
537
- lang_code = self._lang_code
538
- if voice is None:
539
- voice = self._voice
540
- logger.info(f"Loading Kokoro TTS model with model={self._model_name}, lang_code={lang_code}, voice={voice}")
541
- if lang_code in self._model_maps:
542
- pipeline = self._model_maps[lang_code]
543
- else:
544
- pipeline = KPipeline(lang_code=lang_code, device=self._device, repo_id=self._model_name)
545
- self._model_maps[lang_code] = pipeline
546
- return pipeline
547
-
548
- def _download_all_models(
549
- self, lang_code: List[str] = ['a', 'b'], device="cuda", repo_id="hexgrad/Kokoro-82M", cache_models=True
550
- ):
551
- """Download all models for Kokoro TTS service."""
552
- logger.info(f"Downloading all models for Kokoro TTS service with lang_code={lang_code}")
553
- from kokoro import KPipeline
554
-
555
- model_maps = {}
556
-
557
- for lang in lang_code:
558
- pipeline = KPipeline(lang_code=lang, device=device, repo_id=repo_id)
559
- if cache_models:
560
- model_maps[lang] = pipeline
561
- torch.cuda.empty_cache()
562
- return model_maps
563
-
564
- def _generate_audio(self, text: str) -> Iterator[np.ndarray]:
565
- """Generate audio using the Kokoro pipeline.
566
-
567
- Args:
568
- text: Text to convert to speech
569
-
570
- Yields:
571
- Audio data as numpy arrays
572
- """
573
- try:
574
- # Generate audio using Kokoro pipeline
575
- generator = self._model(text, voice=self._voice, speed=self._speed)
576
-
577
- # The generator yields tuples of (gs, ps, audio)
578
- # We only need the audio component
579
- for i, (gs, ps, audio) in enumerate(generator):
580
- logger.debug(
581
- f"Kokoro generated audio chunk {i}: gs={gs}, ps={ps},"
582
- f"audio_shape={audio.shape if hasattr(audio, 'shape') else len(audio)}"
583
- )
584
- if isinstance(audio, torch.Tensor):
585
- audio = audio.detach().cpu().numpy()
586
- # Kokoro returns audio as numpy array in float32 format [-1, 1]
587
- # The base class will handle conversion to int16 bytes
588
- yield audio
589
-
590
- except Exception as e:
591
- logger.error(f"Error generating audio with Kokoro: {e}")
592
- raise
593
-
594
- async def tool_tts_set_speed(self, params: FunctionCallParams, speed_lambda: float):
595
- """
596
- Set a specific speaking speed of the assistant's voice.
597
- This tool should be called only when the user specifies the speed explicitly,
598
- such as "speak twice as fast" or "speak half as slow" or "speak 1.5 times as fast".
599
-
600
- Inform user of the result of this tool call. After calling this tool, continue the previous
601
- response if it was unfinished and was interrupted by the user, otherwise start a new response
602
- and ask if the user needs help on anything else. Avoid repeating previous responses.
603
-
604
- Args:
605
- speed_lambda: positive float, the relative change of the speaking speed to the original speed.
606
- E.g., 1.0 for original speed, 1.25 for 25% faster than original speed,
607
- 0.8 for 20% slower than original speed.
608
-
609
- """
610
- if speed_lambda <= 0:
611
- result = {
612
- "success": False,
613
- "message": f"Speed remains unchanged since the change is not a positive number: {speed_lambda}",
614
- }
615
- logger.debug(f"Speed remains unchanged since the change is not a positive number: {speed_lambda}")
616
- else:
617
- self._speed = speed_lambda * self._speed
618
- result = {
619
- "success": True,
620
- "message": f"Speed set to {speed_lambda} of the previous speed",
621
- }
622
- logger.debug(f"Speed set to {speed_lambda} of the previous speed {self._original_speed}")
623
- await params.result_callback(result)
624
-
625
- async def tool_tts_reset_speed(self, params: FunctionCallParams):
626
- """
627
- Reset the speaking speed to the original speed.
628
-
629
- Inform user of the result of this tool call. After calling this tool, continue the previous
630
- response if it was unfinished and was interrupted by the user, otherwise start a new response
631
- and ask if the user needs help on anything else. Avoid repeating previous responses.
632
- """
633
- self._speed = self._original_speed
634
- result = {"success": True, "message": "Speaking speed is reset to the original one"}
635
- logger.debug(f"Speaking speed is reset to the original speed {self._original_speed}")
636
- await params.result_callback(result)
637
-
638
- async def tool_tts_speak_faster(self, params: FunctionCallParams):
639
- """
640
- Speak faster by increasing the speaking speed 15% faster each time this function is called.
641
-
642
- Inform user of the result of this tool call. After calling this tool, continue the previous
643
- response if it was unfinished and was interrupted by the user, otherwise start a new response
644
- and ask if the user needs help on anything else. Avoid repeating previous responses.
645
- """
646
- speed_lambda = 1.15
647
- self._speed = speed_lambda * self._speed
648
- result = {
649
- "success": True,
650
- "message": f"Speaking speed is increased to {speed_lambda} of the previous speed",
651
- }
652
- logger.debug(f"Speed is set to {speed_lambda} of the previous speed, new speed is {self._speed}")
653
- await params.result_callback(result)
654
-
655
- async def tool_tts_speak_slower(self, params: FunctionCallParams):
656
- """
657
- Speak slower by decreasing the speaking speed 15% slower each time this function is called.
658
-
659
- Inform user of the result of this tool call. After calling this tool, continue the previous
660
- response if it was unfinished and was interrupted by the user, otherwise start a new response
661
- and ask if the user needs help on anything else. Avoid repeating previous responses.
662
- """
663
- speed_lambda = 0.85
664
- self._speed = speed_lambda * self._speed
665
- result = {
666
- "success": True,
667
- "message": f"Speaking speed is decreased to {speed_lambda} of the previous speed",
668
- }
669
- logger.debug(f"Speed is set to {speed_lambda} of the previous speed, new speed is {self._speed}")
670
- await params.result_callback(result)
671
-
672
- async def tool_tts_set_voice(self, params: FunctionCallParams, accent: str, gender: str):
673
- """
674
- Set the accent and gender of the assistant's voice.
675
- This tool should be called only when the user specifies the accent and/or gender explicitly.
676
-
677
- Inform user of the result of this tool call. After calling this tool, continue the previous
678
- response if it was unfinished and was interrupted by the user, otherwise start a new response
679
- and ask if the user needs help on anything else. Avoid repeating previous responses.
680
-
681
- Args:
682
- accent: Accent for the TTS model. Must be one of 'American English', 'British English'
683
- or 'current' for keeping the current accent.
684
- gender: gender of the assistant's voice. Must be one of 'male', 'female',
685
- or 'current' for keeping the current gender.
686
- """
687
- await params.llm.push_frame(LLMTextFrame("Just a moment."))
688
-
689
- lang_code = "a" if accent == "American English" else "b" if accent == "British English" else "current"
690
- new_lang_code = self._lang_code
691
- new_gender = self._gender
692
- if lang_code != 'current':
693
- new_lang_code = lang_code
694
- if gender != 'current':
695
- new_gender = gender
696
-
697
- if new_lang_code == 'a':
698
- new_voice = 'af_heart' if new_gender == 'female' else 'am_michael'
699
- elif new_lang_code == 'b':
700
- new_voice = 'bf_emma' if new_gender == 'female' else 'bm_george'
701
- else:
702
- await params.result_callback(
703
- {
704
- "success": False,
705
- "message": f"Invalid language code: {new_lang_code} or gender: {new_gender}",
706
- }
707
- )
708
- return
709
-
710
- new_model = await asyncio.to_thread(self._setup_model, new_lang_code, new_voice)
711
- self._model = new_model
712
- self._lang_code = new_lang_code
713
- self._gender = new_gender
714
- self._voice = new_voice
715
- logger.debug(f"Language and voice are set to {new_lang_code} and {new_voice}")
716
- await params.result_callback({"success": True, "message": "Voice has been updated."})
717
-
718
- async def tool_tts_reset_voice(self, params: FunctionCallParams):
719
- """
720
- Reset the accent and voice to the original ones.
721
-
722
- Inform user of the result of this tool call. After calling this tool, continue the previous
723
- response if it was unfinished and was interrupted by the user, otherwise start a new response
724
- and ask if the user needs help on anything else. Avoid repeating previous responses.
725
-
726
- """
727
- await params.llm.push_frame(LLMTextFrame("Of course."))
728
-
729
- new_model = await asyncio.to_thread(self._setup_model, self._original_lang_code, self._original_voice)
730
- self._model = new_model
731
- self._lang_code = self._original_lang_code
732
- self._gender = self._original_gender
733
- self._voice = self._original_voice
734
- logger.debug(
735
- f"Language and voice are reset to the original ones {self._original_lang_code} and {self._original_voice}"
736
- )
737
- await params.result_callback({"success": True, "message": "Voice has been reset to the original one."})
738
-
739
- def setup_tool_calling(self):
740
- """
741
- Setup the tool calling mixin by registering all available tools.
742
- """
743
- self.register_direct_function("tool_tts_reset_speed", self.tool_tts_reset_speed)
744
- self.register_direct_function("tool_tts_speak_faster", self.tool_tts_speak_faster)
745
- self.register_direct_function("tool_tts_speak_slower", self.tool_tts_speak_slower)
746
- self.register_direct_function("tool_tts_set_speed", self.tool_tts_set_speed)
747
- self.register_direct_function("tool_tts_set_voice", self.tool_tts_set_voice)
748
- self.register_direct_function("tool_tts_reset_voice", self.tool_tts_reset_voice)
749
-
750
- def reset(self):
751
- """
752
- Reset the voice and speed to the original ones.
753
- """
754
- self._text_aggregator.reset()
755
- self._speed = self._original_speed
756
- self._model = self._setup_model(self._original_lang_code, self._original_voice)
757
- self._lang_code = self._original_lang_code
758
- self._gender = self._original_gender
759
- self._voice = self._original_voice
760
-
761
-
762
- class MagpieTTSService(BaseNemoTTSService):
763
- """Text-to-Speech service using Magpie TTS model.
764
-
765
- Magpie is a multilingual TTS model with 357 million parameters.
766
- More info: https://huggingface.co/nvidia/magpie_tts_multilingual_357m
767
-
768
- Args:
769
- model: Model name or path to the Magpie TTS model.
770
- language: Language code for the model (default: 'en' for English)
771
- speaker: Speaker to use for the model (default: 'Sofia')
772
- apply_TN: Whether to apply text normalization (default: False)
773
- device: Device to run on (default: 'cuda')
774
- **kwargs: Additional arguments passed to BaseNemoTTSService
775
- """
776
-
777
- SPEAKER_MAP = {"John": 0, "Sofia": 1, "Aria": 2, "Jason": 3, "Leo": 4}
778
-
779
- def __init__(
780
- self,
781
- model: str = "nvidia/magpie_tts_multilingual_357m",
782
- language: str = "en",
783
- speaker: str = "Sofia",
784
- apply_TN: bool = False,
785
- device: str = "cuda",
786
- **kwargs,
787
- ):
788
- if speaker not in self.SPEAKER_MAP:
789
- raise ValueError(f"Invalid speaker: {speaker}, must be one of {list(self.SPEAKER_MAP.keys())}")
790
- self._language = language
791
- self._current_speaker = speaker
792
- self._apply_TN = apply_TN
793
- super().__init__(model=model, device=device, **kwargs)
794
- self.setup_tool_calling()
795
-
796
- def _setup_model(self):
797
- from nemo.collections.tts.models import MagpieTTSModel
798
-
799
- if self._model_name.endswith(".nemo"):
800
- model = MagpieTTSModel.restore_from(self._model_name, map_location=torch.device(self._device))
801
- else:
802
- model = MagpieTTSModel.from_pretrained(self._model_name, map_location=torch.device(self._device))
803
- model.eval()
804
-
805
- text = "Warming up the Magpie TTS model, this will help the model to respond faster for later requests."
806
- with torch.no_grad():
807
- _, _ = model.do_tts(
808
- text,
809
- language=self._language,
810
- apply_TN=self._apply_TN,
811
- speaker_index=self.SPEAKER_MAP[self._current_speaker],
812
- )
813
- torch.cuda.empty_cache()
814
- return model
815
-
816
- def _generate_audio(self, text: str) -> Iterator[np.ndarray]:
817
- audio, audio_len = self._model.do_tts(
818
- text,
819
- language=self._language,
820
- apply_TN=self._apply_TN,
821
- speaker_index=self.SPEAKER_MAP[self._current_speaker],
822
- )
823
- audio_len = audio_len.view(-1).item()
824
- audio = audio.detach().view(-1).cpu().numpy()
825
- yield audio[:audio_len]
826
-
827
- def setup_tool_calling(self):
828
- """No tools for now for Magpie TTS service."""
829
- pass
830
-
831
-
832
- def get_tts_service_from_config(config: DictConfig, audio_logger: Optional[AudioLogger] = None) -> BaseNemoTTSService:
833
- """Get the TTS service from the configuration.
834
-
835
- Args:
836
- config: The DictConfig object containing the TTS configuration.
837
- audio_logger: The audio logger to use for audio logging.
838
- Returns:
839
- The TTS service.
840
- """
841
- if isinstance(config, DictConfig):
842
- config = OmegaConf.to_container(config, resolve=True)
843
- model = config.get("model", None)
844
- device = config.get("device", "cuda")
845
- if config.get("type", None) != "nemo":
846
- raise ValueError(f"Invalid TTS type: {config.get('type', None)}, only 'nemo' is supported")
847
- if model is None:
848
- raise ValueError("Model is required for Nemo TTS service")
849
-
850
- text_aggregator = SimpleSegmentedTextAggregator(
851
- punctuation_marks=config.get("extra_separator", None),
852
- ignore_marks=config.get("ignore_strings", None),
853
- min_sentence_length=config.get("min_sentence_length", 5),
854
- use_legacy_eos_detection=config.get("use_legacy_eos_detection", False),
855
- )
856
-
857
- if model == "fastpitch-hifigan":
858
- return NeMoFastPitchHiFiGANTTSService(
859
- fastpitch_model=config.get("main_model_id", None),
860
- hifigan_model=config.get("sub_model_id", None),
861
- device=device,
862
- text_aggregator=text_aggregator,
863
- think_tokens=config.get("think_tokens", None),
864
- audio_logger=audio_logger,
865
- ignore_strings=config.get("ignore_strings", None),
866
- )
867
- elif model == "magpie":
868
- return MagpieTTSService(
869
- model=config.get("main_model_id", None),
870
- language=config.get("language", "en"),
871
- speaker=config.get("speaker", "Sofia"),
872
- apply_TN=config.get("apply_TN", False),
873
- device=device,
874
- text_aggregator=text_aggregator,
875
- think_tokens=config.get("think_tokens", None),
876
- audio_logger=audio_logger,
877
- ignore_strings=config.get("ignore_strings", None),
878
- )
879
- elif model == "kokoro":
880
- return KokoroTTSService(
881
- model=config.get("main_model_id", "hexgrad/Kokoro-82M"),
882
- voice=config.get("sub_model_id", "af_heart"),
883
- device=device,
884
- speed=config.get("speed", 1.0),
885
- text_aggregator=text_aggregator,
886
- think_tokens=config.get("think_tokens", None),
887
- sample_rate=24000,
888
- audio_logger=audio_logger,
889
- ignore_strings=config.get("ignore_strings", None),
890
- )
891
- else:
892
- raise ValueError(f"Invalid model: {model}, only 'fastpitch-hifigan', 'magpie' and 'kokoro' are supported")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/services/nemo/turn_taking.py DELETED
@@ -1,441 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import time
16
- from datetime import datetime
17
- from pathlib import Path
18
- from typing import List, Optional, Union
19
-
20
- import yaml
21
- from loguru import logger
22
- from pipecat.frames.frames import (
23
- BotStartedSpeakingFrame,
24
- BotStoppedSpeakingFrame,
25
- Frame,
26
- InterimTranscriptionFrame,
27
- StartInterruptionFrame,
28
- TranscriptionFrame,
29
- UserStartedSpeakingFrame,
30
- UserStoppedSpeakingFrame,
31
- VADUserStartedSpeakingFrame,
32
- VADUserStoppedSpeakingFrame,
33
- )
34
- from pipecat.processors.frame_processor import FrameDirection, FrameProcessor
35
- from pipecat.transcriptions.language import Language
36
- from pipecat.utils.time import time_now_iso8601
37
-
38
- from nemo.agents.voice_agent.pipecat.frames.frames import DiarResultFrame
39
- from nemo.agents.voice_agent.pipecat.services.nemo.audio_logger import AudioLogger
40
-
41
-
42
- class NeMoTurnTakingService(FrameProcessor):
43
- """Service for handling turn-taking in voice conversations with backchannel detection."""
44
-
45
- def __init__(
46
- self,
47
- backchannel_phrases: Union[str, List[str]] = None,
48
- eou_string: str = "<EOU>",
49
- eob_string: str = "<EOB>",
50
- language: Language = Language.EN_US,
51
- use_vad: bool = True,
52
- use_diar: bool = False,
53
- max_buffer_size: int = 2,
54
- bot_stop_delay: float = 0.5,
55
- audio_logger: Optional[AudioLogger] = None,
56
- can_create_user_frames: bool = True,
57
- **kwargs,
58
- ):
59
- super().__init__(**kwargs)
60
- self.eou_string = eou_string
61
- self.eob_string = eob_string
62
- self.language = language
63
- self.use_vad = use_vad
64
- self.use_diar = use_diar
65
- self.max_buffer_size = max_buffer_size
66
-
67
- self.backchannel_phrases = self._load_backchannel_phrases(backchannel_phrases)
68
- self.backchannel_phrases_nopc = set([self.clean_text(phrase) for phrase in self.backchannel_phrases])
69
- self.bot_stop_delay = bot_stop_delay
70
- self.can_create_user_frames = can_create_user_frames
71
- # internal data
72
- self._current_speaker_id = None
73
- self._prev_speaker_id = None
74
- self._bot_stop_time = None
75
- self._bot_speaking = False
76
- self._vad_user_speaking = False
77
- self._have_sent_user_started_speaking = False
78
- self._user_speaking_buffer = ""
79
- self._audio_logger = audio_logger
80
- if not self.use_vad:
81
- # if vad is not used, we assume the user is always speaking
82
- self._vad_user_speaking = True
83
-
84
- def _load_backchannel_phrases(self, backchannel_phrases: Optional[Union[str, List[str]]] = None):
85
- if not backchannel_phrases:
86
- return []
87
-
88
- if isinstance(backchannel_phrases, str) and Path(backchannel_phrases).is_file():
89
- logger.info(f"Loading backchannel phrases from file: {backchannel_phrases}")
90
- if not Path(backchannel_phrases).exists():
91
- raise FileNotFoundError(f"Backchannel phrases file not found: {backchannel_phrases}")
92
- with open(backchannel_phrases, "r") as f:
93
- backchannel_phrases = yaml.safe_load(f)
94
- if not isinstance(backchannel_phrases, list):
95
- raise ValueError(f"Backchannel phrases must be a list, got {type(backchannel_phrases)}")
96
- logger.info(f"Loaded {len(backchannel_phrases)} backchannel phrases from file: {backchannel_phrases}")
97
- elif isinstance(backchannel_phrases, list):
98
- logger.info(f"Using backchannel phrases from list: {backchannel_phrases}")
99
- else:
100
- raise ValueError(f"Invalid backchannel phrases: {backchannel_phrases}")
101
- return backchannel_phrases
102
-
103
- def reset(self):
104
- """
105
- Reset the turn-taking service.
106
- """
107
- self._current_speaker_id = None
108
- self._prev_speaker_id = None
109
- self._bot_stop_time = None
110
- self._bot_speaking = False
111
- self._vad_user_speaking = False
112
- self._have_sent_user_started_speaking = False
113
- self._user_speaking_buffer = ""
114
- if not self.use_vad:
115
- # if vad is not used, we assume the user is always speaking
116
- self._vad_user_speaking = True
117
- logger.debug("TurnTaking service reset complete")
118
-
119
- def clean_text(self, text: str) -> str:
120
- """
121
- Clean the text so that it can be used for backchannel detection.
122
- """
123
- if self.language != Language.EN_US:
124
- raise ValueError(f"Language {self.language} not supported, currently only English is supported.")
125
- for eou_string in [self.eou_string, self.eob_string]:
126
- if text.endswith(eou_string):
127
- text = text[: -len(eou_string)].strip()
128
- text = text.lower()
129
- valid_chars = "abcdefghijklmnopqrstuvwxyz'"
130
- text = ''.join([c for c in text if c in valid_chars or c.isspace() or c == "'"])
131
- return " ".join(text.split()).strip()
132
-
133
- def is_backchannel(self, text: str) -> bool:
134
- """
135
- Check if the text is a backchannel phrase.
136
- """
137
- if not self.backchannel_phrases:
138
- return False
139
- if text.startswith("<speaker_"):
140
- # if the text starts with a speaker tag, we remove it
141
- text = text[len("<speaker_0>") :]
142
- text = self.clean_text(text)
143
- return text in self.backchannel_phrases_nopc
144
-
145
- async def process_frame(self, frame: Frame, direction: FrameDirection):
146
- """Process incoming frames and handle turn-taking logic."""
147
- await super().process_frame(frame, direction)
148
-
149
- if self._bot_stop_time is not None:
150
- # check if the bot has stopped speaking for more than the delay
151
- if time.time() - self._bot_stop_time > self.bot_stop_delay:
152
- # set the _bot_speaking flag to False to actually consider the bot as stopped speaking
153
- logger.debug(
154
- f"Bot stopped speaking for more than {self.bot_stop_delay} seconds, setting _bot_speaking to False"
155
- )
156
- self._bot_stop_time = None
157
- self._bot_speaking = False
158
-
159
- if isinstance(frame, (TranscriptionFrame, InterimTranscriptionFrame)):
160
- await self._handle_transcription(frame, direction)
161
- elif isinstance(frame, VADUserStartedSpeakingFrame):
162
- await self._handle_vad_user_started_speaking(frame, direction)
163
- elif isinstance(frame, VADUserStoppedSpeakingFrame):
164
- await self._handle_vad_user_stopped_speaking(frame, direction)
165
- elif isinstance(frame, BotStartedSpeakingFrame):
166
- logger.debug("BotStartedSpeakingFrame received")
167
- self._bot_speaking = True
168
- # Capture the actual start time when audio starts playing
169
- # This is more accurate than capturing during TTS generation
170
- if self._audio_logger:
171
- self._audio_logger.set_agent_turn_start_time()
172
- elif isinstance(frame, BotStoppedSpeakingFrame):
173
- logger.debug("BotStoppedSpeakingFrame received")
174
- self._bot_stop_time = time.time()
175
- if self.bot_stop_delay is None or self.bot_stop_delay <= 0:
176
- # only set the flag if the delay is not set or is 0
177
- self._bot_speaking = False
178
- logger.debug("Setting _bot_speaking to False")
179
- elif isinstance(frame, DiarResultFrame):
180
- logger.debug("DiarResultFrame received")
181
- await self._handle_diar_result(frame, direction)
182
- else:
183
- await self.push_frame(frame, direction)
184
-
185
- async def _handle_backchannel_text(self, text: str):
186
- # ignore the backchannel string while bot is speaking
187
- # push the backchannel string upstream, not downstream
188
- await self.push_frame(
189
- TranscriptionFrame(
190
- text=f"({text})",
191
- user_id="",
192
- timestamp=time_now_iso8601(),
193
- language=self.language if self.language else Language.EN_US,
194
- result={"text": f"Backchannel detected: {text}"},
195
- ),
196
- direction=FrameDirection.UPSTREAM,
197
- )
198
-
199
- async def _handle_transcription(
200
- self, frame: TranscriptionFrame | InterimTranscriptionFrame, direction: FrameDirection
201
- ):
202
- text_segment = frame.text
203
- if self._vad_user_speaking:
204
- self._user_speaking_buffer += text_segment
205
- has_eou = self._user_speaking_buffer.endswith(self.eou_string)
206
- has_eob = self._user_speaking_buffer.endswith(self.eob_string)
207
- if has_eou:
208
- # EOU detected, user is done speaking - push completed text and interrupt bot
209
- logger.debug(f"<EOU> Detected: `{self._user_speaking_buffer}`")
210
- completed_text = self._user_speaking_buffer[: -len(self.eou_string)].strip()
211
- if self._bot_speaking and self.is_backchannel(completed_text):
212
- logger.debug(f"<EOU> detected for a backchannel phrase while bot is speaking: `{completed_text}`")
213
- await self._handle_backchannel_text(completed_text)
214
- if self._audio_logger:
215
- if self._audio_logger.staged_metadata is None:
216
- self._audio_logger.staged_metadata = {"is_backchannel": True, "start_time": datetime.now()}
217
- else:
218
- self._audio_logger.staged_metadata["is_backchannel"] = True
219
-
220
- else:
221
- await self._handle_completed_text(completed_text, direction)
222
- await self._handle_user_interruption(UserStoppedSpeakingFrame())
223
- self._user_speaking_buffer = ""
224
- self._have_sent_user_started_speaking = False # user is done speaking, so we reset the flag
225
- elif has_eob and self._bot_speaking:
226
- logger.debug(f"<EOB> detected while bot is speaking: `{self._user_speaking_buffer}`")
227
- await self._handle_backchannel_text(str(self._user_speaking_buffer))
228
- if self._audio_logger:
229
- if self._audio_logger.staged_metadata is None:
230
- self._audio_logger.staged_metadata = {"is_backchannel": True, "start_time": datetime.now()}
231
- else:
232
- self._audio_logger.staged_metadata["is_backchannel"] = True
233
- self._user_speaking_buffer = ""
234
- self._have_sent_user_started_speaking = False # user is done speaking, so we reset the flag
235
- else:
236
- # if bot is not speaking, the backchannel string is not considered a backchannel phrase
237
- # user is still speaking, so we append the text segment to the buffer
238
- logger.debug(f"User is speaking: `{self._user_speaking_buffer}`")
239
- if has_eob:
240
- logger.debug(
241
- f"{self.eob_string} detected but ignored (bot NOT speaking): "
242
- f"`{self._user_speaking_buffer}`"
243
- )
244
- self._user_speaking_buffer = self._user_speaking_buffer[: -len(self.eob_string)].strip()
245
- # assume the last word is not completed
246
- completed_words = self._user_speaking_buffer.strip().split()[:-1]
247
- if len(completed_words) >= self.max_buffer_size:
248
- completed_text = " ".join(completed_words)
249
- await self._handle_completed_text(completed_text, direction, is_final=False)
250
-
251
- else:
252
- # if vad is not detecting user speaking
253
- logger.debug(
254
- f"VAD is not detecting user speaking, but still received text segment from STT: `{text_segment}`"
255
- )
256
- is_backchannel = self.is_backchannel(text_segment)
257
- if text_segment.endswith(self.eob_string):
258
- is_backchannel = True
259
- logger.debug(f"Dropping EOB token: `{text_segment}`")
260
- text_segment = text_segment[: -len(self.eob_string)].strip()
261
- elif text_segment.endswith(self.eou_string):
262
- logger.debug(f"Dropping EOU token: `{text_segment}`")
263
- text_segment = text_segment[: -len(self.eou_string)].strip()
264
-
265
- if not text_segment.strip():
266
- return
267
- if is_backchannel and self._bot_speaking:
268
- logger.debug(f"Backchannel detected while bot is speaking: `{text_segment}`")
269
- # push the backchannel string upstream, not downstream
270
- curr_text = str(self._user_speaking_buffer + text_segment)
271
- self._user_speaking_buffer = ""
272
- if self._audio_logger:
273
- if self._audio_logger.staged_metadata is None:
274
- self._audio_logger.staged_metadata = {"is_backchannel": True, "start_time": datetime.now()}
275
- else:
276
- self._audio_logger.staged_metadata["is_backchannel"] = True
277
- await self.push_frame(
278
- TranscriptionFrame(
279
- text=f"({curr_text})",
280
- user_id="",
281
- timestamp=time_now_iso8601(),
282
- language=self.language if self.language else Language.EN_US,
283
- result={"text": f"Backchannel detected: {self._user_speaking_buffer+text_segment}"},
284
- ),
285
- direction=FrameDirection.UPSTREAM,
286
- )
287
-
288
- else:
289
- # if the text segment is not empty and have non-space characters, we append it to the buffer
290
- self._user_speaking_buffer += text_segment
291
- if self.is_backchannel(self._user_speaking_buffer):
292
- logger.debug(f"Backchannel detected: `{self._user_speaking_buffer}`")
293
- self._user_speaking_buffer = ""
294
- self._have_sent_user_started_speaking = False
295
- return
296
- logger.debug(f"Appending text segment to user speaking buffer: `{self._user_speaking_buffer}`")
297
-
298
- async def _handle_completed_text(self, completed_text: str, direction: FrameDirection, is_final: bool = True):
299
- if not self._have_sent_user_started_speaking:
300
- # if we haven't sent the user started speaking frame, we send it now
301
- # so that the bot can be interrupted and be ready to respond to the new user turn
302
- await self._handle_user_interruption(UserStartedSpeakingFrame())
303
- self._have_sent_user_started_speaking = True
304
-
305
- completed_text = completed_text.strip()
306
- completed_text = completed_text.replace(self.eou_string, "").replace(self.eob_string, "")
307
-
308
- if self.use_diar and not completed_text.startswith("<speaker_") and self._prev_speaker_id is not None:
309
- # Add the previous speaker tag to the beginning of the text
310
- completed_text = f"<speaker_{self._prev_speaker_id}> {completed_text}"
311
-
312
- frame_type = TranscriptionFrame if is_final else InterimTranscriptionFrame
313
- text_frame = frame_type(
314
- text=completed_text,
315
- user_id="", # No speaker ID in this implementation
316
- timestamp=time_now_iso8601(),
317
- language=self.language if self.language else Language.EN_US,
318
- result={"text": completed_text},
319
- )
320
- logger.debug(f"Pushing text frame: {text_frame}")
321
- await self.push_frame(text_frame, direction)
322
-
323
- def _contains_only_speaker_tags(self, text: str) -> bool:
324
- """
325
- Check if the text contains only speaker tags.
326
- """
327
- return text.strip().startswith("<speaker_") and text.strip().endswith(">")
328
-
329
- async def _handle_vad_user_started_speaking(self, frame: VADUserStartedSpeakingFrame, direction: FrameDirection):
330
- """
331
- Handle the user started speaking frame.
332
-
333
- If there are no backchannel phrases and we haven't sent the user started speaking frame, we send it now
334
- so that the bot can be interrupted and be ready to respond to the new user turn
335
- """
336
- self._vad_user_speaking = True
337
- logger.debug("NeMoTurnTakingService: VADUserStartedSpeakingFrame")
338
- await self.push_frame(frame, direction)
339
- if not self.backchannel_phrases and not self._have_sent_user_started_speaking:
340
- await self._handle_user_interruption(UserStartedSpeakingFrame())
341
- self._have_sent_user_started_speaking = True
342
-
343
- async def _handle_vad_user_stopped_speaking(self, frame: VADUserStoppedSpeakingFrame, direction: FrameDirection):
344
- """
345
- Handle the user stopped speaking frame.
346
-
347
- If the buffer is not empty:
348
- - If bot is not speaking: push completed text regardless of backchannel
349
- - If bot is speaking: ignore backchannel strings
350
- If the buffer is empty, do nothing.
351
- """
352
- if self.use_vad:
353
- self._vad_user_speaking = False
354
- logger.debug("NeMoTurnTakingService: VADUserStoppedSpeakingFrame")
355
- await self.push_frame(frame, direction)
356
-
357
- # if user buffer only contains speaker tags, we don't push the completed text frame
358
- if self._contains_only_speaker_tags(self._user_speaking_buffer):
359
- logger.debug(f"User buffer only contains speaker tags: `{self._user_speaking_buffer}`, ignoring")
360
- return
361
-
362
- is_backchannel = self.is_backchannel(self._user_speaking_buffer)
363
- if not self._user_speaking_buffer:
364
- return
365
- if not self._bot_speaking or not is_backchannel:
366
- logger.debug(f"Bot talking: {self._bot_speaking}, backchannel: {is_backchannel}")
367
- logger.debug(f"Pushing completed text frame for VAD user stopped speaking: {self._user_speaking_buffer}")
368
- await self._handle_completed_text(self._user_speaking_buffer, direction)
369
- self._user_speaking_buffer = ""
370
- if self._have_sent_user_started_speaking:
371
- await self._handle_user_interruption(UserStoppedSpeakingFrame())
372
- self._have_sent_user_started_speaking = False
373
- elif is_backchannel:
374
- logger.debug(f"Backchannel detected: `{self._user_speaking_buffer}`")
375
- if self._audio_logger:
376
- self._audio_logger.save_user_audio(is_backchannel=True)
377
- logger.debug(
378
- f"[TurnTakingService] Saved backchannel audio (VAD stopped): {self._user_speaking_buffer}"
379
- )
380
-
381
- await self.push_frame(
382
- TranscriptionFrame(
383
- text=f"({self._user_speaking_buffer})",
384
- user_id="",
385
- timestamp=time_now_iso8601(),
386
- language=self.language if self.language else Language.EN_US,
387
- result={"text": f"Backchannel detected: {self._user_speaking_buffer}"},
388
- ),
389
- direction=FrameDirection.UPSTREAM,
390
- )
391
- self._user_speaking_buffer = ""
392
- self._have_sent_user_started_speaking = False
393
-
394
- async def _handle_user_interruption(self, frame: Frame):
395
- # Adapted from BaseInputTransport._handle_user_interruption
396
- if isinstance(frame, UserStartedSpeakingFrame):
397
- logger.debug("User started speaking")
398
- if self.can_create_user_frames:
399
- logger.debug("Pushing UserStartedSpeakingFrame and StartInterruptionFrame")
400
- await self.push_frame(frame)
401
- await self.push_frame(StartInterruptionFrame(), direction=FrameDirection.DOWNSTREAM)
402
- else:
403
- logger.debug(
404
- "Skipping UserStartedSpeakingFrame and StartInterruptionFrame because can_create_user_frames is False"
405
- )
406
- # Record cutoff time for agent audio when TTS is interrupted
407
- if self._audio_logger and self._bot_speaking:
408
- self._audio_logger.set_agent_cutoff_time()
409
- # Increment turn index when user starts speaking (only if speaker changed)
410
- if self._audio_logger:
411
- self._audio_logger.increment_turn_index(speaker="user")
412
- elif isinstance(frame, UserStoppedSpeakingFrame):
413
- logger.debug("User stopped speaking")
414
- if self.can_create_user_frames:
415
- logger.debug("Pushing UserStoppedSpeakingFrame")
416
- await self.push_frame(frame)
417
- else:
418
- logger.debug("Skipping UserStoppedSpeakingFrame because can_create_user_frames is False")
419
- else:
420
- logger.debug(f"Unknown frame type for _handle_user_interruption: {type(frame)}")
421
-
422
- async def _handle_diar_result(self, frame: DiarResultFrame, direction: FrameDirection):
423
- if not self.use_diar:
424
- logger.debug("Diarization is disabled, skipping")
425
- return
426
-
427
- new_speaker_id = frame.diar_result # speaker id of the dominant speaker
428
-
429
- # logger.debug(f"Dominant speaker ID: {dominant_speaker_id}")
430
- self._prev_speaker_id = self._current_speaker_id
431
- last_speaker_id = self._current_speaker_id
432
-
433
- if not self._user_speaking_buffer.startswith("<speaker_"):
434
- # add speaker tag <speaker_{speaker_id}> to the beginning of the current utterance
435
- self._user_speaking_buffer = f"<speaker_{new_speaker_id}> {self._user_speaking_buffer}"
436
- elif last_speaker_id != new_speaker_id:
437
- # change the speaker tag to the dominant speaker id
438
- self._user_speaking_buffer = self._user_speaking_buffer[len("<speaker_0>") :]
439
- self._user_speaking_buffer = f"<speaker_{new_speaker_id}> {self._user_speaking_buffer}"
440
- logger.debug(f"Speaker changed from {last_speaker_id} to {new_speaker_id}")
441
- self._current_speaker_id = new_speaker_id
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/services/nemo/utils.py DELETED
@@ -1,197 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- # NOTE: This file will be deprecated in the future, as the new inference pipeline will replace it.
15
-
16
- import math
17
-
18
- import numpy as np
19
- import torch
20
- from omegaconf import DictConfig
21
-
22
- import nemo.collections.asr as nemo_asr
23
-
24
- LOG_MEL_ZERO = -16.635
25
-
26
-
27
- class AudioBufferer:
28
- def __init__(self, sample_rate: int, buffer_size_in_secs: float):
29
- self.buffer_size = int(buffer_size_in_secs * sample_rate)
30
- self.sample_buffer = torch.zeros(self.buffer_size, dtype=torch.float32)
31
-
32
- def reset(self) -> None:
33
- """
34
- Reset the buffer to zero
35
- """
36
- self.sample_buffer.zero_()
37
-
38
- def update(self, audio: np.ndarray) -> None:
39
- """
40
- Update the buffer with the new frame
41
- Args:
42
- frame (Frame): frame to update the buffer with
43
- """
44
- if not isinstance(audio, torch.Tensor):
45
- audio = torch.from_numpy(audio)
46
-
47
- audio_size = audio.shape[0]
48
- if audio_size > self.buffer_size:
49
- raise ValueError(f"Frame size ({audio_size}) exceeds buffer size ({self.buffer_size})")
50
-
51
- shift = audio_size
52
- self.sample_buffer[:-shift] = self.sample_buffer[shift:].clone()
53
- self.sample_buffer[-shift:] = audio.clone()
54
-
55
- def get_buffer(self) -> torch.Tensor:
56
- """
57
- Get the current buffer
58
- Returns:
59
- torch.Tensor: current state of the buffer
60
- """
61
- return self.sample_buffer.clone()
62
-
63
- def is_buffer_empty(self) -> bool:
64
- """
65
- Check if the buffer is empty
66
- Returns:
67
- bool: True if the buffer is empty, False otherwise
68
- """
69
- return self.sample_buffer.sum() == 0
70
-
71
-
72
- class CacheFeatureBufferer:
73
- def __init__(
74
- self,
75
- sample_rate: int,
76
- buffer_size_in_secs: float,
77
- chunk_size_in_secs: float,
78
- preprocessor_cfg: DictConfig,
79
- device: torch.device,
80
- fill_value: float = LOG_MEL_ZERO,
81
- ):
82
-
83
- if buffer_size_in_secs < chunk_size_in_secs:
84
- raise ValueError(
85
- f"Buffer size ({buffer_size_in_secs}s) should be no less than chunk size ({chunk_size_in_secs}s)"
86
- )
87
-
88
- self.sample_rate = sample_rate
89
- self.buffer_size_in_secs = buffer_size_in_secs
90
- self.chunk_size_in_secs = chunk_size_in_secs
91
- self.device = device
92
-
93
- if hasattr(preprocessor_cfg, 'log') and preprocessor_cfg.log:
94
- self.ZERO_LEVEL_SPEC_DB_VAL = LOG_MEL_ZERO # Log-Mel spectrogram value for zero signals
95
- else:
96
- self.ZERO_LEVEL_SPEC_DB_VAL = fill_value
97
-
98
- self.n_feat = preprocessor_cfg.features
99
- self.timestep_duration = preprocessor_cfg.window_stride
100
- self.n_chunk_look_back = int(self.timestep_duration * self.sample_rate)
101
- self.chunk_size = int(self.chunk_size_in_secs * self.sample_rate)
102
- self.sample_buffer = AudioBufferer(sample_rate, buffer_size_in_secs)
103
-
104
- self.feature_buffer_len = int(buffer_size_in_secs / self.timestep_duration)
105
- self.feature_chunk_len = int(chunk_size_in_secs / self.timestep_duration)
106
- self.feature_buffer = torch.full(
107
- [self.n_feat, self.feature_buffer_len],
108
- self.ZERO_LEVEL_SPEC_DB_VAL,
109
- dtype=torch.float32,
110
- device=self.device,
111
- )
112
-
113
- self.preprocessor = nemo_asr.models.ASRModel.from_config_dict(preprocessor_cfg)
114
- self.preprocessor.to(self.device)
115
-
116
- def is_buffer_empty(self) -> bool:
117
- """
118
- Check if the buffer is empty
119
- Returns:
120
- bool: True if the buffer is empty, False otherwise
121
- """
122
- return self.sample_buffer.is_buffer_empty()
123
-
124
- def reset(self) -> None:
125
- """
126
- Reset the buffer to zero
127
- """
128
- self.sample_buffer.reset()
129
- self.feature_buffer.fill_(self.ZERO_LEVEL_SPEC_DB_VAL)
130
-
131
- def _update_feature_buffer(self, feat_chunk: torch.Tensor) -> None:
132
- """
133
- Add an extracted feature to `feature_buffer`
134
- """
135
- self.feature_buffer[:, : -self.feature_chunk_len] = self.feature_buffer[:, self.feature_chunk_len :].clone()
136
- self.feature_buffer[:, -self.feature_chunk_len :] = feat_chunk.clone()
137
-
138
- def preprocess(self, audio_signal: torch.Tensor) -> torch.Tensor:
139
- """
140
- Preprocess the audio signal using the preprocessor
141
- Args:
142
- audio_signal (torch.Tensor): audio signal
143
- Returns:
144
- torch.Tensor: preprocessed features
145
- """
146
- audio_signal = audio_signal.unsqueeze_(0).to(self.device)
147
- audio_signal_len = torch.tensor([audio_signal.shape[1]], device=self.device)
148
- features, _ = self.preprocessor(
149
- input_signal=audio_signal,
150
- length=audio_signal_len,
151
- )
152
- features = features.squeeze()
153
- return features
154
-
155
- def update(self, audio: np.ndarray) -> None:
156
- """
157
- Update the sample anf feature buffers with the new frame
158
- Args:
159
- frame (Frame): frame to update the buffer with
160
- """
161
-
162
- # Update the sample buffer with the new frame
163
- self.sample_buffer.update(audio)
164
-
165
- if math.isclose(self.buffer_size_in_secs, self.chunk_size_in_secs):
166
- # If the buffer size is equal to the chunk size, just take the whole buffer
167
- samples = self.sample_buffer.sample_buffer.clone()
168
- else:
169
- # Add look_back to have context for the first feature
170
- samples = self.sample_buffer.sample_buffer[-(self.n_chunk_look_back + self.chunk_size) :]
171
-
172
- # Get the mel spectrogram
173
- features = self.preprocess(samples)
174
-
175
- # If the features are longer than supposed to be, drop the last frames
176
- # Drop the last diff frames because they might be incomplete
177
- if (diff := features.shape[1] - self.feature_chunk_len - 1) > 0:
178
- features = features[:, :-diff]
179
-
180
- # Update the feature buffer with the new features
181
- self._update_feature_buffer(features[:, -self.feature_chunk_len :])
182
-
183
- def get_buffer(self) -> torch.Tensor:
184
- """
185
- Get the current sample buffer
186
- Returns:
187
- torch.Tensor: current state of the buffer
188
- """
189
- return self.sample_buffer.get_buffer()
190
-
191
- def get_feature_buffer(self) -> torch.Tensor:
192
- """
193
- Get the current feature buffer
194
- Returns:
195
- torch.Tensor: current state of the feature buffer
196
- """
197
- return self.feature_buffer.clone()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/transports/__init__.py DELETED
@@ -1,13 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/transports/base_input.py DELETED
@@ -1,58 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
-
16
- from loguru import logger
17
- from pipecat.audio.vad.vad_analyzer import VADState
18
- from pipecat.frames.frames import (
19
- InputAudioRawFrame,
20
- UserStartedSpeakingFrame,
21
- UserStoppedSpeakingFrame,
22
- VADUserStartedSpeakingFrame,
23
- VADUserStoppedSpeakingFrame,
24
- )
25
- from pipecat.transports.base_input import BaseInputTransport as _BaseInputTransport
26
-
27
-
28
- class BaseInputTransport(_BaseInputTransport):
29
- async def _handle_vad(self, audio_frame: InputAudioRawFrame, vad_state: VADState):
30
- """Handle Voice Activity Detection results and generate appropriate frames."""
31
- new_vad_state = await self._vad_analyze(audio_frame)
32
- if new_vad_state != vad_state and new_vad_state != VADState.STARTING and new_vad_state != VADState.STOPPING:
33
- frame = None
34
- # If the turn analyser is enabled, this will prevent:
35
- # - Creating the UserStoppedSpeakingFrame
36
- # - Creating the UserStartedSpeakingFrame multiple times
37
- can_create_user_frames = (
38
- self._params.turn_analyzer is None or not self._params.turn_analyzer.speech_triggered
39
- ) and self._params.can_create_user_frames
40
-
41
- if new_vad_state == VADState.SPEAKING:
42
- await self.push_frame(VADUserStartedSpeakingFrame())
43
- if can_create_user_frames:
44
- frame = UserStartedSpeakingFrame()
45
- else:
46
- logger.debug("base_input: VAD state changed to SPEAKING but can_create_user_frames is False")
47
- elif new_vad_state == VADState.QUIET:
48
- await self.push_frame(VADUserStoppedSpeakingFrame())
49
- if can_create_user_frames:
50
- frame = UserStoppedSpeakingFrame()
51
- else:
52
- logger.debug("base_input: VAD state changed to QUIET but can_create_user_frames is False")
53
-
54
- if frame:
55
- await self._handle_user_interruption(frame)
56
-
57
- vad_state = new_vad_state
58
- return vad_state
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/transports/base_transport.py DELETED
@@ -1,20 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
-
16
- from pipecat.transports.base_transport import TransportParams as _TransportParams
17
-
18
-
19
- class TransportParams(_TransportParams):
20
- can_create_user_frames: bool = True
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/transports/network/__init__.py DELETED
@@ -1,13 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/transports/network/websocket_server.py DELETED
@@ -1,304 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
-
16
- import asyncio
17
- from typing import Optional
18
-
19
- from loguru import logger
20
- from pipecat.frames.frames import CancelFrame, EndFrame, InputAudioRawFrame, StartFrame
21
- from pipecat.serializers.base_serializer import FrameSerializer
22
- from pipecat.transports.base_transport import BaseTransport
23
- from pipecat.transports.network.websocket_server import (
24
- WebsocketServerCallbacks,
25
- WebsocketServerOutputTransport,
26
- WebsocketServerParams,
27
- )
28
-
29
- from nemo.agents.voice_agent.pipecat.transports.base_input import BaseInputTransport
30
- from nemo.agents.voice_agent.pipecat.transports.base_transport import TransportParams
31
-
32
- try:
33
- import websockets
34
- except ModuleNotFoundError as e:
35
- logger.error(f"Exception: {e}")
36
- logger.error("In order to use websockets, you need to `pip install pipecat-ai[websocket]`.")
37
- raise Exception(f"Missing module: {e}")
38
-
39
-
40
- class WebsocketServerParams(TransportParams):
41
- """Configuration parameters for WebSocket server transport.
42
-
43
- Parameters:
44
- add_wav_header: Whether to add WAV headers to audio frames.
45
- serializer: Frame serializer for message encoding/decoding.
46
- session_timeout: Timeout in seconds for client sessions.
47
- """
48
-
49
- add_wav_header: bool = False
50
- serializer: Optional[FrameSerializer] = None
51
- session_timeout: Optional[int] = None
52
-
53
-
54
- class WebsocketServerInputTransport(BaseInputTransport):
55
- """WebSocket server input transport for receiving client data.
56
-
57
- Handles incoming WebSocket connections, message processing, and client
58
- session management including timeout monitoring and connection lifecycle.
59
- """
60
-
61
- def __init__(
62
- self,
63
- transport: BaseTransport,
64
- host: str,
65
- port: int,
66
- params: WebsocketServerParams,
67
- callbacks: WebsocketServerCallbacks,
68
- **kwargs,
69
- ):
70
- """Initialize the WebSocket server input transport.
71
-
72
- Args:
73
- transport: The parent transport instance.
74
- host: Host address to bind the WebSocket server to.
75
- port: Port number to bind the WebSocket server to.
76
- params: WebSocket server configuration parameters.
77
- callbacks: Callback functions for WebSocket events.
78
- **kwargs: Additional arguments passed to parent class.
79
- """
80
- super().__init__(params, **kwargs)
81
-
82
- self._transport = transport
83
- self._host = host
84
- self._port = port
85
- self._params = params
86
- self._callbacks = callbacks
87
-
88
- self._websocket: Optional[websockets.WebSocketServerProtocol] = None
89
-
90
- self._server_task = None
91
-
92
- # This task will monitor the websocket connection periodically.
93
- self._monitor_task = None
94
-
95
- self._stop_server_event = asyncio.Event()
96
-
97
- # Whether we have seen a StartFrame already.
98
- self._initialized = False
99
-
100
- async def start(self, frame: StartFrame):
101
- """Start the WebSocket server and initialize components.
102
-
103
- Args:
104
- frame: The start frame containing initialization parameters.
105
- """
106
- await super().start(frame)
107
-
108
- if self._initialized:
109
- return
110
-
111
- self._initialized = True
112
-
113
- if self._params.serializer:
114
- await self._params.serializer.setup(frame)
115
- if not self._server_task:
116
- self._server_task = self.create_task(self._server_task_handler())
117
- await self.set_transport_ready(frame)
118
-
119
- async def stop(self, frame: EndFrame):
120
- """Stop the WebSocket server and cleanup resources.
121
-
122
- Args:
123
- frame: The end frame signaling transport shutdown.
124
- """
125
- await super().stop(frame)
126
- self._stop_server_event.set()
127
- if self._monitor_task:
128
- await self.cancel_task(self._monitor_task)
129
- self._monitor_task = None
130
- if self._server_task:
131
- await self.wait_for_task(self._server_task)
132
- self._server_task = None
133
-
134
- async def cancel(self, frame: CancelFrame):
135
- """Cancel the WebSocket server and stop all processing.
136
-
137
- Args:
138
- frame: The cancel frame signaling immediate cancellation.
139
- """
140
- await super().cancel(frame)
141
- if self._monitor_task:
142
- await self.cancel_task(self._monitor_task)
143
- self._monitor_task = None
144
- if self._server_task:
145
- await self.cancel_task(self._server_task)
146
- self._server_task = None
147
-
148
- async def cleanup(self):
149
- """Cleanup resources and parent transport."""
150
- await super().cleanup()
151
- await self._transport.cleanup()
152
-
153
- async def _server_task_handler(self):
154
- """Handle WebSocket server startup and client connections."""
155
- logger.info(f"Starting websocket server on {self._host}:{self._port}")
156
- async with websockets.serve(self._client_handler, self._host, self._port) as server:
157
- await self._callbacks.on_websocket_ready()
158
- await self._stop_server_event.wait()
159
-
160
- async def _client_handler(self, websocket: websockets.WebSocketServerProtocol, path: Optional[str] = None):
161
- """Handle individual client connections and message processing."""
162
- logger.info(f"New client connection from {websocket.remote_address}")
163
- if self._websocket:
164
- await self._websocket.close()
165
- logger.warning("Only one client connected, using new connection")
166
-
167
- self._websocket = websocket
168
-
169
- # Notify
170
- await self._callbacks.on_client_connected(websocket)
171
-
172
- # Create a task to monitor the websocket connection
173
- if not self._monitor_task and self._params.session_timeout:
174
- self._monitor_task = self.create_task(self._monitor_websocket(websocket, self._params.session_timeout))
175
-
176
- # Handle incoming messages
177
- try:
178
- async for message in websocket:
179
- if not self._params.serializer:
180
- continue
181
-
182
- frame = await self._params.serializer.deserialize(message)
183
-
184
- if not frame:
185
- continue
186
-
187
- if isinstance(frame, InputAudioRawFrame):
188
- await self.push_audio_frame(frame)
189
- else:
190
- await self.push_frame(frame)
191
- except Exception as e:
192
- logger.error(f"{self} exception receiving data: {e.__class__.__name__} ({e})")
193
-
194
- # Notify disconnection
195
- await self._callbacks.on_client_disconnected(websocket)
196
-
197
- await self._websocket.close()
198
- self._websocket = None
199
-
200
- logger.info(f"Client {websocket.remote_address} disconnected")
201
-
202
- async def _monitor_websocket(self, websocket: websockets.WebSocketServerProtocol, session_timeout: int):
203
- """Monitor WebSocket connection for session timeout."""
204
- try:
205
- await asyncio.sleep(session_timeout)
206
- if not websocket.closed:
207
- await self._callbacks.on_session_timeout(websocket)
208
- except asyncio.CancelledError:
209
- logger.info(f"Monitoring task cancelled for: {websocket.remote_address}")
210
- raise
211
-
212
-
213
- class WebsocketServerTransport(BaseTransport):
214
- """WebSocket server transport for bidirectional real-time communication.
215
-
216
- Provides a complete WebSocket server implementation with separate input and
217
- output transports, client connection management, and event handling for
218
- real-time audio and data streaming applications.
219
- """
220
-
221
- def __init__(
222
- self,
223
- params: WebsocketServerParams,
224
- host: str = "localhost",
225
- port: int = 8765,
226
- input_name: Optional[str] = None,
227
- output_name: Optional[str] = None,
228
- ):
229
- """Initialize the WebSocket server transport.
230
-
231
- Args:
232
- params: WebSocket server configuration parameters.
233
- host: Host address to bind the server to. Defaults to "localhost".
234
- port: Port number to bind the server to. Defaults to 8765.
235
- input_name: Optional name for the input processor.
236
- output_name: Optional name for the output processor.
237
- """
238
- super().__init__(input_name=input_name, output_name=output_name)
239
- self._host = host
240
- self._port = port
241
- self._params = params
242
-
243
- self._callbacks = WebsocketServerCallbacks(
244
- on_client_connected=self._on_client_connected,
245
- on_client_disconnected=self._on_client_disconnected,
246
- on_session_timeout=self._on_session_timeout,
247
- on_websocket_ready=self._on_websocket_ready,
248
- )
249
- self._input: Optional[WebsocketServerInputTransport] = None
250
- self._output: Optional[WebsocketServerOutputTransport] = None
251
- self._websocket: Optional[websockets.WebSocketServerProtocol] = None
252
-
253
- # Register supported handlers. The user will only be able to register
254
- # these handlers.
255
- self._register_event_handler("on_client_connected")
256
- self._register_event_handler("on_client_disconnected")
257
- self._register_event_handler("on_session_timeout")
258
- self._register_event_handler("on_websocket_ready")
259
-
260
- def input(self) -> WebsocketServerInputTransport:
261
- """Get the input transport for receiving client data.
262
-
263
- Returns:
264
- The WebSocket server input transport instance.
265
- """
266
- if not self._input:
267
- self._input = WebsocketServerInputTransport(
268
- self, self._host, self._port, self._params, self._callbacks, name=self._input_name
269
- )
270
- return self._input
271
-
272
- def output(self) -> WebsocketServerOutputTransport:
273
- """Get the output transport for sending data to clients.
274
-
275
- Returns:
276
- The WebSocket server output transport instance.
277
- """
278
- if not self._output:
279
- self._output = WebsocketServerOutputTransport(self, self._params, name=self._output_name)
280
- return self._output
281
-
282
- async def _on_client_connected(self, websocket):
283
- """Handle client connection events."""
284
- if self._output:
285
- await self._output.set_client_connection(websocket)
286
- await self._call_event_handler("on_client_connected", websocket)
287
- else:
288
- logger.error("A WebsocketServerTransport output is missing in the pipeline")
289
-
290
- async def _on_client_disconnected(self, websocket):
291
- """Handle client disconnection events."""
292
- if self._output:
293
- await self._output.set_client_connection(None)
294
- await self._call_event_handler("on_client_disconnected", websocket)
295
- else:
296
- logger.error("A WebsocketServerTransport output is missing in the pipeline")
297
-
298
- async def _on_session_timeout(self, websocket):
299
- """Handle client session timeout events."""
300
- await self._call_event_handler("on_session_timeout", websocket)
301
-
302
- async def _on_websocket_ready(self):
303
- """Handle WebSocket server ready events."""
304
- await self._call_event_handler("on_websocket_ready")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/utils/__init__.py DELETED
@@ -1,13 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/utils/text/__init__.py DELETED
@@ -1,13 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/pipecat/utils/text/simple_text_aggregator.py DELETED
@@ -1,238 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import re
16
- from typing import AsyncIterator, Optional
17
-
18
- from loguru import logger
19
- from pipecat.utils.string import match_endofsentence
20
- from pipecat.utils.text.base_text_aggregator import Aggregation, AggregationType
21
- from pipecat.utils.text.simple_text_aggregator import SimpleTextAggregator
22
-
23
-
24
- def has_partial_decimal(text: str) -> bool:
25
- """Check if the text ends with a partial decimal.
26
-
27
- Returns True if the text ends with a number that looks like it could
28
- be a partial decimal (e.g., "3.", "3.14", "($3.14)"), but NOT if it's
29
- clearly a complete sentence (e.g., "It costs $3.14.") or a bullet point
30
- (e.g., "1. Alpha; 2.").
31
- """
32
-
33
- # Check for bullet point pattern: ends with 1-3 digits followed by period
34
- # Examples: "1.", "12.", "123.", or "text; 2."
35
- # Bullet points are typically small numbers (1-999) at the end
36
- bullet_match = re.search(r'(?:^|[\s;,]|[^\d])(\d{1,3})\.$', text)
37
- if bullet_match:
38
- # It's likely a bullet point, not a partial decimal
39
- return False
40
-
41
- # Pattern to find decimal numbers near the end, allowing for trailing
42
- # non-word characters like ), ], ", ', etc.
43
- # Match: digit(s) + period + optional digit(s) + optional trailing non-word chars
44
- match = re.search(r'\d+\.(?:\d+)?([^\w\s]*)$', text)
45
-
46
- if not match:
47
- return False
48
-
49
- trailing = match.group(1) # e.g., ")" or "" or "."
50
-
51
- # If trailing contains a period, it's sentence-ending punctuation
52
- # e.g., "3.14." means complete sentence
53
- if '.' in trailing:
54
- return False
55
-
56
- # Otherwise, it's a partial decimal (either incomplete like "3."
57
- # or complete number but sentence not finished like "($3.14)")
58
- return True
59
-
60
-
61
- def find_last_period_index(text: str) -> int:
62
- """
63
- Find the last occurrence of a period in the text,
64
- but return -1 if the text doesn't seem to be a complete sentence.
65
- """
66
- num_periods = text.count(".")
67
- if num_periods == 0:
68
- return -1
69
-
70
- if num_periods == 1:
71
- if has_partial_decimal(text):
72
- # if the only period in the text is part of a number, return -1
73
- return -1
74
- # Check if the only period is a bullet point (e.g., "1. Alpha" or incomplete "1.")
75
- if re.search(r'(?:^|[\s;,]|[^\d])(\d{1,3})\.(?:\s+\w|\s*$)', text):
76
- # The period is after a bullet point number, either:
77
- # - followed by content (e.g., "1. Alpha")
78
- # - or at the end with optional whitespace (e.g., "1." or "1. ")
79
- return -1
80
-
81
- # Check if any of the abbreviations "e.", "i." "g.", "etc." are present in the text
82
- if re.search(r'\b(e\.|i\.|g\.)\b', text):
83
- # The period is after a character/word that is likely to be a abbreviation, return -1
84
- return -1
85
-
86
- # otherwise, check the last occurrence of a period
87
- idx = text.rfind(".")
88
- if idx <= 0:
89
- return idx
90
- if text[idx - 1].isdigit():
91
- # if the period is after a digit, it's likely a partial decimal, return -1
92
- return -1
93
- elif text[idx - 1].isupper():
94
- # if the period is after a capital letter (e.g., "Washington, D.C."), it's likely a abbreviation, return -1
95
- return -1
96
- elif idx > 1 and text[idx - 2 : idx + 1].lower() in ["a.m.", "p.m."]:
97
- # if the period is after a.m. or p.m., it's likely a time, return -1
98
- return -1
99
- elif idx > 2 and text[idx - 3 : idx + 1] in ["e.g.", "i.e.", "etc."]:
100
- # The period is after a character/word that is likely to be a abbreviation, return -1
101
- return -1
102
- elif idx >= 2 and text[idx - 2 : idx + 1].lower() in ["st.", "mr.", "mrs.", "ms.", "dr."]:
103
- # if the period is after a character/word that is likely to be a abbreviation, return -1
104
- return -1
105
-
106
- # the text seems to have a complete sentence, return the index of the last period
107
- return idx
108
-
109
-
110
- def find_last_comma_index(text: str, min_residual_length: int = 5) -> int:
111
- """
112
- Find the last occurrence of a valid comma in the text,
113
- ignoring the commas in the numbers (e.g., "1,234,567").
114
- If the leftover text after the comma is too short, it may be an abbreviation, return -1.
115
-
116
- Args:
117
- text: The text to find the last occurrence of a valid comma.
118
- min_residual_length: The minimum length of the leftover text after the rightmost comma
119
- to be considered as a valid sentence (e.g., "Santa Clara, CA, US.").
120
- Returns:
121
- The index of the last occurrence of a valid comma, or -1 if no valid comma is found.
122
- """
123
- # find the last occurrence of a comma in the text
124
- idx = text.rfind(",")
125
- if idx == -1:
126
- return -1
127
- # check if the comma is in a number
128
- if re.search(r'\d+,\d+', text[: idx + 1]):
129
- # the comma is in a number, return -1
130
- return -1
131
-
132
- # check if the leftover text after the comma is too short
133
- if len(text[idx + 1 :]) <= min_residual_length:
134
- # the leftover text is too short, it may be an abbreviation, return -1
135
- return -1
136
-
137
- # the comma is not in a number, return the index of the comma
138
- return idx
139
-
140
-
141
- class SimpleSegmentedTextAggregator(SimpleTextAggregator):
142
- """A simple text aggregator that segments the text into sentences based on punctuation marks."""
143
-
144
- def __init__(
145
- self,
146
- punctuation_marks: str | list[str] = ".,!?;:\n",
147
- ignore_marks: str | list[str] = "*",
148
- min_sentence_length: int = 0,
149
- use_legacy_eos_detection: bool = False,
150
- **kwargs,
151
- ):
152
- """
153
- Args:
154
- punctuation_marks: The punctuation marks to use for sentence detection.
155
- ignore_marks: The strings to ignore in the text (e.g., "*").
156
- min_sentence_length: The minimum length of a sentence to be considered.
157
- use_legacy_eos_detection: Whether to use the legacy EOS detection from pipecat.
158
- **kwargs: Additional arguments to pass to the SimpleTextAggregator constructor.
159
- """
160
- super().__init__(**kwargs)
161
- self._use_legacy_eos_detection = use_legacy_eos_detection
162
- self._min_sentence_length = min_sentence_length
163
- self._ignore_marks = set(["*"] if ignore_marks is None else set(ignore_marks))
164
- if not punctuation_marks:
165
- self._punctuation_marks = list()
166
- else:
167
- punctuation_marks = (
168
- [c for c in punctuation_marks] if isinstance(punctuation_marks, str) else punctuation_marks
169
- )
170
- if "." in punctuation_marks:
171
- punctuation_marks.remove(".")
172
- # put period at the end of the list to ensure it's the last punctuation mark to be matched
173
- punctuation_marks += ["."]
174
- self._punctuation_marks = punctuation_marks
175
-
176
- def _find_segment_end(self, text: str) -> Optional[int]:
177
- """find the end of text segment.
178
-
179
- Args:
180
- text: The text to find the end of the segment.
181
-
182
- Returns:
183
- The index of the end of the segment, or None if the text is too short.
184
- """
185
- # drop leading whitespace but keep trailing whitespace to
186
- # allow "\n" to trigger the end of the sentence
187
- text_len = len(text)
188
- text = text.lstrip()
189
- offset = text_len - len(text)
190
- if len(text) < self._min_sentence_length:
191
- return None
192
-
193
- for punc in self._punctuation_marks:
194
- if punc == ".":
195
- idx = find_last_period_index(text)
196
- elif punc == ",":
197
- idx = find_last_comma_index(text)
198
- else:
199
- idx = text.find(punc)
200
- if idx != -1:
201
- # add the offset to the index to account for the leading whitespace
202
- return idx + 1 + offset
203
- return None
204
-
205
- async def aggregate(self, text: str) -> AsyncIterator[Aggregation]:
206
- """Aggregate the input text and return the first complete sentence in the text.
207
-
208
- Args:
209
- text: The text to aggregate.
210
-
211
- Returns:
212
- The first complete sentence in the text, or None if none is found.
213
- """
214
- result: Optional[str] = None
215
- self._text += str(text)
216
-
217
- eos_end_index = self._find_segment_end(self._text)
218
-
219
- if not eos_end_index and not has_partial_decimal(self._text) and self._use_legacy_eos_detection:
220
- # if the text doesn't have partial decimal, and no punctuation marks,
221
- # we use match_endofsentence to find the end of the sentence
222
- eos_end_index = match_endofsentence(self._text)
223
-
224
- if eos_end_index:
225
- result = self._text[:eos_end_index]
226
- if len(result.strip()) < self._min_sentence_length:
227
- logger.debug(
228
- f"Text is too short, skipping: `{result}`, full text: `{self._text}`, input text: `{text}`"
229
- )
230
- result = None
231
- else:
232
- logger.debug(f"Text Aggregator Result: `{result}`, full text: `{self._text}`, input text: `{text}`")
233
- self._text = self._text[eos_end_index:]
234
-
235
- if result:
236
- for ignore_mark in self._ignore_marks:
237
- result = result.replace(ignore_mark, "")
238
- yield Aggregation(text=result, type=AggregationType.SENTENCE)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/utils/__init__.py DELETED
@@ -1,15 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- from nemo.agents.voice_agent.utils.config_manager import ConfigManager
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/utils/config_manager.py DELETED
@@ -1,312 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import os
16
- from typing import Any, Dict, Optional
17
-
18
- from loguru import logger
19
- from omegaconf import OmegaConf
20
- from pipecat.audio.vad.silero import VADParams
21
-
22
- from nemo.agents.voice_agent.pipecat.services.nemo.diar import NeMoDiarInputParams
23
- from nemo.agents.voice_agent.pipecat.services.nemo.stt import NeMoSTTInputParams
24
-
25
-
26
- class ConfigManager:
27
- """
28
- Manages configuration for the voice agent server.
29
- Handles loading, merging, and providing access to all configuration parameters.
30
- """
31
-
32
- def __init__(self, server_base_path: str, server_config_path: Optional[str] = None):
33
- """
34
- Initialize the configuration manager.
35
-
36
- Args:
37
- config_path: Path to the main server configuration file.
38
- If None, uses default path from environment variable.
39
- """
40
- if not os.path.exists(server_base_path):
41
- raise FileNotFoundError(f"Server base path not found at {server_base_path}")
42
-
43
- self._server_base_path = server_base_path
44
- if server_config_path is not None:
45
- self._server_config_path = server_config_path
46
- else:
47
- self._server_config_path = f"{os.path.abspath(self._server_base_path)}/server_configs/default.yaml"
48
-
49
- if not os.path.exists(self._server_config_path):
50
- raise FileNotFoundError(f"Server configuration file not found at {self._server_config_path}")
51
-
52
- # Load model registry
53
- self.model_registry_path = f"{os.path.abspath(self._server_base_path)}/model_registry.yaml"
54
- self.model_registry = self._load_model_registry()
55
-
56
- # Load and process main configuration
57
- self.server_config = self._load_server_config()
58
-
59
- # Initialize configuration parameters
60
- self._initialize_config_parameters()
61
-
62
- self._generic_hf_llm_model_id = "hf_llm_generic"
63
-
64
- logger.info(f"Configuration loaded from: {self._server_config_path}")
65
- logger.info(f"Model registry loaded from: {self.model_registry_path}")
66
-
67
- def _load_model_registry(self) -> Dict[str, Any]:
68
- """Load model registry from YAML file."""
69
- try:
70
- return OmegaConf.load(self.model_registry_path)
71
- except Exception as e:
72
- logger.error(f"Failed to load model registry: {e}")
73
- raise ValueError(f"Failed to load model registry: {e}")
74
-
75
- def _load_server_config(self) -> OmegaConf:
76
- """Load and process the main server configuration."""
77
- server_config = OmegaConf.load(self._server_config_path)
78
- server_config = OmegaConf.to_container(server_config, resolve=True)
79
- server_config = OmegaConf.create(server_config)
80
- return server_config
81
-
82
- def _initialize_config_parameters(self):
83
- """Initialize all configuration parameters from the loaded config."""
84
- # Default constants
85
- self.SAMPLE_RATE = 16000
86
- self.RAW_AUDIO_FRAME_LEN_IN_SECS = 0.016
87
- self.SYSTEM_PROMPT = " ".join(
88
- [
89
- "You are a helpful AI agent named Lisa.",
90
- "Begin by warmly greeting the user and introducing yourself in one sentence.",
91
- "Keep your answers concise and to the point.",
92
- ]
93
- )
94
-
95
- # Transport configuration
96
- self.TRANSPORT_AUDIO_OUT_10MS_CHUNKS = self.server_config.transport.audio_out_10ms_chunks
97
-
98
- # VAD configuration
99
- self.vad_params = VADParams(
100
- confidence=self.server_config.vad.confidence,
101
- start_secs=self.server_config.vad.start_secs,
102
- stop_secs=self.server_config.vad.stop_secs,
103
- min_volume=self.server_config.vad.min_volume,
104
- )
105
- # STT configuration
106
- self._configure_stt()
107
-
108
- # Diarization configuration
109
- self._configure_diarization()
110
-
111
- # Turn taking configuration
112
- self._configure_turn_taking()
113
-
114
- # LLM configuration
115
- self._configure_llm()
116
-
117
- # TTS configuration
118
- self._configure_tts()
119
-
120
- def _configure_stt(self):
121
- """Configure STT parameters."""
122
- self.STT_MODEL = self.server_config.stt.model
123
- self.STT_DEVICE = self.server_config.stt.device
124
- # Apply STT-specific configuration based on model type
125
- # Try to get STT config file name from server config first
126
- if self.server_config.stt.get("model_config", None) is not None:
127
- yaml_file_name = os.path.basename(self.server_config.stt.model_config)
128
- else:
129
- # Get STT configuration from registry
130
- if str(self.STT_MODEL).endswith(".nemo"):
131
- model_name = os.path.splitext(os.path.basename(self.STT_MODEL))[0]
132
- else:
133
- model_name = self.STT_MODEL
134
- if model_name in self.model_registry.stt_models:
135
- yaml_file_name = self.model_registry.stt_models[model_name].yaml_id
136
- else:
137
- error_msg = f"STT model {model_name} is not in model registry: {self.model_registry.stt_models}."
138
- logger.error(error_msg)
139
- raise ValueError(error_msg)
140
-
141
- stt_config_path = f"{os.path.abspath(self._server_base_path)}/server_configs/stt_configs/{yaml_file_name}"
142
- if not os.path.exists(stt_config_path):
143
- raise FileNotFoundError(f"STT config file not found at {stt_config_path}")
144
- stt_config = OmegaConf.load(stt_config_path)
145
-
146
- # merge stt config with server config
147
- for key in stt_config:
148
- if key in self.server_config.stt and self.server_config.stt[key] != stt_config[key]:
149
- logger.info(
150
- f"STT config field `{key}` is overridden from `{self.server_config.stt[key]}` "
151
- f"to `{stt_config[key]}` by {stt_config_path}"
152
- )
153
- self.server_config.stt[key] = stt_config[key]
154
-
155
- logger.info(f"Final STT config: {self.server_config.stt}")
156
-
157
- audio_chunk_size_in_secs = self.server_config.stt.get("audio_chunk_size_in_secs", 0.08)
158
- buffer_size = audio_chunk_size_in_secs // self.RAW_AUDIO_FRAME_LEN_IN_SECS
159
- self.stt_params = NeMoSTTInputParams(
160
- att_context_size=self.server_config.stt.att_context_size,
161
- frame_len_in_secs=self.server_config.stt.frame_len_in_secs,
162
- raw_audio_frame_len_in_secs=self.RAW_AUDIO_FRAME_LEN_IN_SECS,
163
- buffer_size=buffer_size,
164
- )
165
-
166
- def _configure_diarization(self):
167
- """
168
- Configure diarization parameters.
169
- Currently only NeMo End-to-End Diarization is supported.
170
- """
171
- self.DIAR_MODEL = self.server_config.diar.model
172
- self.USE_DIAR = self.server_config.diar.enabled
173
- self.diar_params = NeMoDiarInputParams(
174
- frame_len_in_secs=self.server_config.diar.frame_len_in_secs,
175
- threshold=self.server_config.diar.threshold,
176
- )
177
-
178
- def _configure_turn_taking(self):
179
- """Configure turn taking parameters."""
180
- self.TURN_TAKING_BACKCHANNEL_PHRASES_PATH = self.server_config.turn_taking.backchannel_phrases_path
181
- self.TURN_TAKING_MAX_BUFFER_SIZE = self.server_config.turn_taking.max_buffer_size
182
- self.TURN_TAKING_BOT_STOP_DELAY = self.server_config.turn_taking.bot_stop_delay
183
-
184
- def _configure_llm(self):
185
- """Configure LLM parameters."""
186
- llm_model_id = self.server_config.llm.model
187
- is_registry_model = False
188
-
189
- # Try to get LLM config file name from server config first
190
- if self.server_config.llm.get("model_config", None) is not None:
191
- yaml_file_name = os.path.basename(self.server_config.llm.model_config)
192
- else:
193
- # Get LLM configuration from registry
194
- if llm_model_id in self.model_registry.llm_models:
195
- yaml_file_name = self.model_registry.llm_models[llm_model_id].yaml_id
196
- is_registry_model = True
197
- else:
198
- logger.warning(
199
- f"LLM model {llm_model_id} is not included in the model registry. "
200
- "Using a generic HuggingFace LLM config instead."
201
- )
202
- yaml_file_name = self.model_registry.llm_models[self._generic_hf_llm_model_id].yaml_id
203
-
204
- # Load and merge LLM configuration
205
- llm_config_path = f"{os.path.abspath(self._server_base_path)}/server_configs/llm_configs/{yaml_file_name}"
206
-
207
- if (
208
- is_registry_model
209
- and self.model_registry.llm_models[llm_model_id].get("reasoning_supported", False)
210
- and self.server_config.llm.get("enable_reasoning", False)
211
- ):
212
- llm_config_path = llm_config_path.replace(".yaml", "_think.yaml")
213
-
214
- if not os.path.exists(llm_config_path):
215
- raise FileNotFoundError(f"LLM config file not found at {llm_config_path}")
216
- logger.info(f"Loading LLM config from: {llm_config_path}")
217
-
218
- llm_config = OmegaConf.load(llm_config_path)
219
- # merge llm config with server config
220
- # print the override keys
221
- for key in llm_config:
222
- if key in self.server_config.llm and self.server_config.llm[key] != llm_config[key]:
223
- logger.info(
224
- f"LLM config field `{key}` is overridden from `{self.server_config.llm[key]}` to "
225
- f"`{llm_config[key]}` by {llm_config_path}"
226
- )
227
- self.server_config.llm[key] = llm_config[key]
228
-
229
- logger.info(f"Final LLM config: {self.server_config.llm}")
230
-
231
- # Configure system prompt
232
- self.SYSTEM_ROLE = self.server_config.llm.get("system_role", "system")
233
- if self.server_config.llm.get("system_prompt", None) is not None:
234
- system_prompt = self.server_config.llm.system_prompt
235
- if os.path.isfile(system_prompt):
236
- with open(system_prompt, "r") as f:
237
- system_prompt = f.read()
238
- self.SYSTEM_PROMPT = system_prompt
239
- else:
240
- logger.info(f"No system prompt provided, using default system prompt: {self.SYSTEM_PROMPT}")
241
-
242
- if self.server_config.llm.get("system_prompt_suffix", None) is not None:
243
- self.SYSTEM_PROMPT += "\n" + self.server_config.llm.system_prompt_suffix
244
- logger.info(f"Adding system prompt suffix: {self.server_config.llm.system_prompt_suffix}")
245
-
246
- logger.info(f"System prompt: {self.SYSTEM_PROMPT}")
247
-
248
- def _configure_tts(self):
249
- """Configure TTS parameters."""
250
- tts_model_id = self.server_config.tts.model
251
-
252
- # Try to get TTS config file name from server config first
253
- if self.server_config.tts.get("model_config", None) is not None:
254
- yaml_file_name = os.path.basename(self.server_config.tts.model_config)
255
- else:
256
- # Get TTS configuration from registry
257
- if tts_model_id in self.model_registry.tts_models:
258
- yaml_file_name = self.model_registry.tts_models[tts_model_id].yaml_id
259
- else:
260
- error_msg = f"TTS model {tts_model_id} is not in model registry: {self.model_registry.tts_models}"
261
- logger.error(error_msg)
262
- raise ValueError(error_msg)
263
-
264
- tts_config_path = f"{os.path.abspath(self._server_base_path)}/server_configs/tts_configs/{yaml_file_name}"
265
- if not os.path.exists(tts_config_path):
266
- raise FileNotFoundError(f"Default TTS config file not found at {tts_config_path}")
267
- tts_config = OmegaConf.load(tts_config_path)
268
-
269
- # merge tts config with server config
270
- for key in tts_config:
271
- if key in self.server_config.tts and self.server_config.tts[key] != tts_config[key]:
272
- logger.info(
273
- f"TTS config field `{key}` is overridden from `{self.server_config.tts[key]}` to "
274
- f"`{tts_config[key]}` by {tts_config_path}"
275
- )
276
- self.server_config.tts[key] = tts_config[key]
277
-
278
- logger.info(f"Final TTS config: {self.server_config.tts}")
279
-
280
- # Extract TTS parameters
281
- self.TTS_MAIN_MODEL_ID = self.server_config.tts.get("main_model_id", None)
282
- self.TTS_SUB_MODEL_ID = self.server_config.tts.get("sub_model_id", None)
283
- self.TTS_DEVICE = self.server_config.tts.get("device", None)
284
-
285
- # Handle optional TTS parameters
286
- self.TTS_THINK_TOKENS = self.server_config.tts.get("think_tokens", None)
287
- if self.TTS_THINK_TOKENS is not None:
288
- self.TTS_THINK_TOKENS = OmegaConf.to_container(self.TTS_THINK_TOKENS)
289
-
290
- self.TTS_EXTRA_SEPARATOR = self.server_config.tts.get("extra_separator", None)
291
- if self.TTS_EXTRA_SEPARATOR is not None:
292
- self.TTS_EXTRA_SEPARATOR = OmegaConf.to_container(self.TTS_EXTRA_SEPARATOR)
293
-
294
- def get_server_config(self) -> OmegaConf:
295
- """Get the complete server configuration."""
296
- return self.server_config
297
-
298
- def get_model_registry(self) -> Dict[str, Any]:
299
- """Get the model registry configuration."""
300
- return self.model_registry
301
-
302
- def get_vad_params(self) -> VADParams:
303
- """Get VAD parameters."""
304
- return self.vad_params
305
-
306
- def get_stt_params(self) -> NeMoSTTInputParams:
307
- """Get STT parameters."""
308
- return self.stt_params
309
-
310
- def get_diar_params(self) -> NeMoDiarInputParams:
311
- """Get diarization parameters."""
312
- return self.diar_params
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/utils/tool_calling/__init__.py DELETED
@@ -1,13 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/utils/tool_calling/basic_tools.py DELETED
@@ -1,72 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import asyncio
16
- import python_weather
17
- from loguru import logger
18
- from pipecat.frames.frames import LLMTextFrame, TTSSpeakFrame
19
- from pipecat.processors.frame_processor import FrameDirection
20
- from pipecat.services.llm_service import FunctionCallParams
21
-
22
- HTTP_REQUEST_TIMEOUT = 10.0
23
-
24
-
25
- async def tool_get_city_weather(params: FunctionCallParams, city_name: str):
26
- """Get the current weather of a city. The result includes city name, weather description,
27
- temperature, wind speed, wind direction, precipitation, humidity, visibility, and UV index.
28
-
29
- Args:
30
- city_name: The name of the city to get the weather of. For example, "London", "Beijing", "Paris".
31
- Other examples are: "Paris, TX, US", "Paris, FR" and "Tokyo, JP".
32
- """
33
- message = f"Looking up weather data for {city_name}. Please wait a moment..."
34
- # Send the message to upstream so that RTVI can log it while doesn't block the actual tool call.
35
- await params.llm.push_frame(LLMTextFrame(message), direction=FrameDirection.UPSTREAM)
36
- # Send the message to TTS directly so that the user can hear it immediately.
37
- await params.llm.push_frame(TTSSpeakFrame(message))
38
-
39
- # The measuring unit defaults to metric (Celsius)
40
- # Use imperial for Fahrenheit: python_weather.IMPERIAL
41
- async with python_weather.Client(unit=python_weather.METRIC) as client:
42
- # Fetch a weather forecast from a city
43
- logger.debug(f"Fetching weather forecast for `{city_name}`")
44
- try:
45
- weather: python_weather.Forecast = await asyncio.wait_for(
46
- client.get(city_name),
47
- timeout=HTTP_REQUEST_TIMEOUT,
48
- )
49
- except asyncio.TimeoutError:
50
- error_msg = f"python_weather API request timed out after {HTTP_REQUEST_TIMEOUT} seconds for `{city_name}`"
51
- logger.error(error_msg)
52
- await params.result_callback({"error": error_msg})
53
- return
54
- except Exception as e:
55
- error_msg = f"Error fetching weather forecast for `{city_name}`: {str(e)}"
56
- logger.error(error_msg)
57
- await params.result_callback({"error": error_msg})
58
- return
59
-
60
- results = {
61
- "city": city_name,
62
- "description": str(weather.description),
63
- "temperature": f"{weather.temperature} degrees Celsius",
64
- "wind_speed": f"{weather.wind_speed} kilometers per hour",
65
- "wind_direction": str(weather.wind_direction.name),
66
- "precipitation": f"{weather.precipitation} millimeters",
67
- "humidity": f"{weather.humidity} percent",
68
- "visibility": f"{weather.visibility} kilometers",
69
- "uv_index": str(weather.ultraviolet),
70
- }
71
- logger.debug(f"Weather results for {city_name}: {results}")
72
- await params.result_callback(results)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/agents/voice_agent/utils/tool_calling/mixins.py DELETED
@@ -1,104 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- from loguru import logger
16
- from pipecat.adapters.schemas.direct_function import DirectFunction
17
- from pipecat.adapters.schemas.tools_schema import ToolsSchema
18
- from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext
19
- from pipecat.services.openai.llm import OpenAILLMService
20
-
21
-
22
- class ToolCallingMixin:
23
- """
24
- A mixin class for tool calling.
25
- Subclasses must implement the `setup_tool_calling` method to register all available tools
26
- using `self.register_direct_function()`. Then the `__init__` method of the subclass should
27
- call the `setup_tool_calling` method to register the tools.
28
- """
29
-
30
- def setup_tool_calling(self):
31
- """
32
- Setup the tool calling mixin by registering all available tools using self.register_direct_function().
33
- """
34
- raise NotImplementedError(
35
- "Subclasses must implement this method to register all available functions "
36
- "using self.register_direct_function()"
37
- )
38
-
39
- def register_direct_function(self, function_name: str, function: DirectFunction):
40
- """
41
- Register a direct function to be called by the LLM.
42
-
43
- Args:
44
- function_name: The name of the function to register.
45
- function: The direct function to register.
46
- """
47
- if not hasattr(self, "direct_functions"):
48
- self.direct_functions = {}
49
- logger.info(
50
- f"[{self.__class__.__name__}] Registering direct function name {function_name} to "
51
- f"{function.__module__ + '.' + function.__qualname__}"
52
- )
53
- self.direct_functions[function_name] = function
54
-
55
- @property
56
- def available_tools(self) -> dict[str, DirectFunction]:
57
- """
58
- Return a dictionary of available tools, where the key is the tool name and the value is the direct function.
59
- """
60
- tools = {}
61
- for function_name, function in self.direct_functions.items():
62
- tools[function_name] = function
63
- return tools
64
-
65
-
66
- def register_direct_tools_to_llm(
67
- *,
68
- llm: OpenAILLMService,
69
- context: OpenAILLMContext,
70
- tool_mixins: list[ToolCallingMixin] = [],
71
- tools: list[DirectFunction] = [],
72
- cancel_on_interruption: bool = True,
73
- ) -> None:
74
- """
75
- Register direct tools to the LLM.
76
- Args:
77
- llm: The LLM service to use.
78
- context: The LLM context to use.
79
- tools: The list of tools (instances of either `DirectFunction` or `ToolCallingMixin`) to use.
80
- """
81
- all_tools = []
82
- for tool in tool_mixins:
83
- if not isinstance(tool, ToolCallingMixin):
84
- logger.warning(f"Tool {tool.__class__.__name__} is not a ToolCallingMixin, skipping.")
85
- continue
86
- for function_name, function in tool.available_tools.items():
87
- logger.info(f"Registering direct function {function_name} from {tool.__class__.__name__}")
88
- all_tools.append(function)
89
-
90
- for tool in tools:
91
- logger.info(f"Registering direct function: {tool.__module__ + '.' + tool.__qualname__}")
92
- all_tools.append(tool)
93
-
94
- if not all_tools:
95
- logger.warning("No direct tools provided.")
96
- return
97
- else:
98
- logger.info(f"Registering {len(all_tools)} direct tools to the LLM.")
99
-
100
- tools_schema = ToolsSchema(standard_tools=all_tools)
101
- context.set_tools(tools_schema)
102
-
103
- for tool in all_tools:
104
- llm.register_direct_function(tool, cancel_on_interruption=cancel_on_interruption)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/collections/__init__.py DELETED
@@ -1,13 +0,0 @@
1
- # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/collections/asr/README.md DELETED
@@ -1,37 +0,0 @@
1
- # Automatic Speech Recognition (ASR)
2
-
3
- ## Key Features
4
-
5
- * [HuggingFace Space for Audio Transcription (File, Microphone and YouTube)](https://huggingface.co/spaces/smajumdar/nemo_multilingual_language_id)
6
- * [Pretrained models](https://ngc.nvidia.com/catalog/collections/nvidia:nemo_asr) available in 14+ languages
7
- * [Automatic Speech Recognition (ASR)](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/intro.html)
8
- * Supported ASR [models](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/asr/models.html):
9
- * Jasper, QuartzNet, CitriNet, ContextNet
10
- * Conformer-CTC, Conformer-Transducer, FastConformer-CTC, FastConformer-Transducer
11
- * Squeezeformer-CTC and Squeezeformer-Transducer
12
- * LSTM-Transducer (RNNT) and LSTM-CTC
13
- * Supports the following decoders/losses:
14
- * CTC
15
- * Transducer/RNNT
16
- * Hybrid Transducer/CTC
17
- * NeMo Original [Multi-blank Transducers](https://arxiv.org/abs/2211.03541) and [Token-and-Duration Transducers (TDT)](https://arxiv.org/abs/2304.06795)
18
- * Streaming/Buffered ASR (CTC/Transducer) - [Chunked Inference Examples](https://github.com/NVIDIA/NeMo/tree/stable/examples/asr/asr_chunked_inference)
19
- * [Cache-aware Streaming Conformer](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/asr/models.html#cache-aware-streaming-conformer) with multiple lookaheads (including microphone streaming [tutorial](https://github.com/NVIDIA/NeMo/blob/main/tutorials/asr/Online_ASR_Microphone_Demo_Cache_Aware_Streaming.ipynb).
20
- * Beam Search decoding
21
- * [Language Modelling for ASR (CTC and RNNT)](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/asr_language_modeling.html): N-gram LM in fusion with Beam Search decoding, Neural Rescoring with Transformer
22
- * [Support of long audios for Conformer with memory efficient local attention](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/results.html#inference-on-long-audio)
23
- * [Speech Classification, Speech Command Recognition and Language Identification](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speech_classification/intro.html): MatchboxNet (Command Recognition), AmberNet (LangID)
24
- * [Voice activity Detection (VAD)](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/asr/speech_classification/models.html#marblenet-vad): MarbleNet
25
- * ASR with VAD Inference - [Example](https://github.com/NVIDIA/NeMo/tree/stable/examples/asr/asr_vad)
26
- * [Speaker Recognition](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speaker_recognition/intro.html): TitaNet, ECAPA_TDNN, SpeakerNet
27
- * [Speaker Diarization](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speaker_diarization/intro.html)
28
- * Clustering Diarizer: TitaNet, ECAPA_TDNN, SpeakerNet
29
- * Neural Diarizer: Sortformer
30
- * [Speech Intent Detection and Slot Filling](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speech_intent_slot/intro.html): Conformer-Transformer
31
-
32
- You can also get a high-level overview of NeMo ASR by watching the talk *NVIDIA NeMo: Toolkit for Conversational AI*, presented at PyData Yerevan 2022:
33
-
34
-
35
- [![NVIDIA NeMo: Toolkit for Conversational AI](https://img.youtube.com/vi/J-P6Sczmas8/maxres3.jpg
36
- )](https://www.youtube.com/embed/J-P6Sczmas8?mute=0&start=14&autoplay=0
37
- "NeMo presentation at PyData@Yerevan 2022")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/collections/asr/__init__.py DELETED
@@ -1,25 +0,0 @@
1
- # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- from nemo.collections.asr import data, losses, models, modules
16
- from nemo.package_info import __version__
17
-
18
- # Set collection version equal to NeMo version.
19
- __version = __version__
20
-
21
- # Authorship.
22
- __author__ = "NVIDIA Corporation"
23
-
24
- # Set collection name.
25
- __description__ = "Automatic Speech Recognition collection"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/collections/asr/data/__init__.py DELETED
@@ -1,13 +0,0 @@
1
- # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/collections/asr/data/audio_to_ctm_dataset.py DELETED
@@ -1,95 +0,0 @@
1
- # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import json
16
- import os
17
- from dataclasses import dataclass
18
- from pathlib import Path
19
- from typing import Any, List, Tuple
20
-
21
- from nemo.collections.asr.data.audio_to_text_dataset import ASRPredictionWriter
22
- from nemo.utils import logging
23
-
24
-
25
- @dataclass
26
- class FrameCtmUnit:
27
- """A container class for one CTM unit with start and length countable in frames.
28
- """
29
-
30
- label: str
31
- start_frame: int
32
- length: int
33
- probability: float
34
-
35
- def __repr__(self) -> str:
36
- return f"{self.label}\t({self.probability:1.3f}): [{self.start_frame:6d}, {self.length:6d}]"
37
-
38
- @property
39
- def end_frame(self):
40
- return self.start_frame + self.length
41
-
42
- def to_ctm_str(self, time_per_frame: int) -> str:
43
- """Represents the data as part of the CTM line.
44
-
45
- The CTM line format is
46
- <utterance_name> <channel> <start_time> <duration> <label_str> <probability>
47
- This method prepares the last four entities."""
48
- return f"{self.start_frame * time_per_frame :.3f} {self.length * time_per_frame :.3f} {self.label} {self.probability :1.3f}"
49
-
50
-
51
- class ASRCTMPredictionWriter(ASRPredictionWriter):
52
- def __init__(self, dataset, output_file: str, output_ctm_dir: str, time_per_frame: float):
53
- super().__init__(dataset, output_file)
54
- self.output_ctm_dir = output_ctm_dir
55
- self.time_per_frame = time_per_frame
56
- os.makedirs(self.output_ctm_dir, exist_ok=True)
57
-
58
- def write_ctm(self, name, filepath, frameCtmUnits):
59
- with open(filepath, "tw", encoding="utf-8") as f:
60
- for unit in frameCtmUnits:
61
- f.write(f"{name} 1 {unit.to_ctm_str(self.time_per_frame)}\n")
62
-
63
- def write_on_batch_end(
64
- self,
65
- trainer,
66
- pl_module: 'LightningModule',
67
- prediction: Tuple[int, List[FrameCtmUnit]],
68
- batch_indices: List[int],
69
- batch: Any,
70
- batch_idx: int,
71
- dataloader_idx: int,
72
- ):
73
- for sample_id, units in prediction:
74
- sample = self.dataset.get_manifest_sample(sample_id)
75
- with_ctm = True
76
- if len(units) == 0:
77
- logging.warning(
78
- f"""Do not producing CTM output for item `{sample.audio_file}`.
79
- Check if text is empty or if duration is too short: `{sample.text_raw}`, {sample.duration}"""
80
- )
81
- with_ctm = False
82
- item = {}
83
- item["audio_filepath"] = sample.audio_file
84
- item["duration"] = sample.duration
85
- item["text"] = sample.text_raw
86
- if with_ctm:
87
- utt_name = Path(sample.audio_file).stem
88
- ctm_filepath = os.path.join(self.output_ctm_dir, utt_name) + ".ctm"
89
- self.write_ctm(utt_name, ctm_filepath, units)
90
- item["ctm_filepath"] = ctm_filepath
91
- else:
92
- item["ctm_filepath"] = ""
93
- self.outf.write(json.dumps(item) + "\n")
94
- self.samples_num += 1
95
- return
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/collections/asr/data/audio_to_diar_label.py DELETED
@@ -1,562 +0,0 @@
1
- # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import os
16
- from typing import Dict, List, Optional, Tuple
17
-
18
- import numpy as np
19
- import torch
20
-
21
- from nemo.collections.asr.parts.utils.speaker_utils import convert_rttm_line, get_subsegments
22
- from nemo.collections.common.parts.preprocessing.collections import EndtoEndDiarizationSpeechLabel
23
- from nemo.core.classes import Dataset
24
- from nemo.core.neural_types import AudioSignal, LengthsType, NeuralType, ProbsType
25
- from nemo.utils import logging
26
-
27
-
28
- def get_subsegments_to_timestamps(
29
- subsegments: List[Tuple[float, float]], feat_per_sec: int = 100, max_end_ts: float = None, decimals=2
30
- ):
31
- """
32
- Convert subsegment timestamps to scale timestamps by multiplying with the feature rate (`feat_per_sec`)
33
- and rounding. Segment is consisted of many subsegments and sugsegments are equivalent to `frames`
34
- in end-to-end speaker diarization models.
35
-
36
- Args:
37
- subsegments (List[Tuple[float, float]]):
38
- A list of tuples where each tuple contains the start and end times of a subsegment
39
- (frames in end-to-end models).
40
- >>> subsegments = [[t0_start, t0_duration], [t1_start, t1_duration],..., [tN_start, tN_duration]]
41
- feat_per_sec (int, optional):
42
- The number of feature frames per second. Defaults to 100.
43
- max_end_ts (float, optional):
44
- The maximum end timestamp to clip the results. If None, no clipping is applied. Defaults to None.
45
- decimals (int, optional):
46
- The number of decimal places to round the timestamps. Defaults to 2.
47
-
48
- Example:
49
- Segments starting from 0.0 and ending at 69.2 seconds.
50
- If hop-length is 0.08 and the subsegment (frame) length is 0.16 seconds,
51
- there are 864 = (69.2 - 0.16)/0.08 + 1 subsegments (frames in end-to-end models) in this segment.
52
- >>> subsegments = [[[0.0, 0.16], [0.08, 0.16], ..., [69.04, 0.16], [69.12, 0.08]]
53
-
54
- Returns:
55
- ts (torch.tensor):
56
- A tensor containing the scaled and rounded timestamps for each subsegment.
57
- """
58
- seg_ts = (torch.tensor(subsegments) * feat_per_sec).float()
59
- ts_round = torch.round(seg_ts, decimals=decimals)
60
- ts = ts_round.long()
61
- ts[:, 1] = ts[:, 0] + ts[:, 1]
62
- if max_end_ts is not None:
63
- ts = np.clip(ts, 0, int(max_end_ts * feat_per_sec))
64
- return ts
65
-
66
-
67
- def extract_frame_info_from_rttm(offset, duration, rttm_lines, round_digits=3):
68
- """
69
- Extracts RTTM lines containing speaker labels, start time, and end time for a given audio segment.
70
-
71
- Args:
72
- uniq_id (str): Unique identifier for the audio file and corresponding RTTM file.
73
- offset (float): The starting time offset for the segment of interest.
74
- duration (float): The duration of the segment of interest.
75
- rttm_lines (list): List of RTTM lines in string format.
76
- round_digits (int, optional): Number of decimal places to round the start and end times. Defaults to 3.
77
-
78
- Returns:
79
- rttm_mat (tuple): A tuple containing lists of start times, end times, and speaker labels.
80
- sess_to_global_spkids (dict): A mapping from session-specific speaker indices to global speaker identifiers.
81
- """
82
- rttm_stt, rttm_end = offset, offset + duration
83
- stt_list, end_list, speaker_list, speaker_set = [], [], [], []
84
- sess_to_global_spkids = dict()
85
-
86
- for rttm_line in rttm_lines:
87
- start, end, speaker = convert_rttm_line(rttm_line)
88
-
89
- # Skip invalid RTTM lines where the start time is greater than the end time.
90
- if start > end:
91
- continue
92
-
93
- # Check if the RTTM segment overlaps with the specified segment of interest.
94
- if (end > rttm_stt and start < rttm_end) or (start < rttm_end and end > rttm_stt):
95
- # Adjust the start and end times to fit within the segment of interest.
96
- start, end = max(start, rttm_stt), min(end, rttm_end)
97
- else:
98
- continue
99
-
100
- # Round the start and end times to the specified number of decimal places.
101
- end_list.append(round(end, round_digits))
102
- stt_list.append(round(start, round_digits))
103
-
104
- # Assign a unique index to each speaker and maintain a mapping.
105
- if speaker not in speaker_set:
106
- speaker_set.append(speaker)
107
- speaker_list.append(speaker_set.index(speaker))
108
- sess_to_global_spkids.update({speaker_set.index(speaker): speaker})
109
-
110
- rttm_mat = (stt_list, end_list, speaker_list)
111
- return rttm_mat, sess_to_global_spkids
112
-
113
-
114
- def get_frame_targets_from_rttm(
115
- rttm_timestamps: list,
116
- offset: float,
117
- duration: float,
118
- round_digits: int,
119
- feat_per_sec: int,
120
- max_spks: int,
121
- ):
122
- """
123
- Create a multi-dimensional vector sequence containing speaker timestamp information in RTTM.
124
- The unit-length is the frame shift length of the acoustic feature. The feature-level annotations
125
- `feat_level_target` will later be converted to base-segment level diarization label.
126
-
127
- Args:
128
- rttm_timestamps (list):
129
- List containing start and end time for each speaker segment label.
130
- stt_list, end_list and speaker_list are contained.
131
- feat_per_sec (int):
132
- Number of feature frames per second.
133
- This quantity is determined by window_stride variable in preprocessing module.
134
- target_spks (tuple):
135
- Speaker indices that are generated from combinations. If there are only one or two speakers,
136
- only a single target_spks variable is generated.
137
-
138
- Returns:
139
- feat_level_target (torch.tensor):
140
- Tensor containing label for each feature level frame.
141
- """
142
- stt_list, end_list, speaker_list = rttm_timestamps
143
- sorted_speakers = sorted(list(set(speaker_list)))
144
- total_fr_len = int(duration * feat_per_sec)
145
- if len(sorted_speakers) > max_spks:
146
- logging.warning(
147
- f"Number of speakers in RTTM file {len(sorted_speakers)} exceeds the maximum number of speakers: "
148
- f"{max_spks}! Only {max_spks} first speakers remain, and this will affect frame metrics!"
149
- )
150
- feat_level_target = torch.zeros(total_fr_len, max_spks)
151
- for count, (stt, end, spk_rttm_key) in enumerate(zip(stt_list, end_list, speaker_list)):
152
- if end < offset or stt > offset + duration:
153
- continue
154
- stt, end = max(offset, stt), min(offset + duration, end)
155
- spk = spk_rttm_key
156
- if spk < max_spks:
157
- stt_fr, end_fr = int((stt - offset) * feat_per_sec), int((end - offset) * feat_per_sec)
158
- feat_level_target[stt_fr:end_fr, spk] = 1
159
- return feat_level_target
160
-
161
-
162
- class _AudioToSpeechE2ESpkDiarDataset(Dataset):
163
- """
164
- Dataset class that loads a json file containing paths to audio files,
165
- RTTM files and number of speakers. This Dataset class is designed for
166
- training or fine-tuning speaker embedding extractor and diarization decoder
167
- at the same time.
168
-
169
- Example:
170
- {"audio_filepath": "/path/to/audio_0.wav", "num_speakers": 2,
171
- "rttm_filepath": "/path/to/diar_label_0.rttm}
172
- ...
173
- {"audio_filepath": "/path/to/audio_n.wav", "num_speakers": 2,
174
- "rttm_filepath": "/path/to/diar_label_n.rttm}
175
-
176
- Args:
177
- manifest_filepath (str):
178
- Path to input manifest json files.
179
- multiargs_dict (dict):
180
- Dictionary containing the parameters for multiscale segmentation and clustering.
181
- soft_label_thres (float):
182
- Threshold that determines the label of each segment based on RTTM file information.
183
- featurizer:
184
- Featurizer instance for generating audio_signal from the raw waveform.
185
- window_stride (float):
186
- Window stride for acoustic feature. This value is used for calculating the numbers of feature-level frames.
187
- """
188
-
189
- @property
190
- def output_types(self) -> Optional[Dict[str, NeuralType]]:
191
- """Returns definitions of module output ports."""
192
- output_types = {
193
- "audio_signal": NeuralType(('B', 'T'), AudioSignal()),
194
- "audio_length": NeuralType(('B'), LengthsType()),
195
- "targets": NeuralType(('B', 'T', 'C'), ProbsType()),
196
- "target_len": NeuralType(('B'), LengthsType()),
197
- }
198
-
199
- return output_types
200
-
201
- def __init__(
202
- self,
203
- *,
204
- manifest_filepath: str,
205
- soft_label_thres: float,
206
- session_len_sec: float,
207
- num_spks: int,
208
- featurizer,
209
- fb_featurizer,
210
- window_stride: float,
211
- min_subsegment_duration: float = 0.03,
212
- global_rank: int = 0,
213
- dtype=torch.float16,
214
- round_digits: int = 2,
215
- soft_targets: bool = False,
216
- subsampling_factor: int = 8,
217
- device: str = 'cpu',
218
- ):
219
- super().__init__()
220
- self.collection = EndtoEndDiarizationSpeechLabel(
221
- manifests_files=manifest_filepath.split(','),
222
- round_digits=round_digits,
223
- )
224
- self.featurizer = featurizer
225
- self.fb_featurizer = fb_featurizer
226
- # STFT and subsampling factor parameters
227
- self.n_fft = self.fb_featurizer.n_fft
228
- self.hop_length = self.fb_featurizer.hop_length
229
- self.stft_pad_amount = self.fb_featurizer.stft_pad_amount
230
- self.subsampling_factor = subsampling_factor
231
- # Annotation and target length parameters
232
- self.round_digits = round_digits
233
- self.feat_per_sec = int(1 / window_stride)
234
- self.diar_frame_length = round(subsampling_factor * window_stride, round_digits)
235
- self.session_len_sec = session_len_sec
236
- self.soft_label_thres = soft_label_thres
237
- self.max_spks = num_spks
238
- self.min_subsegment_duration = min_subsegment_duration
239
- self.dtype = dtype
240
- self.use_asr_style_frame_count = True
241
- self.soft_targets = soft_targets
242
- self.round_digits = 2
243
- self.floor_decimal = 10**self.round_digits
244
- self.device = device
245
- self.global_rank = global_rank
246
-
247
- def __len__(self):
248
- return len(self.collection)
249
-
250
- def get_frame_count_from_time_series_length(self, seq_len):
251
- """
252
- This function is used to get the sequence length of the audio signal. This is required to match
253
- the feature frame length with ASR (STT) models. This function is copied from
254
- NeMo/nemo/collections/asr/parts/preprocessing/features.py::FilterbankFeatures::get_seq_len.
255
-
256
- Args:
257
- seq_len (int):
258
- The sequence length of the time-series data.
259
-
260
- Returns:
261
- seq_len (int):
262
- The sequence length of the feature frames.
263
- """
264
- pad_amount = self.stft_pad_amount * 2 if self.stft_pad_amount is not None else self.n_fft // 2 * 2
265
- seq_len = torch.floor_divide((seq_len + pad_amount - self.n_fft), self.hop_length).to(dtype=torch.long)
266
- frame_count = int(np.ceil(seq_len / self.subsampling_factor))
267
- return frame_count
268
-
269
- def get_uniq_id_with_range(self, sample, deci=3):
270
- """
271
- Generate unique training sample ID from unique file ID, offset and duration. The start-end time added
272
- unique ID is required for identifying the sample since multiple short audio samples are generated from a single
273
- audio file. The start time and end time of the audio stream uses millisecond units if `deci=3`.
274
-
275
- Args:
276
- sample:
277
- `EndtoEndDiarizationSpeechLabel` instance from collections.
278
-
279
- Returns:
280
- uniq_id (str):
281
- Unique sample ID which includes start and end time of the audio stream.
282
- Example: abc1001_3122_6458
283
- """
284
- bare_uniq_id = os.path.splitext(os.path.basename(sample.rttm_file))[0]
285
- offset = str(int(round(sample.offset, deci) * pow(10, deci)))
286
- endtime = str(int(round(sample.offset + sample.duration, deci) * pow(10, deci)))
287
- uniq_id = f"{bare_uniq_id}_{offset}_{endtime}"
288
- return uniq_id
289
-
290
- def parse_rttm_for_targets_and_lens(self, rttm_file, offset, duration, target_len):
291
- """
292
- Generate target tensor variable by extracting groundtruth diarization labels from an RTTM file.
293
- This function converts (start, end, speaker_id) format into base-scale (the finest scale) segment level
294
- diarization label in a matrix form.
295
-
296
- Example of seg_target:
297
- [[0., 1.], [0., 1.], [1., 1.], [1., 0.], [1., 0.], ..., [0., 1.]]
298
- """
299
- if rttm_file in [None, '']:
300
- num_seg = torch.max(target_len)
301
- targets = torch.zeros(num_seg, self.max_spks)
302
- return targets
303
-
304
- with open(rttm_file, 'r') as f:
305
- rttm_lines = f.readlines()
306
-
307
- rttm_timestamps, sess_to_global_spkids = extract_frame_info_from_rttm(offset, duration, rttm_lines)
308
-
309
- fr_level_target = get_frame_targets_from_rttm(
310
- rttm_timestamps=rttm_timestamps,
311
- offset=offset,
312
- duration=duration,
313
- round_digits=self.round_digits,
314
- feat_per_sec=self.feat_per_sec,
315
- max_spks=self.max_spks,
316
- )
317
-
318
- soft_target_seg = self.get_soft_targets_seg(feat_level_target=fr_level_target, target_len=target_len)
319
- if self.soft_targets:
320
- step_target = soft_target_seg
321
- else:
322
- step_target = (soft_target_seg >= self.soft_label_thres).float()
323
- return step_target
324
-
325
- def get_soft_targets_seg(self, feat_level_target, target_len):
326
- """
327
- Generate the final targets for the actual diarization step.
328
- Here, frame level means step level which is also referred to as segments.
329
- We follow the original paper and refer to the step level as "frames".
330
-
331
- Args:
332
- feat_level_target (torch.tensor):
333
- Tensor variable containing hard-labels of speaker activity in each feature-level segment.
334
- target_len (torch.tensor):
335
- Numbers of ms segments
336
-
337
- Returns:
338
- soft_target_seg (torch.tensor):
339
- Tensor variable containing soft-labels of speaker activity in each step-level segment.
340
- """
341
- num_seg = torch.max(target_len)
342
- targets = torch.zeros(num_seg, self.max_spks)
343
- stride = int(self.feat_per_sec * self.diar_frame_length)
344
- for index in range(num_seg):
345
- if index == 0:
346
- seg_stt_feat = 0
347
- else:
348
- seg_stt_feat = stride * index - 1 - int(stride / 2)
349
- if index == num_seg - 1:
350
- seg_end_feat = feat_level_target.shape[0]
351
- else:
352
- seg_end_feat = stride * index - 1 + int(stride / 2)
353
- targets[index] = torch.mean(feat_level_target[seg_stt_feat : seg_end_feat + 1, :], axis=0)
354
- return targets
355
-
356
- def get_segment_timestamps(
357
- self,
358
- duration: float,
359
- offset: float = 0,
360
- sample_rate: int = 16000,
361
- ):
362
- """
363
- Get start and end time of segments in each scale.
364
-
365
- Args:
366
- sample:
367
- `EndtoEndDiarizationSpeechLabel` instance from preprocessing.collections
368
- Returns:
369
- segment_timestamps (torch.tensor):
370
- Tensor containing Multiscale segment timestamps.
371
- target_len (torch.tensor):
372
- Number of segments for each scale. This information is used for reshaping embedding batch
373
- during forward propagation.
374
- """
375
- subsegments = get_subsegments(
376
- offset=offset,
377
- window=round(self.diar_frame_length * 2, self.round_digits),
378
- shift=self.diar_frame_length,
379
- duration=duration,
380
- min_subsegment_duration=self.min_subsegment_duration,
381
- use_asr_style_frame_count=self.use_asr_style_frame_count,
382
- sample_rate=sample_rate,
383
- feat_per_sec=self.feat_per_sec,
384
- )
385
- if self.use_asr_style_frame_count:
386
- effective_dur = (
387
- np.ceil((1 + duration * sample_rate) / int(sample_rate / self.feat_per_sec)).astype(int)
388
- / self.feat_per_sec
389
- )
390
- else:
391
- effective_dur = duration
392
- ts_tensor = get_subsegments_to_timestamps(
393
- subsegments, self.feat_per_sec, decimals=2, max_end_ts=(offset + effective_dur)
394
- )
395
- target_len = torch.tensor([ts_tensor.shape[0]])
396
- return target_len
397
-
398
- def __getitem__(self, index):
399
- sample = self.collection[index]
400
- if sample.offset is None:
401
- sample.offset = 0
402
- offset = sample.offset
403
- if self.session_len_sec < 0:
404
- session_len_sec = sample.duration
405
- else:
406
- session_len_sec = min(sample.duration, self.session_len_sec)
407
-
408
- audio_signal = self.featurizer.process(sample.audio_file, offset=offset, duration=session_len_sec)
409
-
410
- # We should resolve the length mis-match from the round-off errors between these two variables:
411
- # `session_len_sec` and `audio_signal.shape[0]`
412
- session_len_sec = (
413
- np.floor(audio_signal.shape[0] / self.featurizer.sample_rate * self.floor_decimal) / self.floor_decimal
414
- )
415
- audio_signal = audio_signal[: round(self.featurizer.sample_rate * session_len_sec)]
416
- audio_signal_length = torch.tensor(audio_signal.shape[0]).long()
417
-
418
- # Target length should be following the ASR feature extraction convention: Use self.get_frame_count_from_time_series_length.
419
- target_len = self.get_segment_timestamps(duration=session_len_sec, sample_rate=self.featurizer.sample_rate)
420
- target_len = torch.clamp(target_len, max=self.get_frame_count_from_time_series_length(audio_signal.shape[0]))
421
-
422
- targets = self.parse_rttm_for_targets_and_lens(
423
- rttm_file=sample.rttm_file, offset=offset, duration=session_len_sec, target_len=target_len
424
- )
425
- targets = targets[:target_len, :]
426
- return audio_signal, audio_signal_length, targets, target_len
427
-
428
-
429
- def _eesd_train_collate_fn(self, batch):
430
- """
431
- Collate a batch of variables needed for training the end-to-end speaker diarization (EESD) model
432
- from raw waveforms to diarization labels. The following variables are included in the training/validation batch:
433
-
434
- Args:
435
- batch (tuple):
436
- A tuple containing the variables for diarization training.
437
-
438
- Returns:
439
- audio_signal (torch.Tensor):
440
- A tensor containing the raw waveform samples (time series) loaded from the `audio_filepath`
441
- in the input manifest file.
442
- feature_length (torch.Tensor):
443
- A tensor containing the lengths of the raw waveform samples.
444
- targets (torch.Tensor):
445
- Groundtruth speaker labels for the given input embedding sequence.
446
- target_lens (torch.Tensor):
447
- A tensor containing the number of segments for each sample in the batch, necessary for
448
- reshaping inputs to the EESD model.
449
- """
450
- packed_batch = list(zip(*batch))
451
- audio_signal, feature_length, targets, target_len = packed_batch
452
- audio_signal_list, feature_length_list = [], []
453
- target_len_list, targets_list = [], []
454
-
455
- max_raw_feat_len = max([x.shape[0] for x in audio_signal])
456
- max_target_len = max([x.shape[0] for x in targets])
457
- if max([len(feat.shape) for feat in audio_signal]) > 1:
458
- max_ch = max([feat.shape[1] for feat in audio_signal])
459
- else:
460
- max_ch = 1
461
- for feat, feat_len, tgt, segment_ct in batch:
462
- seq_len = tgt.shape[0]
463
- if len(feat.shape) > 1:
464
- pad_feat = (0, 0, 0, max_raw_feat_len - feat.shape[0])
465
- else:
466
- pad_feat = (0, max_raw_feat_len - feat.shape[0])
467
- if feat.shape[0] < feat_len:
468
- feat_len_pad = feat_len - feat.shape[0]
469
- feat = torch.nn.functional.pad(feat, (0, feat_len_pad))
470
- pad_tgt = (0, 0, 0, max_target_len - seq_len)
471
- padded_feat = torch.nn.functional.pad(feat, pad_feat)
472
- padded_tgt = torch.nn.functional.pad(tgt, pad_tgt)
473
- if max_ch > 1 and padded_feat.shape[1] < max_ch:
474
- feat_ch_pad = max_ch - padded_feat.shape[1]
475
- padded_feat = torch.nn.functional.pad(padded_feat, (0, feat_ch_pad))
476
- audio_signal_list.append(padded_feat)
477
- feature_length_list.append(feat_len.clone().detach())
478
- target_len_list.append(segment_ct.clone().detach())
479
- targets_list.append(padded_tgt)
480
- audio_signal = torch.stack(audio_signal_list)
481
- feature_length = torch.stack(feature_length_list)
482
- target_lens = torch.stack(target_len_list).squeeze(1)
483
- targets = torch.stack(targets_list)
484
- return audio_signal, feature_length, targets, target_lens
485
-
486
-
487
- class AudioToSpeechE2ESpkDiarDataset(_AudioToSpeechE2ESpkDiarDataset):
488
- """
489
- Dataset class for loading a JSON file containing paths to audio files,
490
- RTTM (Rich Transcription Time Marked) files, and the number of speakers.
491
- This class is designed for training or fine-tuning a speaker embedding
492
- extractor and diarization decoder simultaneously.
493
-
494
- The JSON manifest file should have entries in the following format:
495
-
496
- Example:
497
- {
498
- "audio_filepath": "/path/to/audio_0.wav",
499
- "num_speakers": 2,
500
- "rttm_filepath": "/path/to/diar_label_0.rttm"
501
- }
502
- ...
503
- {
504
- "audio_filepath": "/path/to/audio_n.wav",
505
- "num_speakers": 2,
506
- "rttm_filepath": "/path/to/diar_label_n.rttm"
507
- }
508
-
509
- Args:
510
- manifest_filepath (str):
511
- Path to the input manifest JSON file containing paths to audio and RTTM files.
512
- soft_label_thres (float):
513
- Threshold for assigning soft labels to segments based on RTTM file information.
514
- session_len_sec (float):
515
- Duration of each session (in seconds) for training or fine-tuning.
516
- num_spks (int):
517
- Number of speakers in the audio files.
518
- featurizer:
519
- Instance of a featurizer for generating features from the raw waveform.
520
- window_stride (float):
521
- Window stride (in seconds) for extracting acoustic features, used to calculate
522
- the number of feature frames.
523
- global_rank (int):
524
- Global rank of the current process (used for distributed training).
525
- soft_targets (bool):
526
- Whether or not to use soft targets during training.
527
-
528
- Methods:
529
- eesd_train_collate_fn(batch):
530
- Collates a batch of data for end-to-end speaker diarization training.
531
- """
532
-
533
- def __init__(
534
- self,
535
- *,
536
- manifest_filepath: str,
537
- soft_label_thres: float,
538
- session_len_sec: float,
539
- num_spks: int,
540
- featurizer,
541
- fb_featurizer,
542
- window_stride,
543
- global_rank: int,
544
- soft_targets: bool,
545
- device: str,
546
- ):
547
- super().__init__(
548
- manifest_filepath=manifest_filepath,
549
- soft_label_thres=soft_label_thres,
550
- session_len_sec=session_len_sec,
551
- num_spks=num_spks,
552
- featurizer=featurizer,
553
- fb_featurizer=fb_featurizer,
554
- window_stride=window_stride,
555
- global_rank=global_rank,
556
- soft_targets=soft_targets,
557
- device=device,
558
- )
559
-
560
- def eesd_train_collate_fn(self, batch):
561
- """Collate a batch of data for end-to-end speaker diarization training."""
562
- return _eesd_train_collate_fn(self, batch)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/collections/asr/data/audio_to_diar_label_lhotse.py DELETED
@@ -1,114 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- from typing import Dict, Optional, Tuple
16
-
17
- import torch.utils.data
18
- from lhotse.dataset import AudioSamples
19
- from lhotse.dataset.collation import collate_matrices
20
-
21
- from nemo.collections.asr.parts.utils.asr_multispeaker_utils import (
22
- get_hidden_length_from_sample_length,
23
- speaker_to_target,
24
- )
25
- from nemo.core.neural_types import AudioSignal, LabelsType, LengthsType, NeuralType
26
- from nemo.utils import logging
27
-
28
-
29
- class LhotseAudioToSpeechE2ESpkDiarDataset(torch.utils.data.Dataset):
30
- """
31
- This dataset is a Lhotse version of diarization dataset in audio_to_diar_label.py.
32
- Unlike native NeMo datasets, Lhotse dataset defines only the mapping from
33
- a CutSet (meta-data) to a mini-batch with PyTorch tensors.
34
- Specifically, it performs tokenization, I/O, augmentation, and feature extraction (if any).
35
- Managing data, sampling, de-duplication across workers/nodes etc. is all handled
36
- by Lhotse samplers instead.
37
- """
38
-
39
- @property
40
- def output_types(self) -> Optional[Dict[str, NeuralType]]:
41
- """Define the output types of the dataset."""
42
- return {
43
- 'audio_signal': NeuralType(('B', 'T'), AudioSignal()),
44
- 'a_sig_length': NeuralType(tuple('B'), LengthsType()),
45
- 'targets': NeuralType(('B', 'T', 'N'), LabelsType()),
46
- 'target_length': NeuralType(tuple('B'), LengthsType()),
47
- 'sample_id': NeuralType(tuple('B'), LengthsType(), optional=True),
48
- }
49
-
50
- def __init__(self, cfg):
51
- super().__init__()
52
- self.load_audio = AudioSamples(fault_tolerant=True)
53
- self.cfg = cfg
54
- self.num_speakers = self.cfg.get('num_speakers', 4)
55
- self.num_sample_per_mel_frame = int(
56
- self.cfg.get('window_stride', 0.01) * self.cfg.get('sample_rate', 16000)
57
- ) # 160 samples for every 1ms by default
58
- self.num_mel_frame_per_target_frame = int(self.cfg.get('subsampling_factor', 8))
59
-
60
- def __getitem__(self, cuts) -> Tuple[torch.Tensor, ...]:
61
- # NOTE: This end-to-end diarization dataloader only loads the 1st ch of the audio file.
62
- # Process cuts in a single loop: convert to mono and compute speaker activities
63
- mono_cuts = []
64
- speaker_activities = []
65
- for cut in cuts:
66
- if cut.num_channels is not None and cut.num_channels > 1:
67
- logging.warning(
68
- "Multiple channels detected in cut '%s' (%d channels). "
69
- "Only the first channel will be used; remaining channels are ignored.",
70
- cut.id,
71
- cut.num_channels,
72
- )
73
- mono_cut = cut.with_channels(channels=[0])
74
- mono_cuts.append(mono_cut)
75
-
76
- speaker_activity = speaker_to_target(
77
- a_cut=mono_cut,
78
- num_speakers=self.num_speakers,
79
- num_sample_per_mel_frame=self.num_sample_per_mel_frame,
80
- num_mel_frame_per_asr_frame=self.num_mel_frame_per_target_frame,
81
- boundary_segments=True,
82
- )
83
- # This line prevents dimension mismatch error in the collate_matrices function.
84
- if speaker_activity.shape[1] > self.num_speakers:
85
- logging.warning(
86
- "Number of speakers in the target %s is greater than "
87
- "the maximum number of speakers %s. Truncating extra speakers. "
88
- "Set the `num_speakers` to higher value to avoid this warning.",
89
- speaker_activity.shape[1],
90
- self.num_speakers,
91
- )
92
- speaker_activity = speaker_activity[:, : self.num_speakers]
93
- speaker_activities.append(speaker_activity)
94
-
95
- cuts = type(cuts).from_cuts(mono_cuts)
96
- audio, audio_lens, cuts = self.load_audio(cuts)
97
- targets = collate_matrices(speaker_activities).to(audio.dtype) # (B, T, N)
98
-
99
- if targets.shape[2] > self.num_speakers:
100
- targets = targets[:, :, : self.num_speakers]
101
- elif targets.shape[2] < self.num_speakers:
102
- targets = torch.nn.functional.pad(
103
- targets, (0, self.num_speakers - targets.shape[2]), mode='constant', value=0
104
- )
105
-
106
- target_lens_list = []
107
- for audio_len in audio_lens:
108
- target_fr_len = get_hidden_length_from_sample_length(
109
- audio_len, self.num_sample_per_mel_frame, self.num_mel_frame_per_target_frame
110
- )
111
- target_lens_list.append(target_fr_len)
112
- target_lens = torch.tensor(target_lens_list)
113
-
114
- return audio, audio_lens, targets, target_lens
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/collections/asr/data/audio_to_eou_label_lhotse.py DELETED
@@ -1,524 +0,0 @@
1
- # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import math
16
- from dataclasses import dataclass
17
- from typing import Dict, List, Optional
18
-
19
- import numpy as np
20
- import torch.utils.data
21
- from lhotse.cut import Cut, CutSet, MixedCut
22
- from lhotse.dataset import AudioSamples
23
- from lhotse.dataset.collation import collate_vectors
24
- from omegaconf import DictConfig, OmegaConf
25
-
26
- from nemo.collections.asr.parts.preprocessing.perturb import process_augmentations
27
- from nemo.collections.asr.parts.preprocessing.segment import AudioSegment
28
- from nemo.collections.common.tokenizers.aggregate_tokenizer import TokenizerWrapper
29
- from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
30
- from nemo.core.neural_types import AudioSignal, LabelsType, LengthsType, NeuralType
31
- from nemo.utils import logging
32
-
33
- NON_SPEECH_LABEL = 0
34
- SPEECH_LABEL = 1
35
- EOU_LABEL = 2
36
- EOB_LABEL = 3
37
- EOU_STRING = '<EOU>'
38
- EOB_STRING = '<EOB>'
39
-
40
- # These augmentations are not supported yet, since they will need to change the SOU/EOU timestamps
41
- EOU_INVALID_AUGMENTATIONS = ['random_segment', 'speed', 'time_stretch']
42
-
43
-
44
- @dataclass
45
- class AudioToTextEOUBatch:
46
- """
47
- Data class for ASR-EOU batch.
48
- """
49
-
50
- sample_ids: List | None = None
51
- audio_filepaths: List | None = None
52
- audio_signal: torch.Tensor | None = None
53
- audio_lengths: torch.Tensor | None = None
54
- text_tokens: torch.Tensor | None = None
55
- text_token_lengths: torch.Tensor | None = None
56
- eou_targets: torch.Tensor | None = None
57
- eou_target_lengths: torch.Tensor | None = None
58
-
59
-
60
- @dataclass
61
- class RandomPaddingConfig:
62
- prob: float = 0.9 # probability of applying padding
63
- min_pad_duration: float = 0.0 # minimum duration of pre/post padding in seconds
64
- max_pad_duration: float = 5.0 # maximum duration of pre/post padding in seconds
65
- max_total_duration: float = 40.0 # maximum total duration of the padded audio in seconds
66
- min_pre_pad_duration: float = 0.0 # minimum duration of pre-padding in seconds
67
- min_post_pad_duration: float = 2.0 # minimum duration of post-padding in seconds
68
- pad_distribution: str = 'uniform' # distribution of padding duration, 'uniform' or 'normal' or 'constant'
69
- normal_mean: float = 0.5 # mean of normal distribution for padding duration
70
- normal_std: float = 2.0 # standard deviation of normal distribution for padding duration
71
- pre_pad_duration: float = 0.2 # amount of left-padding when pad_distribution='constant'
72
- post_pad_duration: float = 3.0 # amount of right-padding when pad_distribution='constant'
73
-
74
-
75
- class LhotseSpeechToTextBpeEOUDataset(torch.utils.data.Dataset):
76
- """
77
- This dataset processes the audio data and the corresponding text data to generate the ASR labels,
78
- along with EOU labels for each frame. The audios used in this dataset should only contain speech with
79
- NO precedding or following silence. The dataset also randomly pads non-speech frames before and after
80
- the audio signal for training EOU prediction task.
81
-
82
- To generate EOU labels, the last frame of utterance will be marked as "end of utterance" (labeled as `2`),
83
- while if it's a backchannel utterance it'll be marked asd "end of backchannel" (labeled as `3`).
84
- The rest of the speech frames will be marked as "speech" (labeled as `1`).
85
- The padded non-speech signals will be marked as "non-speech" (labeled as 0).
86
-
87
- Args:
88
- cfg: DictConfig object container following keys, usually taken from your `model.train_ds`
89
- or `model.validation_ds` config:
90
- ```
91
- sample_rate: # int, Sample rate of the audio signal
92
- window_stride: # float, Window stride for audio encoder
93
- subsampling_factor: # Subsampling factor for audio encoder
94
- random_padding: # Random padding configuration
95
- prob: 0.9 # probability of applying padding
96
- min_pad_duration: 0.5 # minimum duration of pre/post padding in seconds
97
- max_pad_duration: 2.0 # maximum duration of pre/post padding in seconds
98
- max_total_duration: 30.0 # maximum total duration of the padded audio in seconds
99
- pad_distribution: 'uniform' # distribution of padding duration, 'uniform' or 'normal' or 'constant'
100
- normal_mean: 0.5 # mean of normal distribution for padding duration
101
- normal_std: 2.0 # standard deviation of normal distribution for padding duration
102
- pre_pad_duration: 0.2 # amount of left-padding when pad_distribution='constant'
103
- post_pad_duration: 3.0 # amount of right-padding when pad_distribution='constant'
104
- ```
105
-
106
- Returns:
107
- audio: torch.Tensor of audio signal
108
- audio_lens: torch.Tensor of audio signal length
109
- text_tokens: torch.Tensor of text text_tokens
110
- text_token_lens: torch.Tensor of text token length
111
- eou_targets (optional): torch.Tensor of EOU labels
112
- eou_target_lens (optional): torch.Tensor of EOU label length
113
-
114
- The input manifest should be a jsonl file where each line is a python dictionary.
115
- Example manifest sample:
116
- {
117
- "audio_filepath": "/path/to/audio.wav",
118
- "offset": 0.0,
119
- "duration": 6.0,
120
- "sou_time": [0.3, 4.0],
121
- "eou_time": [1.3, 4.5],
122
- "utterances": ["Tell me a joke", "Ah-ha"],
123
- "is_backchannel": [False, True],
124
- }
125
-
126
- Padding logic:
127
- 0. Don't pad when `random_padding` is None or during validation/test
128
- 1. randomly draw a probability to decide whether to apply padding
129
- 2. if not padding or audio duration is longer than the maximum duration,
130
- 1) return the original audio and EOU labels
131
- 3. if apply padding,
132
- 1) get the max padding duration based on the maximum total duration and the audio duration
133
- 2) randomly draw a total padding duration based on the given distribution
134
- 3) randomly split the total padding duration into pre-padding and post-padding
135
- 4) randomly generate the non-speech signal (audio signal=0) for pre-padding and post-padding
136
- 5) concatenate the pre-padding, audio, and post-padding to get the padded audio signal
137
- 6) update the EOU labels accordingly
138
-
139
- """
140
-
141
- @property
142
- def output_types(self) -> Optional[Dict[str, NeuralType]]:
143
- """Define the output types of the dataset."""
144
- return {
145
- 'audio': NeuralType(('B', 'T'), AudioSignal()),
146
- 'audio_lens': NeuralType(tuple('B'), LengthsType()),
147
- 'eou_targets': NeuralType(('B', 'T'), LabelsType()),
148
- 'eou_target_lens': NeuralType(tuple('B'), LengthsType()),
149
- 'text_tokens': NeuralType(tuple('B', 'T'), LengthsType(), optional=True),
150
- 'text_token_lens': NeuralType(tuple('B'), LengthsType(), optional=True),
151
- }
152
-
153
- def __init__(self, cfg: DictConfig, tokenizer: TokenizerSpec, return_cuts: bool = False):
154
- super().__init__()
155
- self.cfg = cfg
156
- self.return_cuts = return_cuts
157
- self.eou_string = self.cfg.get('eou_string', EOU_STRING)
158
- self.eob_string = self.cfg.get('eob_string', EOB_STRING)
159
- if cfg.get('check_tokenizer', True):
160
- self._check_special_tokens(tokenizer)
161
-
162
- self.tokenizer = TokenizerWrapper(tokenizer)
163
- self.load_audio = AudioSamples(fault_tolerant=True)
164
- self.sample_rate = self.cfg.get('sample_rate', 16000)
165
- self.window_stride = self.cfg.get('window_stride', 0.01)
166
- self.num_sample_per_mel_frame = int(
167
- self.window_stride * self.sample_rate
168
- ) # 160 samples for every 1ms by default
169
- self.num_mel_frame_per_target_frame = int(self.cfg.get('subsampling_factor', 8))
170
- self.add_sep_before_eou = self.cfg.get('add_sep_before_eou', False)
171
- self.add_eou_to_text = self.cfg.get('add_eou_to_text', True)
172
- self.pad_eou_label_secs = self.cfg.get('pad_eou_label_secs', 0.0)
173
- self.padding_cfg = self.cfg.get('random_padding', None)
174
- if self.padding_cfg is not None:
175
- self.padding_cfg = OmegaConf.to_container(self.padding_cfg, resolve=True)
176
- self.padding_cfg = RandomPaddingConfig(**self.padding_cfg)
177
- self.ignore_eob_label = self.cfg.get('ignore_eob_label', False)
178
- self.augmentor = None
179
- if self.cfg.get('augmentor', None) is not None:
180
- augmentor = {}
181
- aug_cfg = OmegaConf.to_container(self.cfg.augmentor, resolve=True)
182
- for k, v in aug_cfg.items():
183
- if k in EOU_INVALID_AUGMENTATIONS:
184
- logging.warning(f"EOU dataset does not support {k} augmentation yet, skipping.")
185
- continue
186
- augmentor[k] = v
187
-
188
- if len(augmentor) > 0:
189
- logging.info(f"EOU dataset will apply augmentations: {augmentor}")
190
- self.augmentor = process_augmentations(augmentor)
191
-
192
- def _check_special_tokens(self, tokenizer: TokenizerSpec):
193
- """
194
- Check if the special tokens are in the tokenizer vocab.
195
- """
196
- special_tokens = set([self.eou_string, self.eob_string])
197
- vocab_size = tokenizer.vocab_size
198
- special_tokens_in_vocab = set([tokenizer.ids_to_text(vocab_size - 1), tokenizer.ids_to_text(vocab_size - 2)])
199
- if special_tokens != special_tokens_in_vocab:
200
- raise ValueError(
201
- f"Input special tokens {special_tokens} don't match with the tokenizer vocab {special_tokens_in_vocab}. "
202
- f"Please add them to tokenizer or change input `eou_string` and/or `eob_string` accordingly. "
203
- "Special tokens should be added as the last two tokens in the new tokenizer. "
204
- "Please refer to scripts/asr_end_of_utterance/tokenizers/add_special_tokens_to_sentencepiece.py for details."
205
- )
206
-
207
- def __getitem__(self, cuts: CutSet) -> AudioToTextEOUBatch:
208
- audio, audio_lens, cuts = self.load_audio(cuts)
209
- audio_signals = []
210
- audio_lengths = []
211
- eou_targets = []
212
- text_tokens = []
213
- sample_ids = []
214
- audio_filepaths = []
215
-
216
- for i in range(len(cuts)):
217
- c = cuts[i]
218
- if isinstance(c, MixedCut):
219
- c = c.first_non_padding_cut
220
-
221
- sample_ids.append(c.id)
222
- audio_filepaths.append(c.recording.sources[0].source)
223
-
224
- audio_i = audio[i]
225
- audio_len_i = audio_lens[i]
226
-
227
- # Get EOU labels and text tokens
228
- eou_targets_i = self._get_frame_labels(c, audio_len_i)
229
- text_tokens_i = self._get_text_tokens(c)
230
-
231
- # Maybe apply random padding to both sides of the audio
232
- audio_i, audio_len_i, eou_targets_i = self._random_pad_audio(audio_i, audio_len_i, eou_targets_i)
233
-
234
- # Maybe apply augmentations to the audio signal after padding
235
- audio_i, audio_len_i = self._maybe_augment_audio(audio_i, audio_len_i)
236
-
237
- # Append the processed audio, EOU labels, and text tokens to the lists
238
- audio_signals.append(audio_i)
239
- audio_lengths.append(audio_len_i)
240
- eou_targets.append(eou_targets_i)
241
- text_tokens.append(text_tokens_i)
242
-
243
- audio_signals = collate_vectors(audio_signals, padding_value=0)
244
- audio_lengths = torch.tensor(audio_lengths, dtype=torch.long)
245
- eou_target_lens = torch.tensor([t.size(0) for t in eou_targets], dtype=torch.long)
246
- eou_targets = collate_vectors(eou_targets, padding_value=0)
247
- text_token_lens = torch.tensor([t.size(0) for t in text_tokens], dtype=torch.long)
248
- text_tokens = collate_vectors(text_tokens, padding_value=0)
249
-
250
- if self.return_cuts:
251
- return audio_signals, audio_lengths, cuts
252
-
253
- return AudioToTextEOUBatch(
254
- sample_ids=sample_ids,
255
- audio_filepaths=audio_filepaths,
256
- audio_signal=audio_signals,
257
- audio_lengths=audio_lengths,
258
- text_tokens=text_tokens,
259
- text_token_lengths=text_token_lens,
260
- eou_targets=eou_targets,
261
- eou_target_lengths=eou_target_lens,
262
- )
263
-
264
- def _audio_len_to_frame_len(self, num_samples: int):
265
- """
266
- Convert the raw audio length to the number of frames after audio encoder.
267
-
268
- self.num_sample_per_mel_frame = int(
269
- self.cfg.get('window_stride', 0.01) * self.cfg.get('sample_rate', 16000)
270
- ) # 160 samples for every 1ms by default
271
- self.num_mel_frame_per_target_frame = int(self.cfg.get('subsampling_factor', 8))
272
- """
273
- mel_frame_count = math.ceil((num_samples + 1) / self.num_sample_per_mel_frame)
274
- hidden_length = math.ceil(mel_frame_count / self.num_mel_frame_per_target_frame)
275
- return hidden_length
276
-
277
- def _repeat_eou_labels(self, eou_targets: torch.Tensor) -> torch.Tensor:
278
- """
279
- Repeat EOU labels according to self.pad_eou_label_secs
280
- Args:
281
- eou_targets: torch.Tensor of EOU labels, shape [T]
282
- Returns:
283
- eou_targets: torch.Tensor of padded EOU labels, shape [T]
284
- """
285
- if not self.pad_eou_label_secs or self.pad_eou_label_secs <= 0:
286
- return eou_targets
287
-
288
- eou_len = self._audio_len_to_frame_len(int(self.pad_eou_label_secs * self.sample_rate))
289
-
290
- i = 0
291
- while i < eou_targets.size(0):
292
- if eou_targets[i] == EOU_LABEL or eou_targets[i] == EOB_LABEL:
293
- # repeat the label for the next eou_len samples
294
- start = i
295
- end = min(i + eou_len, eou_targets.size(0))
296
- j = start + 1
297
- while j < end:
298
- if eou_targets[j] != NON_SPEECH_LABEL:
299
- # do not overwrite the label if it's not non-speech
300
- break
301
- j += 1
302
- end = min(j, end)
303
- # fill the non-speech label with the current EOU/EOB label
304
- eou_targets[start:end] = eou_targets[i]
305
- i = end
306
- else:
307
- i += 1
308
- return eou_targets
309
-
310
- def _get_frame_labels(self, cut: Cut, num_samples: int):
311
- """
312
- Get the frame-level EOU labels for a single audio segment.
313
- Args:
314
- cut: Cut object
315
- num_samples: int, the number of samples in the audio segment
316
- Returns:
317
- eou_targets: torch.Tensor of EOU labels, shape [T]
318
- """
319
- hidden_length = self._audio_len_to_frame_len(num_samples)
320
- if not "sou_time" in cut.custom or not "eou_time" in cut.custom:
321
- # assume only single speech segment
322
- text = cut.supervisions[0].text
323
- if not text:
324
- # skip empty utterances
325
- return torch.zeros(hidden_length).long()
326
- eou_targets = torch.ones(hidden_length).long() # speech label
327
- eou_targets[-1] = EOU_LABEL # by default it's end of utterance
328
- if cut.has_custom("is_backchannel") and cut.custom["is_backchannel"] and not self.ignore_eob_label:
329
- eou_targets[-1] = EOB_LABEL # end of backchannel
330
- return eou_targets
331
-
332
- sou_time = cut.custom["sou_time"]
333
- eou_time = cut.custom["eou_time"]
334
- if not isinstance(sou_time, list):
335
- sou_time = [sou_time]
336
- if not isinstance(eou_time, list):
337
- eou_time = [eou_time]
338
-
339
- assert len(sou_time) == len(
340
- eou_time
341
- ), f"Number of SOU time and EOU time do not match: SOU ({sou_time}) vs EOU ({eou_time})"
342
-
343
- if cut.has_custom("is_backchannel"):
344
- is_backchannel = cut.custom["is_backchannel"]
345
- if not isinstance(is_backchannel, list):
346
- is_backchannel = [is_backchannel]
347
- assert len(sou_time) == len(
348
- is_backchannel
349
- ), f"Number of SOU and backchannel do not match: SOU ({len(sou_time)}) vs backchannel ({len(is_backchannel)})"
350
- else:
351
- is_backchannel = [False] * len(sou_time)
352
-
353
- eou_targets = torch.zeros(hidden_length).long()
354
- for i in range(len(sou_time)):
355
- if sou_time[i] is None or eou_time[i] is None or sou_time[i] < 0 or eou_time[i] < 0:
356
- # skip empty utterances
357
- continue
358
- sou_idx = self._audio_len_to_frame_len(int((sou_time[i] - cut.start) * self.sample_rate))
359
- seg_len_in_secs = eou_time[i] - sou_time[i]
360
- seg_len = self._audio_len_to_frame_len(int(seg_len_in_secs * self.sample_rate))
361
- eou_targets[sou_idx : sou_idx + seg_len] = SPEECH_LABEL
362
- last_idx = min(sou_idx + seg_len - 1, hidden_length - 1)
363
- if is_backchannel[i] and not self.ignore_eob_label:
364
- eou_targets[last_idx] = EOB_LABEL # end of backchannel
365
- else:
366
- eou_targets[last_idx] = EOU_LABEL # end of utterance
367
-
368
- return eou_targets
369
-
370
- def _get_text_tokens(self, cut: Cut):
371
- """
372
- Add EOU labels to the text and get the text tokens for a single audio segment.
373
- Args:
374
- cut: Cut object
375
- Returns:
376
- text_tokens: torch.Tensor of text tokens, shape [T]
377
- """
378
- if not cut.has_custom("sou_time") or not cut.has_custom("eou_time") or not cut.has_custom("utterances"):
379
- # assume only single speech segment
380
- utterances = [cut.supervisions[0].text]
381
- else:
382
- utterances = cut.custom["utterances"]
383
-
384
- if not isinstance(utterances, list):
385
- utterances = [utterances]
386
-
387
- if cut.has_custom("is_backchannel"):
388
- is_backchannel = cut.custom["is_backchannel"]
389
- if not isinstance(is_backchannel, list):
390
- is_backchannel = [is_backchannel]
391
- assert len(utterances) == len(
392
- is_backchannel
393
- ), f"Number of utterances and backchannel do not match: utterance ({len(utterances)}) vs backchannel ({len(is_backchannel)})"
394
- else:
395
- is_backchannel = [False] * len(utterances)
396
-
397
- total_text = ""
398
- for i, text in enumerate(utterances):
399
- if not text:
400
- # skip empty utterances
401
- continue
402
- if self.add_eou_to_text:
403
- eou_string = self.eob_string if is_backchannel[i] and not self.ignore_eob_label else self.eou_string
404
- if self.add_sep_before_eou:
405
- eou_string = " " + eou_string
406
- else:
407
- eou_string = ""
408
- total_text += text + eou_string + " "
409
- total_text = total_text.strip()
410
- return torch.as_tensor(self.tokenizer(total_text))
411
-
412
- def _random_pad_audio(self, audio: torch.Tensor, audio_len: torch.Tensor, eou_targets: torch.Tensor):
413
- """
414
- Randomly pad the audio signal with non-speech signal before and after the audio signal.
415
- Args:
416
- audio: torch.Tensor of a single audio signal, shape [T]
417
- audio_len: torch.Tensor of audio signal length, shape [1]
418
- eou_targets: torch.Tensor of EOU labels, shape [T]
419
- Returns:
420
- padded_audio: torch.Tensor of padded audio signal, shape [T+padding]
421
- padded_audio_len: torch.Tensor of padded audio signal length, shape [1]
422
- padded_eou_targets: torch.Tensor of padded EOU labels, shape [T+padding]
423
- padded_eou_targets_len: torch.Tensor of padded EOU label length, shape [1]
424
- """
425
- p = np.random.rand()
426
- if self.padding_cfg is None or p > self.padding_cfg.prob:
427
- # don't apply padding
428
- eou_targets = self._repeat_eou_labels(eou_targets)
429
- return audio, audio_len, eou_targets
430
-
431
- duration = audio_len.item() / self.cfg.sample_rate
432
- # if already longer than the maximum duration, return the original audio
433
- if duration >= self.padding_cfg.max_total_duration:
434
- return audio, audio_len, eou_targets
435
-
436
- # apply padding
437
- audio = audio[:audio_len]
438
-
439
- self.padding_cfg.min_pre_pad_duration = max(
440
- self.padding_cfg.min_pre_pad_duration, self.padding_cfg.min_pad_duration
441
- )
442
- self.padding_cfg.min_post_pad_duration = max(
443
- self.padding_cfg.min_post_pad_duration, self.padding_cfg.min_pad_duration
444
- )
445
-
446
- max_padding_duration = max(0, self.padding_cfg.max_total_duration - duration)
447
- if max_padding_duration <= self.padding_cfg.min_pre_pad_duration + self.padding_cfg.min_post_pad_duration:
448
- min_padding_duration = 0
449
- else:
450
- min_padding_duration = self.padding_cfg.min_pre_pad_duration + self.padding_cfg.min_post_pad_duration
451
-
452
- pre_padding_duration = None
453
- post_padding_duration = None
454
-
455
- if self.padding_cfg.pad_distribution == 'uniform':
456
- total_padding_duration = np.random.uniform(min_padding_duration, max_padding_duration)
457
- elif self.padding_cfg.pad_distribution == 'normal':
458
- total_padding_duration = np.random.normal(self.padding_cfg.normal_mean, self.padding_cfg.normal_std)
459
- total_padding_duration = max(min_padding_duration, min(max_padding_duration, total_padding_duration))
460
- elif self.padding_cfg.pad_distribution == 'constant':
461
- pass
462
- else:
463
- raise ValueError(f"Unknown padding distribution: {self.padding_cfg.pad_distribution}")
464
-
465
- if self.padding_cfg.pad_distribution == 'constant':
466
- pre_padding_duration = self.padding_cfg.pre_pad_duration
467
- post_padding_duration = self.padding_cfg.post_pad_duration
468
- elif min_padding_duration == 0:
469
- pre_padding_duration = total_padding_duration / 2
470
- post_padding_duration = total_padding_duration / 2
471
- else:
472
- post_padding_duration = np.random.uniform(
473
- self.padding_cfg.min_post_pad_duration, total_padding_duration - self.padding_cfg.min_pre_pad_duration
474
- )
475
- pre_padding_duration = total_padding_duration - post_padding_duration
476
-
477
- if self.padding_cfg.max_pad_duration is not None:
478
- pre_padding_duration = min(pre_padding_duration, self.padding_cfg.max_pad_duration)
479
- post_padding_duration = min(post_padding_duration, self.padding_cfg.max_pad_duration)
480
-
481
- pre_padding_len = math.ceil(pre_padding_duration * self.cfg.sample_rate)
482
- post_padding_len = math.ceil(post_padding_duration * self.cfg.sample_rate)
483
-
484
- # pad the audio signal
485
- pre_padding = torch.zeros(pre_padding_len, dtype=audio.dtype)
486
- post_padding = torch.zeros(post_padding_len, dtype=audio.dtype)
487
- padded_audio = torch.cat((pre_padding, audio, post_padding), dim=0)
488
- padded_audio_len = audio_len + pre_padding_len + post_padding_len
489
-
490
- # pad the EOU labels
491
- pre_padding_eou_len = self._audio_len_to_frame_len(pre_padding_len)
492
- post_padding_eou_len = self._audio_len_to_frame_len(post_padding_len)
493
- pre_padding_eou = torch.zeros(pre_padding_eou_len, dtype=eou_targets.dtype)
494
- post_padding_eou = torch.zeros(post_padding_eou_len, dtype=eou_targets.dtype)
495
- padded_eou_targets = torch.cat((pre_padding_eou, eou_targets, post_padding_eou), dim=0)
496
- padded_eou_targets = self._repeat_eou_labels(padded_eou_targets)
497
- return padded_audio, padded_audio_len, padded_eou_targets
498
-
499
    def _maybe_augment_audio(self, audio: torch.Tensor, audio_len: torch.Tensor):
        """
        Apply augmentation to the audio signal if an augmentor is provided.
        Args:
            audio: torch.Tensor of a single audio signal, shape [T]
            audio_len: torch.Tensor of audio signal length, shape [1]
        Returns:
            augmented_audio: torch.Tensor of augmented audio signal, shape [T]
            augmented_audio_len: length of the augmented audio signal.
                NOTE(review): on the augmented path this is a plain Python int
                (``audio.size(0)``), not a tensor like the input -- confirm
                downstream callers accept both.
        """
        # No augmentor configured: pass the signal through unchanged.
        if self.augmentor is None:
            return audio, audio_len

        # Cast to AudioSegment so the perturbation API can operate on it;
        # only the valid [:audio_len] prefix of the signal is kept.
        audio_segment = AudioSegment(
            samples=audio[:audio_len].numpy(),
            sample_rate=self.sample_rate,
            offset=0,
            duration=audio_len.item() / self.sample_rate,
        )
        # Apply augmentation in place on the segment, then convert back to a tensor.
        self.augmentor.perturb(audio_segment)
        audio = torch.from_numpy(audio_segment.samples).float()
        audio_len = audio.size(0)

        return audio, audio_len
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/collections/asr/data/audio_to_label.py DELETED
@@ -1,1422 +0,0 @@
1
- # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- import io
15
- import os
16
- from typing import Dict, List, Optional, Union
17
-
18
- import torch
19
-
20
- from nemo.collections.asr.data.audio_to_text import cache_datastore_manifests, expand_sharded_filepaths
21
- from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer
22
- from nemo.collections.asr.parts.preprocessing.segment import available_formats as valid_sf_formats
23
- from nemo.collections.common.parts.preprocessing import collections
24
- from nemo.core.classes import Dataset, IterableDataset
25
- from nemo.core.neural_types import AudioSignal, LabelsType, LengthsType, NeuralType, RegressionValuesType
26
- from nemo.utils import logging
27
- from nemo.utils import webdataset as wds
28
- from nemo.utils.distributed import webdataset_split_by_workers
29
-
30
- # List of valid file formats (prioritized by order of importance)
31
- VALID_FILE_FORMATS = ';'.join(['wav', 'mp3', 'flac', 'opus'] + [fmt.lower() for fmt in valid_sf_formats.keys()])
32
-
33
-
34
def repeat_signal(signal: torch.Tensor, sig_len: int, required_length: int) -> torch.Tensor:
    """Repeat a short signal until it reaches ``required_length`` samples.

    Args:
        signal (Tensor): input 1d signal
        sig_len (int): length of the input signal
        required_length (int): length of the generated signal

    Returns:
        signal (Tensor): signal of exactly ``required_length`` samples, built by
        tiling the input whole times and appending its last ``required_length %
        sig_len`` samples as the remainder.
    """
    # Removed a duplicated, dead initialization of the remainder buffer that
    # the original assigned twice before use.
    repeat = int(required_length // sig_len)
    rem = int(required_length % sig_len)
    rep_sig = torch.cat(repeat * [signal])
    if rem > 0:
        # Tail remainder: the last `rem` samples of the input.
        return torch.cat((rep_sig, signal[-rem:]))
    return rep_sig
54
-
55
-
56
def normalize(signal):
    """Normalize a signal to zero mean and unit peak amplitude.

    Args:
        signal(FloatTensor): signal to be normalized.
    """
    # Center on the mean, then scale so the largest magnitude becomes 1.
    centered = signal - signal.mean()
    peak = centered.abs().max()
    return centered / peak
63
-
64
-
65
def count_occurence(manifest_file_id):
    """Count number of wav files in Dict manifest_file_id. Use for _TarredAudioToLabelDataset.
    Args:
        manifest_file_id (Dict): Dict of files and their corresponding id. {'A-sub0' : 1, ..., 'S-sub10':100}
    Returns:
        count (Dict): Dict of wav files {'A' : 2, ..., 'S':10}
    """
    counts = dict()
    for file_id in manifest_file_id:
        # Strip the "-subN" suffix to recover the base audio file name.
        base_name = file_id.split("-sub")[0]
        if base_name in counts:
            counts[base_name] += 1
        else:
            counts[base_name] = 1
    return counts
77
-
78
-
79
- def _speech_collate_fn(batch, pad_id):
80
- """collate batch of audio sig, audio len, tokens, tokens len
81
- Args:
82
- batch (Optional[FloatTensor], Optional[LongTensor], LongTensor,
83
- LongTensor): A tuple of tuples of signal, signal lengths,
84
- encoded tokens, and encoded tokens length. This collate func
85
- assumes the signals are 1d torch tensors (i.e. mono audio).
86
- """
87
- _, audio_lengths, _, tokens_lengths = zip(*batch)
88
- max_audio_len = 0
89
- has_audio = audio_lengths[0] is not None
90
- if has_audio:
91
- max_audio_len = max(audio_lengths).item()
92
- max_tokens_len = max(tokens_lengths).item()
93
-
94
- audio_signal, tokens = [], []
95
- for sig, sig_len, tokens_i, tokens_i_len in batch:
96
- if has_audio:
97
- sig_len = sig_len.item()
98
- if sig_len < max_audio_len:
99
- pad = (0, max_audio_len - sig_len)
100
- sig = torch.nn.functional.pad(sig, pad)
101
- audio_signal.append(sig)
102
- tokens_i_len = tokens_i_len.item()
103
- if tokens_i_len < max_tokens_len:
104
- pad = (0, max_tokens_len - tokens_i_len)
105
- tokens_i = torch.nn.functional.pad(tokens_i, pad, value=pad_id)
106
- tokens.append(tokens_i)
107
-
108
- if has_audio:
109
- audio_signal = torch.stack(audio_signal)
110
- audio_lengths = torch.stack(audio_lengths)
111
- else:
112
- audio_signal, audio_lengths = None, None
113
- tokens = torch.stack(tokens)
114
- tokens_lengths = torch.stack(tokens_lengths)
115
-
116
- return audio_signal, audio_lengths, tokens, tokens_lengths
117
-
118
-
119
- def _fixed_seq_collate_fn(self, batch):
120
- """collate batch of audio sig, audio len, tokens, tokens len
121
- Args:
122
- batch (Optional[FloatTensor], Optional[LongTensor], LongTensor,
123
- LongTensor): A tuple of tuples of signal, signal lengths,
124
- encoded tokens, and encoded tokens length. This collate func
125
- assumes the signals are 1d torch tensors (i.e. mono audio).
126
- """
127
- _, audio_lengths, _, tokens_lengths = zip(*batch)
128
-
129
- has_audio = audio_lengths[0] is not None
130
- fixed_length = int(max(audio_lengths))
131
-
132
- audio_signal, tokens, new_audio_lengths = [], [], []
133
- for sig, sig_len, tokens_i, _ in batch:
134
- if has_audio:
135
- sig_len = sig_len.item()
136
- chunck_len = sig_len - fixed_length
137
-
138
- if chunck_len < 0:
139
- repeat = fixed_length // sig_len
140
- rem = fixed_length % sig_len
141
- sub = sig[-rem:] if rem > 0 else torch.tensor([])
142
- rep_sig = torch.cat(repeat * [sig])
143
- sig = torch.cat((rep_sig, sub))
144
- new_audio_lengths.append(torch.tensor(fixed_length))
145
-
146
- audio_signal.append(sig)
147
-
148
- tokens.append(tokens_i)
149
-
150
- if has_audio:
151
- audio_signal = torch.stack(audio_signal)
152
- audio_lengths = torch.stack(new_audio_lengths)
153
- else:
154
- audio_signal, audio_lengths = None, None
155
- tokens = torch.stack(tokens)
156
- tokens_lengths = torch.stack(tokens_lengths)
157
-
158
- return audio_signal, audio_lengths, tokens, tokens_lengths
159
-
160
-
161
def _vad_frame_seq_collate_fn(self, batch):
    """collate batch of audio sig, audio len, tokens, tokens len

    Slices each (zero-padded) signal into fixed-length windows with a fixed
    shift, so one input sample expands into several window-sized samples that
    all share the original sample's token/label.
    Args:
        batch (Optional[FloatTensor], Optional[LongTensor], LongTensor,
        LongTensor): A tuple of tuples of signal, signal lengths,
        encoded tokens, and encoded tokens length. This collate func
        assumes the signals are 1d torch tensors (i.e. mono audio).
        batch size equals to 1.
    """
    # Window size in samples, capped by the longest signal in the batch.
    slice_length = int(self.featurizer.sample_rate * self.window_length_in_sec)
    _, audio_lengths, _, tokens_lengths = zip(*batch)
    slice_length = int(min(slice_length, max(audio_lengths)))
    # Hop size in samples between consecutive windows.
    shift = int(self.featurizer.sample_rate * self.shift_length_in_sec)
    has_audio = audio_lengths[0] is not None

    # NOTE: `audio_lengths` is deliberately rebound here from the input tuple
    # to an output accumulator of per-window lengths.
    audio_signal, num_slices, tokens, audio_lengths = [], [], [], []

    # Zero-pad half a window on each side so edge frames get full context.
    append_len_start = slice_length // 2
    append_len_end = slice_length - slice_length // 2
    for sig, sig_len, tokens_i, _ in batch:
        if self.normalize_audio:
            sig = normalize(sig)
        start = torch.zeros(append_len_start)
        end = torch.zeros(append_len_end)
        sig = torch.cat((start, sig, end))
        sig_len += slice_length

        if has_audio:
            # Number of full windows that fit with the given shift.
            slices = torch.div(sig_len - slice_length, shift, rounding_mode='trunc')
            for slice_id in range(slices):
                start_idx = slice_id * shift
                end_idx = start_idx + slice_length
                signal = sig[start_idx:end_idx]
                audio_signal.append(signal)

            # Every window inherits the sample's token/label.
            num_slices.append(slices)
            tokens.extend([tokens_i] * slices)
            audio_lengths.extend([slice_length] * slices)

    if has_audio:
        audio_signal = torch.stack(audio_signal)
        audio_lengths = torch.tensor(audio_lengths)
    else:
        audio_signal, audio_lengths = None, None

    tokens = torch.stack(tokens)
    # tokens_lengths here reports the number of windows per original sample.
    tokens_lengths = torch.tensor(num_slices)
    return audio_signal, audio_lengths, tokens, tokens_lengths
209
-
210
-
211
class _AudioLabelDataset(Dataset):
    """
    Dataset that loads tensors via a json file containing paths to audio files,
    labels, durations and offsets (in seconds) and their target labels. Each new
    line is a different sample. JSON files should be of the following format::
        {"audio_filepath": "/path/to/audio_wav_0.wav", "duration": time_in_sec_0, "label": \
            target_label_0, "offset": offset_in_sec_0}
        ...
        {"audio_filepath": "/path/to/audio_wav_n.wav", "duration": time_in_sec_n, "label": \
            target_label_n, "offset": offset_in_sec_n}
    Args:
        manifest_filepath (Union[str, List[str]]): Dataset parameter. Path to JSON containing data.
        labels (list): Dataset parameter. List of target classes that can be output by the speaker recognition model.
        featurizer
        min_duration (float): Dataset parameter. All training files which have a duration less than min_duration
            are dropped. Note: Duration is read from the manifest JSON.
            Defaults to 0.1.
        max_duration (float): Dataset parameter.
            All training files which have a duration more than max_duration
            are dropped. Note: Duration is read from the manifest JSON.
            Defaults to None.
        trim (bool): Whether to use trim silence from beginning and end of audio signal using librosa.effects.trim().
            Defaults to False.
        channel selector (Union[str, int, List[int]]): string denoting the downmix mode, an integer denoting the
            channel to be selected, or an iterable of integers denoting a subset of channels. Channel selector is
            using zero-based indexing. If set to `None`, the original signal will be used.
    """

    @property
    def output_types(self) -> Optional[Dict[str, NeuralType]]:
        """Returns definitions of module output ports."""

        output_types = {
            'audio_signal': NeuralType(
                ('B', 'T'),
                (
                    # `self is not None` is vacuous on a bound property; kept as-is.
                    AudioSignal(freq=self._sample_rate)
                    if self is not None and hasattr(self, '_sample_rate')
                    else AudioSignal()
                ),
            ),
            'a_sig_length': NeuralType(tuple('B'), LengthsType()),
        }

        # Regression tasks emit float targets; classification emits label ids.
        if self.is_regression_task:
            output_types.update(
                {
                    'targets': NeuralType(tuple('B'), RegressionValuesType()),
                    'targets_length': NeuralType(tuple('B'), LengthsType()),
                }
            )
        else:

            output_types.update(
                {
                    'label': NeuralType(tuple('B'), LabelsType()),
                    'label_length': NeuralType(tuple('B'), LengthsType()),
                }
            )

        return output_types

    def __init__(
        self,
        *,
        manifest_filepath: Union[str, List[str]],
        labels: List[str],
        featurizer,
        min_duration: Optional[float] = 0.1,
        max_duration: Optional[float] = None,
        trim: bool = False,
        channel_selector: Optional[Union[str, int, List[int]]] = None,
        is_regression_task: bool = False,
        cal_labels_occurrence: Optional[bool] = False,
    ):
        super().__init__()
        # Allow comma-separated manifest paths in a single string.
        if isinstance(manifest_filepath, str):
            manifest_filepath = manifest_filepath.split(',')
        cache_datastore_manifests(manifest_filepaths=manifest_filepath, cache_audio=True)
        self.collection = collections.ASRSpeechLabel(
            manifests_files=manifest_filepath,
            min_duration=min_duration,
            max_duration=max_duration,
            is_regression_task=is_regression_task,
            cal_labels_occurrence=cal_labels_occurrence,
        )

        self.featurizer = featurizer
        self.trim = trim
        self.channel_selector = channel_selector
        self.is_regression_task = is_regression_task

        if not is_regression_task:
            # Fall back to the labels discovered in the manifests when none are given.
            self.labels = labels if labels else self.collection.uniq_labels
            self.num_classes = len(self.labels) if self.labels is not None else 1
            # Bidirectional label <-> integer-id maps.
            self.label2id, self.id2label = {}, {}
            self.id2occurrence, self.labels_occurrence = {}, []

            for label_id, label in enumerate(self.labels):
                self.label2id[label] = label_id
                self.id2label[label_id] = label
                if cal_labels_occurrence:
                    self.id2occurrence[label_id] = self.collection.labels_occurrence[label]

            if cal_labels_occurrence:
                # Occurrence counts ordered by label id.
                self.labels_occurrence = [self.id2occurrence[k] for k in sorted(self.id2occurrence)]

            # Log a small sample of the mapping for debugging.
            for idx in range(len(self.labels[:5])):
                logging.debug(" label id {} and its mapped label {}".format(idx, self.id2label[idx]))

        else:
            self.labels = []
            self.num_classes = 1

    def __len__(self):
        return len(self.collection)

    def __getitem__(self, index):
        sample = self.collection[index]

        offset = sample.offset

        if offset is None:
            offset = 0

        # Load (and optionally trim / channel-select) the audio for this sample.
        features = self.featurizer.process(
            sample.audio_file,
            offset=offset,
            duration=sample.duration,
            trim=self.trim,
            channel_selector=self.channel_selector,
        )
        f, fl = features, torch.tensor(features.shape[0]).long()

        if not self.is_regression_task:
            t = torch.tensor(self.label2id[sample.label]).long()
        else:
            t = torch.tensor(sample.label).float()

        tl = torch.tensor(1).long()  # For compatibility with collate_fn used later

        return f, fl, t, tl
354
-
355
-
356
- # Ported from https://github.com/NVIDIA/OpenSeq2Seq/blob/master/open_seq2seq/data/speech2text/speech_commands.py
357
class AudioToClassificationLabelDataset(_AudioLabelDataset):
    """
    Dataset that loads tensors via a json file containing paths to audio
    files, command class, and durations (in seconds). Each new line is a
    different sample. Example below:
    {"audio_filepath": "/path/to/audio_wav_0.wav", "duration": time_in_sec_0, "label": \
        target_label_0, "offset": offset_in_sec_0}
    ...
    {"audio_filepath": "/path/to/audio_wav_n.wav", "duration": time_in_sec_n, "label": \
        target_label_n, "offset": offset_in_sec_n}
    Args:
        manifest_filepath (Union[str, List[str]]): Path to manifest json as described above. Can
            be comma-separated paths.
        labels (Optional[list]): String containing all the possible labels to map to
            if None then automatically picks from ASRSpeechLabel collection.
        featurizer: Initialized featurizer class that converts paths of
            audio to feature tensors
        max_duration: If audio exceeds this length, do not include in dataset
        min_duration: If audio is less than this length, do not include
            in dataset
        trim: Boolean flag whether to trim the audio
    """

    def _collate_fn(self, batch):
        """Collate by zero-padding audio and labels to the batch maxima (pad id 0)."""
        return _speech_collate_fn(batch, pad_id=0)
382
-
383
-
384
class AudioToSpeechLabelDataset(_AudioLabelDataset):
    """
    Dataset that loads tensors via a json file containing paths to audio
    files, command class, and durations (in seconds). Each new line is a
    different sample. Example below:
    {"audio_filepath": "/path/to/audio_wav_0.wav", "duration": time_in_sec_0, "label": \
        target_label_0, "offset": offset_in_sec_0}
    ...
    {"audio_filepath": "/path/to/audio_wav_n.wav", "duration": time_in_sec_n, "label": \
        target_label_n, "offset": offset_in_sec_n}
    Args:
        manifest_filepath (Union[str, List[str]]): Path to manifest json as described above. Can
            be comma-separated paths.
        labels (Optional[list]): String containing all the possible labels to map to
            if None then automatically picks from ASRSpeechLabel collection.
        min_duration (float): Dataset parameter.
            All training files which have a duration less than min_duration
            are dropped. Note: Duration is read from the manifest JSON.
            Defaults to 0.1.
        max_duration (float): Dataset parameter.
            All training files which have a duration more than max_duration
            are dropped. Note: Duration is read from the manifest JSON.
            Defaults to None.
        trim (bool): Whether to use trim silence from beginning and end
            of audio signal using librosa.effects.trim().
            Defaults to False.
        channel selector (Union[str, int, List[int]]): string denoting the downmix mode, an integer denoting the
            channel to be selected, or an iterable of integers denoting a subset of channels. Channel selector is
            using zero-based indexing. If set to `None`, the original signal will be used.
        window_length_in_sec (float): length of window/slice (in seconds)
            Use this for speaker recognition and VAD tasks.
        shift_length_in_sec (float): amount of shift of window for generating the frame for VAD task in a batch
            Use this for VAD task during inference.
        normalize_audio (bool): Whether to normalize audio signal.
            Defaults to False.
        is_regression_task (bool): Whether the dataset is for a regression task instead of classification.
            Defaults to False.
        cal_labels_occurrence (bool): Whether to calculate occurrence of labels
            Defaults to False.
    """

    def __init__(
        self,
        *,
        manifest_filepath: Union[str, List[str]],
        labels: List[str],
        featurizer,
        min_duration: Optional[float] = 0.1,
        max_duration: Optional[float] = None,
        trim: bool = False,
        channel_selector: Optional[Union[str, int, List[int]]] = None,
        window_length_in_sec: Optional[float] = 8,
        shift_length_in_sec: Optional[float] = 1,
        normalize_audio: bool = False,
        is_regression_task: bool = False,
        cal_labels_occurrence: Optional[bool] = False,
    ):
        # Windowing parameters are only consumed by the collate functions below.
        self.window_length_in_sec = window_length_in_sec
        self.shift_length_in_sec = shift_length_in_sec
        self.normalize_audio = normalize_audio

        logging.debug("Window/slice length considered for collate func is {}".format(self.window_length_in_sec))
        logging.debug("Shift length considered for collate func is {}".format(self.shift_length_in_sec))

        super().__init__(
            manifest_filepath=manifest_filepath,
            labels=labels,
            featurizer=featurizer,
            min_duration=min_duration,
            max_duration=max_duration,
            trim=trim,
            channel_selector=channel_selector,
            is_regression_task=is_regression_task,
            cal_labels_occurrence=cal_labels_occurrence,
        )

    def fixed_seq_collate_fn(self, batch):
        """Collate to a fixed signal length by repeating short signals."""
        return _fixed_seq_collate_fn(self, batch)

    def vad_frame_seq_collate_fn(self, batch):
        """Collate by slicing signals into fixed-length windows (VAD inference)."""
        return _vad_frame_seq_collate_fn(self, batch)
465
-
466
-
467
class _TarredAudioLabelDataset(IterableDataset):
    """
    A similar Dataset to the AudioLabelDataSet, but which loads tarred audio files.

    Accepts a single comma-separated JSON manifest file (in the same style as for the AudioToSpeechLabelDataset),
    as well as the path(s) to the tarball(s) containing the wav files. Each line of the manifest should
    contain the information for one audio file, including at least the label and name of the audio
    file within the tarball.

    Valid formats for the audio_tar_filepaths argument include:
    (1) a single string that can be brace-expanded, e.g. 'path/to/audio.tar' or 'path/to/audio_{1..100}.tar.gz', or
    (2) a list of file paths that will not be brace-expanded, e.g. ['audio_1.tar', 'audio_2.tar', ...].

    Note: For brace expansion in (1), there may be cases where `{x..y}` syntax cannot be used due to shell interference.
    This occurs most commonly inside SLURM scripts. Therefore we provide a few equivalent replacements.
    Supported opening braces - { <=> (, [, < and the special tag _OP_.
    Supported closing braces - } <=> ), ], > and the special tag _CL_.
    For SLURM based tasks, we suggest the use of the special tags for ease of use.

    See the documentation for more information about accepted data and input formats.

    If using multiple processes the number of shards should be divisible by the number of workers to ensure an
    even split among workers. If it is not divisible, logging will give a warning but training will proceed.
    In addition, if using mutiprocessing, each shard MUST HAVE THE SAME NUMBER OF ENTRIES after filtering
    is applied. We currently do not check for this, but your program may hang if the shards are uneven!

    Notice that a few arguments are different from the AudioLabelDataSet; for example, shuffle (bool) has been
    replaced by shuffle_n (int).

    Additionally, please note that the len() of this DataLayer is assumed to be the length of the manifest
    after filtering. An incorrect manifest length may lead to some DataLoader issues down the line.

    Args:
        audio_tar_filepaths: Either a list of audio tarball filepaths, or a
            string (can be brace-expandable).
        manifest_filepath (str): Path to the manifest.
        labels (list): Dataset parameter.
            List of target classes that can be output by the speaker recognition model.
        featurizer
        shuffle_n (int): How many samples to look ahead and load to be shuffled.
            See WebDataset documentation for more details.
            Defaults to 0.
        min_duration (float): Dataset parameter.
            All training files which have a duration less than min_duration
            are dropped. Note: Duration is read from the manifest JSON.
            Defaults to 0.1.
        max_duration (float): Dataset parameter.
            All training files which have a duration more than max_duration
            are dropped. Note: Duration is read from the manifest JSON.
            Defaults to None.
        trim(bool): Whether to use trim silence from beginning and end
            of audio signal using librosa.effects.trim().
            Defaults to False.
        window_length_in_sec (float): length of slice/window (in seconds) # Pass this only for speaker recognition and VAD task
        shift_length_in_sec (float): amount of shift of window for generating the frame for VAD task. in a batch # Pass this only for VAD task during inference.
        normalize_audio (bool): Whether to normalize audio signal. Defaults to False.
        shard_strategy (str): Tarred dataset shard distribution strategy chosen as a str value during ddp.
            - `scatter`: The default shard strategy applied by WebDataset, where each node gets
                a unique set of shards, which are permanently pre-allocated and never changed at runtime.
            - `replicate`: Optional shard strategy, where each node gets all of the set of shards
                available in the tarred dataset, which are permanently pre-allocated and never changed at runtime.
                The benefit of replication is that it allows each node to sample data points from the entire
                dataset independently of other nodes, and reduces dependence on the value of `shuffle_n`.

                .. warning::
                    Replicated strategy allows every node to sample the entire set of available tarfiles,
                    and therefore more than one node may sample the same tarfile, and even sample the same
                    data points! As such, there is no assured guarantee that all samples in the dataset will be
                    sampled at least once during 1 epoch. Scattered strategy, on the other hand, on specific
                    occasions (when the number of shards is not divisible with ``world_size``), will not sample
                    the entire dataset. For these reasons it is not advisable to use tarred datasets as validation
                    or test datasets.
        global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
        world_size (int): Total number of processes, used for partitioning shards. Defaults to 0.
        is_regression_task (bool): Whether it is a regression task. Defualts to False.
    """

    def __init__(
        self,
        *,
        audio_tar_filepaths: Union[str, List[str]],
        manifest_filepath: Union[str, List[str]],
        labels: List[str],
        featurizer,
        shuffle_n: int = 0,
        min_duration: Optional[float] = 0.1,
        max_duration: Optional[float] = None,
        trim: bool = False,
        shard_strategy: str = "scatter",
        global_rank: int = 0,
        world_size: int = 0,
        is_regression_task: bool = False,
    ):
        cache_datastore_manifests(manifest_filepaths=manifest_filepath)
        self.collection = collections.ASRSpeechLabel(
            manifests_files=manifest_filepath,
            min_duration=min_duration,
            max_duration=max_duration,
            index_by_file_id=True,  # Must set this so the manifest lines can be indexed by file ID
        )

        # Per-file counts of manifest entries (a file can appear as -subN splits).
        self.file_occurence = count_occurence(self.collection.mapping)

        self.featurizer = featurizer
        self.trim = trim

        self.labels = labels if labels else self.collection.uniq_labels
        self.num_classes = len(self.labels)

        # Bidirectional label <-> integer-id maps.
        self.label2id, self.id2label = {}, {}
        for label_id, label in enumerate(self.labels):
            self.label2id[label] = label_id
            self.id2label[label_id] = label

        # Log a small sample of the mapping for debugging.
        for idx in range(len(self.labels[:5])):
            logging.debug(" label id {} and its mapped label {}".format(idx, self.id2label[idx]))

        # Distribute tar shards across ranks according to shard_strategy.
        audio_tar_filepaths = expand_sharded_filepaths(
            sharded_filepaths=audio_tar_filepaths,
            shard_strategy=shard_strategy,
            world_size=world_size,
            global_rank=global_rank,
        )

        # Put together WebDataset
        self._dataset = wds.DataPipeline(
            wds.SimpleShardList(urls=audio_tar_filepaths),
            webdataset_split_by_workers,
            wds.shuffle(shuffle_n),
            wds.tarfile_to_samples(),
            wds.rename(audio=VALID_FILE_FORMATS, key='__key__'),
            wds.to_tuple('audio', 'key'),
            self._filter,
            wds.map(self._build_sample),
        )

    def _filter(self, iterator):
        """This function is used to remove samples that have been filtered out by ASRSpeechLabel already.
        Otherwise, we would get a KeyError as _build_sample attempts to find the manifest entry for a sample
        that was filtered out (e.g. for duration).
        Note that if using multi-GPU training, filtering may lead to an imbalance in samples in each shard,
        which may make your code hang as one process will finish before the other.
        """

        class TarredAudioFilter:
            def __init__(self, collection, file_occurence):
                self.iterator = iterator
                self.collection = collection
                self.file_occurence = file_occurence
                self._iterable = self._internal_generator()

            def __iter__(self):
                self._iterable = self._internal_generator()
                return self

            def __next__(self):
                try:
                    values = next(self._iterable)
                except StopIteration:
                    # reset generator
                    self._iterable = self._internal_generator()
                    values = next(self._iterable)

                return values

            def _internal_generator(self):
                """
                WebDataset requires an Iterator, but we require an iterable that yields 1-or-more
                values per value inside self.iterator.

                Therefore wrap the iterator with a generator function that will yield 1-or-more
                values per sample in the iterator.
                """
                for _, tup in enumerate(self.iterator):
                    audio_bytes, audio_filename = tup

                    file_id, _ = os.path.splitext(os.path.basename(audio_filename))
                    # NOTE(review): membership is tested with `audio_filename`
                    # but the count lookup below uses `file_id` (basename sans
                    # extension) -- confirm the keys match; names with an
                    # extension may never pass this check.
                    if audio_filename in self.file_occurence:
                        for j in range(0, self.file_occurence[file_id]):
                            if j == 0:
                                audio_filename = file_id
                            else:
                                # Re-emit the same bytes once per "-subN" manifest split.
                                audio_filename = file_id + "-sub" + str(j)
                            yield audio_bytes, audio_filename

        return TarredAudioFilter(self.collection, self.file_occurence)

    def _build_sample(self, tup):
        """Builds the training sample by combining the data from the WebDataset with the manifest info."""
        audio_bytes, audio_filename = tup
        # Grab manifest entry from self.collection
        file_id, _ = os.path.splitext(os.path.basename(audio_filename))

        manifest_idx = self.collection.mapping[file_id]
        manifest_entry = self.collection[manifest_idx]

        offset = manifest_entry.offset
        if offset is None:
            offset = 0

        # Convert audio bytes to IO stream for processing (for SoundFile to read)
        audio_filestream = io.BytesIO(audio_bytes)
        features = self.featurizer.process(
            audio_filestream,
            offset=offset,
            duration=manifest_entry.duration,
            trim=self.trim,
        )

        audio_filestream.close()

        # Audio features
        f, fl = features, torch.tensor(features.shape[0]).long()

        t = self.label2id[manifest_entry.label]
        tl = 1  # For compatibility with collate_fn used later

        return f, fl, torch.tensor(t).long(), torch.tensor(tl).long()

    def __iter__(self):
        return self._dataset.__iter__()

    def __len__(self):
        # Length of the filtered manifest, not of the tar contents.
        return len(self.collection)
691
-
692
-
693
class TarredAudioToClassificationLabelDataset(_TarredAudioLabelDataset):
    """
    A similar Dataset to the AudioToClassificationLabelDataset, but which loads tarred audio files.

    Accepts a single comma-separated JSON manifest file (in the same style as for the AudioToClassificationLabelDataset),
    as well as the path(s) to the tarball(s) containing the wav files. Each line of the manifest should
    contain the information for one audio file, including at least the transcript and name of the audio
    file within the tarball.

    Valid formats for the audio_tar_filepaths argument include:
    (1) a single string that can be brace-expanded, e.g. 'path/to/audio.tar' or 'path/to/audio_{1..100}.tar.gz', or
    (2) a list of file paths that will not be brace-expanded, e.g. ['audio_1.tar', 'audio_2.tar', ...].

    See the WebDataset documentation for more information about accepted data and input formats.

    If using multiple processes the number of shards should be divisible by the number of workers to ensure an
    even split among workers. If it is not divisible, logging will give a warning but training will proceed.
    In addition, if using multiprocessing, each shard MUST HAVE THE SAME NUMBER OF ENTRIES after filtering
    is applied. We currently do not check for this, but your program may hang if the shards are uneven!

    Notice that a few arguments are different from the AudioToBPEDataset; for example, shuffle (bool) has been
    replaced by shuffle_n (int).

    Additionally, please note that the len() of this DataLayer is assumed to be the length of the manifest
    after filtering. An incorrect manifest length may lead to some DataLoader issues down the line.

    Args:
        audio_tar_filepaths: Either a list of audio tarball filepaths, or a
            string (can be brace-expandable).
        manifest_filepath (str): Path to the manifest.
        labels (list): Dataset parameter.
            List of target classes that can be output by the speaker recognition model.
        featurizer
        shuffle_n (int): How many samples to look ahead and load to be shuffled.
            See WebDataset documentation for more details.
            Defaults to 0.
        min_duration (float): Dataset parameter.
            All training files which have a duration less than min_duration
            are dropped. Note: Duration is read from the manifest JSON.
            Defaults to 0.1.
        max_duration (float): Dataset parameter.
            All training files which have a duration more than max_duration
            are dropped. Note: Duration is read from the manifest JSON.
            Defaults to None.
        trim (bool): Whether to trim silence from the beginning and end
            of the audio signal using librosa.effects.trim().
            Defaults to False.
        shard_strategy (str): Tarred dataset shard distribution strategy chosen as a str value during ddp.
            - `scatter`: The default shard strategy applied by WebDataset, where each node gets
                a unique set of shards, which are permanently pre-allocated and never changed at runtime.
            - `replicate`: Optional shard strategy, where each node gets all of the set of shards
                available in the tarred dataset, which are permanently pre-allocated and never changed at runtime.
                The benefit of replication is that it allows each node to sample data points from the entire
                dataset independently of other nodes, and reduces dependence on value of `shuffle_n`.

                .. warning::
                    Replicated strategy allows every node to sample the entire set of available tarfiles,
                    and therefore more than one node may sample the same tarfile, and even sample the same
                    data points! As such, there is no assured guarantee that all samples in the dataset will be
                    sampled at least once during 1 epoch. Scattered strategy, on the other hand, on specific
                    occasions (when the number of shards is not divisible with ``world_size``), will not sample
                    the entire dataset. For these reasons it is not advisable to use tarred datasets as validation
                    or test datasets.
        global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
        world_size (int): Total number of processes, used for partitioning shards. Defaults to 0.
        is_regression_task (bool): Whether it is a regression task. Defaults to False.
    """

    def _collate_fn(self, batch):
        """Zero-pad and batch the samples via the shared speech collate helper."""
        pad_id = 0
        return _speech_collate_fn(batch, pad_id=pad_id)
763
-
764
-
765
class TarredAudioToSpeechLabelDataset(_TarredAudioLabelDataset):
    """
    A similar Dataset to the AudioToSpeechLabelDataset, but which loads tarred audio files.

    Accepts a single comma-separated JSON manifest file (in the same style as for the AudioToSpeechLabelDataset),
    as well as the path(s) to the tarball(s) containing the wav files. Each line of the manifest should
    contain the information for one audio file, including at least the transcript and name of the audio
    file within the tarball.

    Valid formats for the audio_tar_filepaths argument include:
    (1) a single string that can be brace-expanded, e.g. 'path/to/audio.tar' or 'path/to/audio_{1..100}.tar.gz', or
    (2) a list of file paths that will not be brace-expanded, e.g. ['audio_1.tar', 'audio_2.tar', ...].

    See the WebDataset documentation for more information about accepted data and input formats.

    If using multiple processes the number of shards should be divisible by the number of workers to ensure an
    even split among workers. If it is not divisible, logging will give a warning but training will proceed.
    In addition, if using multiprocessing, each shard MUST HAVE THE SAME NUMBER OF ENTRIES after filtering
    is applied. We currently do not check for this, but your program may hang if the shards are uneven!

    Notice that a few arguments are different from the AudioToBPEDataset; for example, shuffle (bool) has been
    replaced by shuffle_n (int).

    Additionally, please note that the len() of this DataLayer is assumed to be the length of the manifest
    after filtering. An incorrect manifest length may lead to some DataLoader issues down the line.

    Args:
        audio_tar_filepaths: Either a list of audio tarball filepaths, or a
            string (can be brace-expandable).
        manifest_filepath (str): Path to the manifest.
        labels (list): Dataset parameter.
            List of target classes that can be output by the speaker recognition model.
        featurizer
        shuffle_n (int): How many samples to look ahead and load to be shuffled.
            See WebDataset documentation for more details.
            Defaults to 0.
        min_duration (float): Dataset parameter.
            All training files which have a duration less than min_duration
            are dropped. Note: Duration is read from the manifest JSON.
            Defaults to 0.1.
        max_duration (float): Dataset parameter.
            All training files which have a duration more than max_duration
            are dropped. Note: Duration is read from the manifest JSON.
            Defaults to None.
        trim (bool): Whether to trim silence from the beginning and end
            of the audio signal using librosa.effects.trim().
            Defaults to False.
        window_length_in_sec (float): time length of window/slice (in seconds) # Pass this only for speaker recognition and VAD task
        shift_length_in_sec (float): amount of shift of window for generating the frame for VAD task. in a batch # Pass this only for VAD task during inference.
        normalize_audio (bool): Whether to normalize audio signal. Defaults to False.
        shard_strategy (str): Tarred dataset shard distribution strategy chosen as a str value during ddp.
            - `scatter`: The default shard strategy applied by WebDataset, where each node gets
                a unique set of shards, which are permanently pre-allocated and never changed at runtime.
            - `replicate`: Optional shard strategy, where each node gets all of the set of shards
                available in the tarred dataset, which are permanently pre-allocated and never changed at runtime.
                The benefit of replication is that it allows each node to sample data points from the entire
                dataset independently of other nodes, and reduces dependence on value of `shuffle_n`.

                .. warning::
                    Replicated strategy allows every node to sample the entire set of available tarfiles,
                    and therefore more than one node may sample the same tarfile, and even sample the same
                    data points! As such, there is no assured guarantee that all samples in the dataset will be
                    sampled at least once during 1 epoch. Scattered strategy, on the other hand, on specific
                    occasions (when the number of shards is not divisible with ``world_size``), will not sample
                    the entire dataset. For these reasons it is not advisable to use tarred datasets as validation
                    or test datasets.
        global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
        world_size (int): Total number of processes, used for partitioning shards. Defaults to 0.
    """

    def __init__(
        self,
        *,
        audio_tar_filepaths: Union[str, List[str]],
        manifest_filepath: Union[str, List[str]],
        labels: List[str],
        featurizer,
        shuffle_n: int = 0,
        min_duration: Optional[float] = 0.1,
        max_duration: Optional[float] = None,
        trim: bool = False,
        window_length_in_sec: Optional[float] = 8,
        shift_length_in_sec: Optional[float] = 1,
        normalize_audio: bool = False,
        shard_strategy: str = "scatter",
        global_rank: int = 0,
        world_size: int = 0,
    ):
        # Window/shift parameters are only consumed by the collate functions below;
        # everything else is handled by the base tarred dataset.
        self.window_length_in_sec = window_length_in_sec
        self.shift_length_in_sec = shift_length_in_sec
        self.normalize_audio = normalize_audio

        logging.info("Window/slice length considered for collate func is {}".format(window_length_in_sec))
        logging.info("Shift length considered for collate func is {}".format(shift_length_in_sec))

        super().__init__(
            audio_tar_filepaths=audio_tar_filepaths,
            manifest_filepath=manifest_filepath,
            labels=labels,
            featurizer=featurizer,
            shuffle_n=shuffle_n,
            min_duration=min_duration,
            max_duration=max_duration,
            trim=trim,
            shard_strategy=shard_strategy,
            global_rank=global_rank,
            world_size=world_size,
        )

    def fixed_seq_collate_fn(self, batch):
        """Collate into fixed-length windows (speaker recognition path)."""
        return _fixed_seq_collate_fn(self, batch)

    def sliced_seq_collate_fn(self, batch):
        """Sliced-sequence collation is not supported for tarred datasets."""
        raise NotImplementedError

    def vad_frame_seq_collate_fn(self, batch):
        """Collate into per-frame sequences for the VAD task."""
        return _vad_frame_seq_collate_fn(self, batch)
881
-
882
-
883
class AudioToMultiLabelDataset(Dataset):
    """
    Dataset that loads a json file containing paths to audio files, durations (in seconds), and a sequence of labels.
    Each new line is a different sample. Example below:
    {"audio_filepath": "/path/to/audio_wav_0.wav", "duration": time_in_sec_0, "label": \
        "0 1 1 0 1", "offset": offset_in_sec_0}
    ...
    {"audio_filepath": "/path/to/audio_wav_n.wav", "duration": time_in_sec_n, "label": \
        "0 1 0 0 1", "offset": offset_in_sec_n}
    Args:
        manifest_filepath (Union[str, List[str]]): Path to manifest json as described above. Can
            be comma-separated paths.
        labels (Optional[list]): String containing all the possible labels to map to
            if None then automatically picks from ASRSpeechLabel collection.
        min_duration (float): Dataset parameter.
            All training files which have a duration less than min_duration
            are dropped. Note: Duration is read from the manifest JSON.
            Defaults to 0.1.
        max_duration (float): Dataset parameter.
            All training files which have a duration more than max_duration
            are dropped. Note: Duration is read from the manifest JSON.
            Defaults to None.
        trim_silence (bool): Whether to trim silence from the beginning and end
            of the audio signal using librosa.effects.trim().
            Defaults to False.
        channel_selector (Union[str, int, List[int]]): string denoting the downmix mode, an integer denoting the channel to be selected, or an iterable
            of integers denoting a subset of channels. Channel selector is using zero-based indexing.
            If set to `None`, the original signal will be used.
        window_length_in_sec (float): length of window/slice (in seconds)
            Use this for speaker recognition and VAD tasks.
        shift_length_in_sec (float): amount of shift of window for generating the frame for VAD task in a batch
            Use this for VAD task during inference.
        normalize_audio (bool): Whether to normalize audio signal.
            Defaults to False.
        is_regression_task (bool): Whether the dataset is for a regression task instead of classification.
            Defaults to False.
        cal_labels_occurrence (bool): Whether to calculate occurrence of labels.
            Defaults to False.
        delimiter (Optional[str]): Delimiter to use when splitting the label string, default to None.
        normalize_audio_db (Optional[float]): normalize audio signal to a target db, default to None.
    """

    @property
    def output_types(self) -> Optional[Dict[str, NeuralType]]:
        """Returns definitions of module output ports."""

        output_types = {
            'audio_signal': NeuralType(
                ('B', 'T'),
                (
                    AudioSignal(freq=self._sample_rate)
                    if self is not None and hasattr(self, '_sample_rate')
                    else AudioSignal()
                ),
            ),
            'a_sig_length': NeuralType(tuple('B'), LengthsType()),
        }

        if self.is_regression_task:
            # NOTE(review): tuple('B, T') expands to ('B', ',', ' ', 'T'), not
            # ('B', 'T'); kept as-is for compatibility, but this axis spec looks
            # suspect — confirm against NeuralType's accepted axis format.
            output_types.update(
                {
                    'targets': NeuralType(tuple('B, T'), RegressionValuesType()),
                    'targets_length': NeuralType(tuple('B'), LengthsType()),
                }
            )
        else:
            output_types.update(
                {
                    'label': NeuralType(('B', 'T'), LabelsType()),
                    'label_length': NeuralType(tuple('B'), LengthsType()),
                }
            )

        return output_types

    def __init__(
        self,
        *,
        manifest_filepath: Union[str, List[str]],
        sample_rate: int,
        labels: Optional[List[str]] = None,
        int_values: bool = False,
        augmentor: 'nemo.collections.asr.parts.perturb.AudioAugmentor' = None,
        min_duration: Optional[float] = 0.1,
        max_duration: Optional[float] = None,
        trim_silence: bool = False,
        channel_selector: Optional[Union[str, int, List[int]]] = None,
        is_regression_task: bool = False,
        cal_labels_occurrence: Optional[bool] = False,
        delimiter: Optional[str] = None,
        normalize_audio_db: Optional[float] = None,
    ):
        super().__init__()
        if isinstance(manifest_filepath, str):
            manifest_filepath = manifest_filepath.split(',')

        self.delimiter = delimiter
        self.normalize_audio_db = normalize_audio_db

        self.collection = collections.ASRSpeechLabel(
            manifests_files=manifest_filepath,
            min_duration=min_duration,
            max_duration=max_duration,
            is_regression_task=is_regression_task,
            cal_labels_occurrence=cal_labels_occurrence,
            delimiter=delimiter,
        )

        self.featurizer = WaveformFeaturizer(sample_rate=sample_rate, int_values=int_values, augmentor=augmentor)
        self.trim = trim_silence
        self.channel_selector = channel_selector
        self.is_regression_task = is_regression_task
        self.id2occurrence = {}
        # Stays None unless occurrence counting is requested below.
        self.labels_occurrence = None

        if not is_regression_task:
            self.labels = labels if labels else self._get_label_set()
            self.num_classes = len(self.labels) if self.labels is not None else 1
            self.label2id, self.id2label = {}, {}
            if cal_labels_occurrence:
                # BUGFIX: this must be a list before the loop appends to it;
                # previously it stayed None and the append below raised
                # AttributeError whenever cal_labels_occurrence was True.
                self.labels_occurrence = []
            for label_id, label in enumerate(self.labels):
                self.label2id[label] = label_id
                self.id2label[label_id] = label
                if cal_labels_occurrence:
                    self.id2occurrence[label_id] = self.collection.labels_occurrence[label]
                    self.labels_occurrence.append(self.id2occurrence[label_id])

            # Log a small sample of the label mapping for debugging.
            for idx in range(len(self.labels[:5])):
                logging.debug(" label id {} and its mapped label {}".format(idx, self.id2label[idx]))
        else:
            self.labels = []
            self.num_classes = 1

    def _get_label_set(self):
        """Collect the sorted set of unique labels observed in the manifest collection."""
        labels = []
        for sample in self.collection:
            label_str = sample.label
            if label_str:
                label_str_list = label_str.split(self.delimiter) if self.delimiter else label_str.split()
                labels.extend(label_str_list)
        return sorted(set(labels))

    def _label_str_to_tensor(self, label_str: str):
        """Convert a whitespace/delimiter-separated label string into a 1-D tensor.

        Floats for regression tasks, label ids (long) for classification.
        """
        labels = label_str.split(self.delimiter) if self.delimiter else label_str.split()

        if self.is_regression_task:
            labels = [float(s) for s in labels]
            labels = torch.tensor(labels).float()
        else:
            labels = [self.label2id[s] for s in labels]
            labels = torch.tensor(labels).long()
        return labels

    def __len__(self):
        return len(self.collection)

    def __getitem__(self, index):
        """Load one audio file and its label sequence as (features, feat_len, labels, label_len)."""
        sample = self.collection[index]

        offset = sample.offset

        if offset is None:
            offset = 0

        features = self.featurizer.process(
            sample.audio_file,
            offset=offset,
            duration=sample.duration,
            trim=self.trim,
            channel_selector=self.channel_selector,
            normalize_db=self.normalize_audio_db,
        )

        f, fl = features, torch.tensor(features.size(0)).long()

        t = self._label_str_to_tensor(sample.label)

        tl = torch.tensor(t.size(0)).long()

        return f, fl, t, tl

    def _collate_fn(self, batch):
        """Zero-pad and batch samples via the shared speech collate helper."""
        return _speech_collate_fn(batch, pad_id=0)
1065
-
1066
-
1067
class TarredAudioToMultiLabelDataset(IterableDataset):
    """
    A similar Dataset to the AudioToMultiLabelDataset, but which loads tarred audio files.

    Accepts a single comma-separated JSON manifest file (in the same style as for the AudioToSpeechLabelDataset),
    as well as the path(s) to the tarball(s) containing the wav files. Each line of the manifest should
    contain the information for one audio file, including at least the transcript and name of the audio
    file within the tarball.

    Valid formats for the audio_tar_filepaths argument include:
    (1) a single string that can be brace-expanded, e.g. 'path/to/audio.tar' or 'path/to/audio_{1..100}.tar.gz', or
    (2) a list of file paths that will not be brace-expanded, e.g. ['audio_1.tar', 'audio_2.tar', ...].

    See the WebDataset documentation for more information about accepted data and input formats.

    If using multiple processes the number of shards should be divisible by the number of workers to ensure an
    even split among workers. If it is not divisible, logging will give a warning but training will proceed.
    In addition, if using multiprocessing, each shard MUST HAVE THE SAME NUMBER OF ENTRIES after filtering
    is applied. We currently do not check for this, but your program may hang if the shards are uneven!

    Notice that a few arguments are different from the AudioToBPEDataset; for example, shuffle (bool) has been
    replaced by shuffle_n (int).

    Additionally, please note that the len() of this DataLayer is assumed to be the length of the manifest
    after filtering. An incorrect manifest length may lead to some DataLoader issues down the line.

    Args:
        audio_tar_filepaths: Either a list of audio tarball filepaths, or a
            string (can be brace-expandable).
        manifest_filepath (str): Path to the manifest.
        labels (list): Dataset parameter.
            List of target classes that can be output by the speaker recognition model.
        shuffle_n (int): How many samples to look ahead and load to be shuffled.
            See WebDataset documentation for more details.
            Defaults to 0.
        min_duration (float): Dataset parameter.
            All training files which have a duration less than min_duration
            are dropped. Note: Duration is read from the manifest JSON.
            Defaults to 0.1.
        max_duration (float): Dataset parameter.
            All training files which have a duration more than max_duration
            are dropped. Note: Duration is read from the manifest JSON.
            Defaults to None.
        trim (bool): Whether to trim silence from the beginning and end
            of the audio signal using librosa.effects.trim().
            Defaults to False.
        window_length_in_sec (float): time length of window/slice (in seconds) # Pass this only for speaker recognition and VAD task
        shift_length_in_sec (float): amount of shift of window for generating the frame for VAD task. in a batch # Pass this only for VAD task during inference.
        normalize_audio (bool): Whether to normalize audio signal. Defaults to False.
        shard_strategy (str): Tarred dataset shard distribution strategy chosen as a str value during ddp.
            - `scatter`: The default shard strategy applied by WebDataset, where each node gets
                a unique set of shards, which are permanently pre-allocated and never changed at runtime.
            - `replicate`: Optional shard strategy, where each node gets all of the set of shards
                available in the tarred dataset, which are permanently pre-allocated and never changed at runtime.
                The benefit of replication is that it allows each node to sample data points from the entire
                dataset independently of other nodes, and reduces dependence on value of `shuffle_n`.

                .. warning::
                    Replicated strategy allows every node to sample the entire set of available tarfiles,
                    and therefore more than one node may sample the same tarfile, and even sample the same
                    data points! As such, there is no assured guarantee that all samples in the dataset will be
                    sampled at least once during 1 epoch. Scattered strategy, on the other hand, on specific
                    occasions (when the number of shards is not divisible with ``world_size``), will not sample
                    the entire dataset. For these reasons it is not advisable to use tarred datasets as validation
                    or test datasets.
        global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
        world_size (int): Total number of processes, used for partitioning shards. Defaults to 0.
        delimiter (Optional[str]): Delimiter to use when splitting the label string, default to None.
        normalize_audio_db (Optional[float]): normalize audio signal to a target db, default to None.
    """

    def __init__(
        self,
        *,
        audio_tar_filepaths: Union[str, List[str]],
        manifest_filepath: Union[str, List[str]],
        sample_rate: int,
        labels: Optional[List[str]] = None,
        shuffle_n: int = 0,
        int_values: bool = False,
        augmentor: 'nemo.collections.asr.parts.perturb.AudioAugmentor' = None,
        min_duration: Optional[float] = 0.1,
        max_duration: Optional[float] = None,
        trim_silence: bool = False,
        is_regression_task: bool = False,
        shard_strategy: str = "scatter",
        global_rank: int = 0,
        world_size: int = 0,
        delimiter: Optional[str] = None,
        normalize_audio_db: Optional[float] = None,
    ):
        super().__init__()
        # A comma-separated string is treated as multiple manifest paths.
        if isinstance(manifest_filepath, str):
            manifest_filepath = manifest_filepath.split(',')

        self.trim = trim_silence
        self.is_regression_task = is_regression_task
        self.delimiter = delimiter
        self.normalize_audio_db = normalize_audio_db

        # Manifest entries indexed by file id so tar members can be matched back
        # to their metadata in _build_sample.
        self.collection = collections.ASRSpeechLabel(
            manifests_files=manifest_filepath,
            min_duration=min_duration,
            max_duration=max_duration,
            is_regression_task=is_regression_task,
            index_by_file_id=True,
        )
        # Number of manifest entries per file id (duplicates appear as "-sub<j>").
        self.file_occurence = count_occurence(self.collection.mapping)

        self.featurizer = WaveformFeaturizer(sample_rate=sample_rate, int_values=int_values, augmentor=augmentor)

        if not is_regression_task:
            # Build the label <-> id mappings, deriving the label set from the
            # manifest when none is supplied explicitly.
            self.labels = labels if labels else self._get_label_set()
            self.num_classes = len(self.labels) if self.labels is not None else 1
            self.label2id, self.id2label = {}, {}
            for label_id, label in enumerate(self.labels):
                self.label2id[label] = label_id
                self.id2label[label_id] = label
            # Log a small sample of the mapping for debugging.
            for idx in range(len(self.labels[:5])):
                logging.debug(" label id {} and its mapped label {}".format(idx, self.id2label[idx]))
        else:
            self.labels = []
            self.num_classes = 1

        # Distribute tar shards across ranks according to the chosen strategy.
        audio_tar_filepaths = expand_sharded_filepaths(
            sharded_filepaths=audio_tar_filepaths,
            shard_strategy=shard_strategy,
            world_size=world_size,
            global_rank=global_rank,
        )

        # Put together WebDataset
        self._dataset = wds.DataPipeline(
            wds.SimpleShardList(urls=audio_tar_filepaths),
            webdataset_split_by_workers,
            wds.shuffle(shuffle_n),
            wds.tarfile_to_samples(),
            wds.rename(audio=VALID_FILE_FORMATS, key='__key__'),
            wds.to_tuple('audio', 'key'),
            self._filter,
            wds.map(self._build_sample),
        )

    def _get_label_set(self):
        """Collect the sorted set of unique labels observed in the manifest collection."""
        labels = []
        for sample in self.collection:
            label_str = sample.label
            if label_str:
                label_str_list = label_str.split(self.delimiter) if self.delimiter else label_str.split()
                labels.extend(label_str_list)
        return sorted(set(labels))

    def _label_str_to_tensor(self, label_str: str):
        """Convert a delimiter/whitespace-separated label string into a 1-D tensor.

        Floats for regression tasks, label ids (long) for classification.
        """
        labels = label_str.split(self.delimiter) if self.delimiter else label_str.split()

        if self.is_regression_task:
            labels = [float(s) for s in labels]
            labels = torch.tensor(labels).float()
        else:
            labels = [self.label2id[s] for s in labels]
            labels = torch.tensor(labels).long()
        return labels

    def _filter(self, iterator):
        """This function is used to remove samples that have been filtered out by ASRSpeechLabel already.
        Otherwise, we would get a KeyError as _build_sample attempts to find the manifest entry for a sample
        that was filtered out (e.g. for duration).
        Note that if using multi-GPU training, filtering may lead to an imbalance in samples in each shard,
        which may make your code hang as one process will finish before the other.
        """

        class TarredAudioFilter:
            # Expands each tar member into one sample per occurrence of its file
            # id in the manifest (duplicates are keyed "<file_id>-sub<j>").
            def __init__(self, collection, file_occurence):
                # `iterator` is closed over from the enclosing _filter call.
                self.iterator = iterator
                self.collection = collection
                self.file_occurence = file_occurence
                self._iterable = self._internal_generator()

            def __iter__(self):
                # Restart the expansion generator on every fresh iteration pass.
                self._iterable = self._internal_generator()
                return self

            def __next__(self):
                try:
                    values = next(self._iterable)
                except StopIteration:
                    # reset generator
                    self._iterable = self._internal_generator()
                    values = next(self._iterable)

                return values

            def _internal_generator(self):
                """
                WebDataset requires an Iterator, but we require an iterable that yields 1-or-more
                values per value inside self.iterator.

                Therefore wrap the iterator with a generator function that will yield 1-or-more
                values per sample in the iterator.
                """
                for _, tup in enumerate(self.iterator):
                    audio_bytes, audio_filename = tup

                    file_id, _ = os.path.splitext(os.path.basename(audio_filename))
                    # NOTE(review): membership is tested with `audio_filename` but
                    # the count is indexed with `file_id`; these only coincide when
                    # the tar key carries no extension — confirm against key format.
                    if audio_filename in self.file_occurence:
                        for j in range(0, self.file_occurence[file_id]):
                            if j == 0:
                                audio_filename = file_id
                            else:
                                audio_filename = file_id + "-sub" + str(j)
                            yield audio_bytes, audio_filename

        return TarredAudioFilter(self.collection, self.file_occurence)

    def _build_sample(self, tup):
        """Builds the training sample by combining the data from the WebDataset with the manifest info."""
        audio_bytes, audio_filename = tup
        # Grab manifest entry from self.collection
        file_id, _ = os.path.splitext(os.path.basename(audio_filename))

        manifest_idx = self.collection.mapping[file_id]
        manifest_entry = self.collection[manifest_idx]

        offset = manifest_entry.offset
        if offset is None:
            offset = 0

        # Convert audio bytes to IO stream for processing (for SoundFile to read)
        audio_filestream = io.BytesIO(audio_bytes)
        features = self.featurizer.process(
            audio_filestream,
            offset=offset,
            duration=manifest_entry.duration,
            trim=self.trim,
            normalize_db=self.normalize_audio_db,
        )

        audio_filestream.close()

        # Audio features
        f, fl = features, torch.tensor(features.shape[0]).long()

        t = self._label_str_to_tensor(manifest_entry.label)

        tl = torch.tensor(t.size(0)).long()

        return f, fl, t, tl

    def __iter__(self):
        # Delegate iteration to the WebDataset pipeline assembled in __init__.
        return self._dataset.__iter__()

    def __len__(self):
        # Length of the (filtered) manifest, not of the tar contents.
        return len(self.collection)

    def _collate_fn(self, batch):
        # Zero-pad and batch samples via the shared speech collate helper.
        return _speech_collate_fn(batch, pad_id=0)
1323
-
1324
-
1325
class AudioPairToLabelDataset(AudioToSpeechLabelDataset):
    """
    Dataset class for audio pairs classification tasks, such as calculating EER for speaker verification.

    The input manifest file should contain pairs of audio files and a label. Its format is almost the same
    as `AudioToSpeechLabelDataset`, except that the `audio_filepath` field is a list of two audio file paths
    instead of one; `offset` and `duration` are ignored and the whole audio is loaded.

    Example of a line in the manifest file:
    {
        "audio_filepath": ["/path/to/audio_wav_0.wav", "/path/to/audio_wav_1.wav"],
        "duration": null, # not used, will load the whole audio
        "offset": 0.0, # not used, will load the whole audio
        "label": "0" # label for the pair, can be a string or an integer
    }
    """

    @property
    def output_types(self) -> Optional[Dict[str, NeuralType]]:
        """Returns definitions of module output ports."""

        def _signal_type():
            # Use the configured sample rate when available; fall back to an unconstrained signal.
            if hasattr(self, '_sample_rate'):
                return AudioSignal(freq=self._sample_rate)
            return AudioSignal()

        return {
            'audio_signal': NeuralType(('B', 'T'), _signal_type()),
            'a_sig_length': NeuralType(tuple('B'), LengthsType()),
            'audio_signal_2': NeuralType(('B', 'T'), _signal_type()),
            'a_sig_length_2': NeuralType(tuple('B'), LengthsType()),
            'label': NeuralType(tuple('B'), LabelsType()),
            'label_length': NeuralType(tuple('B'), LengthsType()),
        }

    def __init__(
        self,
        *,
        manifest_filepath: str | List[str],
        labels: List[str],
        featurizer,
        min_duration: float | None = 0.1,
        max_duration: float | None = None,
        trim: bool = False,
        window_length_in_sec: float | None = 8,
        shift_length_in_sec: float | None = 1,
        normalize_audio: bool = False,
        **kwargs,
    ):
        # Pair classification is never a regression task and label occurrence stats are not needed.
        super().__init__(
            manifest_filepath=manifest_filepath,
            labels=labels,
            featurizer=featurizer,
            min_duration=min_duration,
            max_duration=max_duration,
            trim=trim,
            window_length_in_sec=window_length_in_sec,
            shift_length_in_sec=shift_length_in_sec,
            normalize_audio=normalize_audio,
            is_regression_task=False,
            cal_labels_occurrence=False,
        )

    def __getitem__(self, index):
        """Load both audio files of the pair in full and return their features plus the pair label."""
        entry = self.collection[index]
        first_path, second_path = entry.audio_file[0], entry.audio_file[1]

        feats_a = self.featurizer.process(first_path, offset=0, duration=None, trim=self.trim)
        feats_b = self.featurizer.process(second_path, offset=0, duration=None, trim=self.trim)

        len_a = torch.tensor(feats_a.shape[0]).long()
        len_b = torch.tensor(feats_b.shape[0]).long()

        label = torch.tensor(self.label2id[entry.label]).long()
        label_len = torch.tensor(1).long()  # For compatibility with collate_fn used later

        return feats_a, len_a, feats_b, len_b, label, label_len

    def fixed_seq_collate_fn(self, batch):
        """Collate the two audio streams of each pair independently with the fixed-seq collate helper."""
        sig_a, len_a, sig_b, len_b, labels, label_lens = zip(*batch)

        collated_a = _fixed_seq_collate_fn(self, list(zip(sig_a, len_a, labels, label_lens)))
        a_sig1, a_sig_len1, pair_label, pair_label_len = collated_a

        collated_b = _fixed_seq_collate_fn(self, list(zip(sig_b, len_b, labels, label_lens)))
        a_sig2, a_sig_len2, _, _ = collated_b

        return a_sig1, a_sig_len1, a_sig2, a_sig_len2, pair_label, pair_label_len
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/collections/asr/data/audio_to_label_dataset.py DELETED
@@ -1,304 +0,0 @@
1
- # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- import copy
15
-
16
- from omegaconf import DictConfig
17
-
18
- from nemo.collections.asr.data import audio_to_label
19
- from nemo.collections.asr.data.audio_to_text_dataset import convert_to_config_list, get_chain_dataset
20
- from nemo.collections.asr.parts.preprocessing.perturb import process_augmentations
21
- from nemo.collections.common.data.dataset import ConcatDataset
22
-
23
-
24
def get_classification_label_dataset(featurizer, config: dict) -> audio_to_label.AudioToClassificationLabelDataset:
    """
    Instantiates a Classification AudioLabelDataset.

    Args:
        featurizer: Audio featurizer used to process the raw waveforms.
        config: Config of the AudioToClassificationLabelDataset.

    Returns:
        An instance of AudioToClassificationLabelDataset.
    """
    return audio_to_label.AudioToClassificationLabelDataset(
        manifest_filepath=config['manifest_filepath'],
        labels=config['labels'],
        featurizer=featurizer,
        max_duration=config.get('max_duration'),
        min_duration=config.get('min_duration'),
        trim=config.get('trim_silence', False),
        is_regression_task=config.get('is_regression_task', False),
        cal_labels_occurrence=config.get('cal_labels_occurrence', False),
    )
45
-
46
-
47
def get_speech_label_dataset(featurizer, config: dict) -> audio_to_label.AudioToSpeechLabelDataset:
    """
    Instantiates a Speech Label (e.g. VAD, speaker recognition) AudioLabelDataset.

    Args:
        featurizer: Audio featurizer used to process the raw waveforms.
        config: Config of the AudioToSpeechLabelDataSet.

    Returns:
        An instance of AudioToSpeechLabelDataset.
    """
    return audio_to_label.AudioToSpeechLabelDataset(
        manifest_filepath=config['manifest_filepath'],
        labels=config['labels'],
        featurizer=featurizer,
        max_duration=config.get('max_duration'),
        min_duration=config.get('min_duration'),
        trim=config.get('trim_silence', False),
        window_length_in_sec=config.get('window_length_in_sec', 0.31),
        shift_length_in_sec=config.get('shift_length_in_sec', 0.01),
        normalize_audio=config.get('normalize_audio', False),
        cal_labels_occurrence=config.get('cal_labels_occurrence', False),
    )
70
-
71
-
72
def get_tarred_classification_label_dataset(
    featurizer, config: dict, shuffle_n: int, global_rank: int, world_size: int
) -> audio_to_label.TarredAudioToClassificationLabelDataset:
    """
    Instantiates a Classification TarredAudioLabelDataset.

    Args:
        featurizer: Audio featurizer used to process the raw waveforms.
        config: Config of the TarredAudioToClassificationLabelDataset.
        shuffle_n: How many samples to look ahead and load to be shuffled.
            See WebDataset documentation for more details.
        global_rank: Global rank of this device.
        world_size: Global world size in the training method.

    Returns:
        An instance of TarredAudioToClassificationLabelDataset.

    Raises:
        ValueError: If any bucketing weight is not a positive integer, or if the number of
            manifest buckets does not match the number of tarred audio filepath buckets.
    """
    tarred_audio_filepaths = convert_to_config_list(config['tarred_audio_filepaths'])
    manifest_filepaths = convert_to_config_list(config['manifest_filepath'])

    bucketing_weights = config.get('bucketing_weights', None)  # For upsampling buckets
    if bucketing_weights:
        for weight in bucketing_weights:
            if not isinstance(weight, int) or weight <= 0:
                raise ValueError("bucket weights must be positive integers")

    if len(manifest_filepaths) != len(tarred_audio_filepaths):
        raise ValueError(
            f"manifest_filepaths (length={len(manifest_filepaths)}) and tarred_audio_filepaths "
            f"(length={len(tarred_audio_filepaths)}) need to have the same number of buckets."
        )

    datasets = []
    for dataset_idx, (tarred_audio_filepath, manifest_filepath) in enumerate(
        zip(tarred_audio_filepaths, manifest_filepaths)
    ):
        # A single-element bucket is passed through as a bare path rather than a list.
        if len(tarred_audio_filepath) == 1:
            tarred_audio_filepath = tarred_audio_filepath[0]
        dataset = audio_to_label.TarredAudioToClassificationLabelDataset(
            audio_tar_filepaths=tarred_audio_filepath,
            manifest_filepath=manifest_filepath,
            labels=config['labels'],
            featurizer=featurizer,
            shuffle_n=shuffle_n,
            max_duration=config.get('max_duration', None),
            min_duration=config.get('min_duration', None),
            trim=config.get('trim_silence', False),
            shard_strategy=config.get('tarred_shard_strategy', 'scatter'),
            global_rank=global_rank,
            world_size=world_size,
            is_regression_task=config.get('is_regression_task', False),
        )

        # Repeat the dataset by its bucketing weight so the bucket is upsampled accordingly.
        repeats = bucketing_weights[dataset_idx] if bucketing_weights else 1
        datasets.extend([dataset] * repeats)

    return get_chain_dataset(datasets=datasets, ds_config=config, rank=global_rank)
131
-
132
-
133
def get_concat_tarred_speech_label_dataset(
    featurizer, config: dict, shuffle_n: int, global_rank: int, world_size: int,
):
    """Build a ConcatDataset of tarred speech-label datasets, one per (tar, manifest) bucket."""
    per_bucket_datasets = []
    for tarred_audio_filepath, manifest_filepath in zip(
        config['tarred_audio_filepaths'], config['manifest_filepath']
    ):
        # Each bucket gets its own config copy pointing at a single tar/manifest pair.
        bucket_conf = copy.deepcopy(config)
        bucket_conf['manifest_filepath'] = manifest_filepath
        bucket_conf['tarred_audio_filepaths'] = tarred_audio_filepath
        per_bucket_datasets.append(
            get_tarred_speech_label_dataset(
                config=bucket_conf,
                featurizer=featurizer,
                shuffle_n=shuffle_n,
                global_rank=global_rank,
                world_size=world_size,
            )
        )

    return ConcatDataset(
        per_bucket_datasets,
        sampling_technique=config.get('concat_sampling_technique', 'temperature'),
        sampling_temperature=config.get('concat_sampling_temperature', 5),
        sampling_probabilities=config.get('concat_sampling_probabilities', None),
        global_rank=global_rank,
        world_size=world_size,
        shuffle=config['shuffle'],
    )
160
-
161
-
162
def get_tarred_speech_label_dataset(
    featurizer, config: dict, shuffle_n: int, global_rank: int, world_size: int,
) -> audio_to_label.TarredAudioToSpeechLabelDataset:
    """
    Instantiates a Speech Label (e.g. VAD, speaker recognition) TarredAudioLabelDataset.

    Args:
        featurizer: Audio featurizer used to process the raw waveforms.
        config: Config of the TarredAudioToSpeechLabelDataset.
        shuffle_n: How many samples to look ahead and load to be shuffled.
            See WebDataset documentation for more details.
        global_rank: Global rank of this device.
        world_size: Global world size in the training method.

    Returns:
        An instance of TarredAudioToSpeechLabelDataset.

    Raises:
        ValueError: If any bucketing weight is not a positive integer, or if the number of
            manifest buckets does not match the number of tarred audio filepath buckets.
    """
    tarred_audio_filepaths = convert_to_config_list(config['tarred_audio_filepaths'])
    manifest_filepaths = convert_to_config_list(config['manifest_filepath'])

    bucketing_weights = config.get('bucketing_weights', None)  # For upsampling buckets
    if bucketing_weights:
        for weight in bucketing_weights:
            if not isinstance(weight, int) or weight <= 0:
                raise ValueError("bucket weights must be positive integers")

    if len(manifest_filepaths) != len(tarred_audio_filepaths):
        raise ValueError(
            f"manifest_filepaths (length={len(manifest_filepaths)}) and tarred_audio_filepaths "
            f"(length={len(tarred_audio_filepaths)}) need to have the same number of buckets."
        )

    datasets = []
    for dataset_idx, (tarred_audio_filepath, manifest_filepath) in enumerate(
        zip(tarred_audio_filepaths, manifest_filepaths)
    ):
        # A single-element bucket is passed through as a bare path rather than a list.
        if len(tarred_audio_filepath) == 1:
            tarred_audio_filepath = tarred_audio_filepath[0]
        dataset = audio_to_label.TarredAudioToSpeechLabelDataset(
            audio_tar_filepaths=tarred_audio_filepath,
            manifest_filepath=manifest_filepath,
            labels=config['labels'],
            featurizer=featurizer,
            shuffle_n=shuffle_n,
            max_duration=config.get('max_duration', None),
            min_duration=config.get('min_duration', None),
            trim=config.get('trim_silence', False),
            window_length_in_sec=config.get('window_length_in_sec', 8),
            shift_length_in_sec=config.get('shift_length_in_sec', 0.075),
            normalize_audio=config.get('normalize_audio', False),
            shard_strategy=config.get('tarred_shard_strategy', 'scatter'),
            global_rank=global_rank,
            world_size=world_size,
        )

        # Repeat the dataset by its bucketing weight so the bucket is upsampled accordingly.
        repeats = bucketing_weights[dataset_idx] if bucketing_weights else 1
        datasets.extend([dataset] * repeats)

    return get_chain_dataset(datasets=datasets, ds_config=config, rank=global_rank)
223
-
224
-
225
def get_audio_multi_label_dataset(cfg: DictConfig) -> audio_to_label.AudioToMultiLabelDataset:
    """Instantiate an AudioToMultiLabelDataset from the given config, with optional augmentation."""
    augmentor = process_augmentations(cfg.augmentor) if "augmentor" in cfg else None

    return audio_to_label.AudioToMultiLabelDataset(
        manifest_filepath=cfg.get("manifest_filepath"),
        sample_rate=cfg.get("sample_rate"),
        labels=cfg.get("labels", None),
        int_values=cfg.get("int_values", False),
        augmentor=augmentor,
        min_duration=cfg.get("min_duration", None),
        max_duration=cfg.get("max_duration", None),
        trim_silence=cfg.get("trim_silence", False),
        is_regression_task=cfg.get("is_regression_task", False),
        cal_labels_occurrence=cfg.get("cal_labels_occurrence", False),
        delimiter=cfg.get("delimiter", None),
        normalize_audio_db=cfg.get("normalize_audio_db", None),
    )
246
-
247
-
248
def get_tarred_audio_multi_label_dataset(
    cfg: DictConfig, shuffle_n: int, global_rank: int, world_size: int
) -> audio_to_label.TarredAudioToMultiLabelDataset:
    """
    Instantiates a TarredAudioToMultiLabelDataset (possibly chained over multiple buckets).

    Args:
        cfg: Config of the TarredAudioToMultiLabelDataset.
        shuffle_n: How many samples to look ahead and load to be shuffled.
            See WebDataset documentation for more details.
        global_rank: Global rank of this device.
        world_size: Global world size in the training method.

    Returns:
        An instance of TarredAudioToMultiLabelDataset (or a chained dataset over all buckets).

    Raises:
        ValueError: If any bucketing weight is not a positive integer, or if the number of
            manifest buckets does not match the number of tarred audio filepath buckets.
    """
    augmentor = process_augmentations(cfg.augmentor) if "augmentor" in cfg else None

    tarred_audio_filepaths = convert_to_config_list(cfg['tarred_audio_filepaths'])
    manifest_filepaths = convert_to_config_list(cfg['manifest_filepath'])

    bucketing_weights = cfg.get('bucketing_weights', None)  # For upsampling buckets
    if bucketing_weights:
        for weight in bucketing_weights:
            if not isinstance(weight, int) or weight <= 0:
                raise ValueError("bucket weights must be positive integers")

    if len(manifest_filepaths) != len(tarred_audio_filepaths):
        raise ValueError(
            f"manifest_filepaths (length={len(manifest_filepaths)}) and tarred_audio_filepaths "
            f"(length={len(tarred_audio_filepaths)}) need to have the same number of buckets."
        )

    datasets = []
    for dataset_idx, (tarred_audio_filepath, manifest_filepath) in enumerate(
        zip(tarred_audio_filepaths, manifest_filepaths)
    ):
        # A single-element bucket is passed through as a bare path rather than a list.
        if len(tarred_audio_filepath) == 1:
            tarred_audio_filepath = tarred_audio_filepath[0]

        dataset = audio_to_label.TarredAudioToMultiLabelDataset(
            audio_tar_filepaths=tarred_audio_filepath,
            manifest_filepath=manifest_filepath,
            sample_rate=cfg["sample_rate"],
            labels=cfg['labels'],
            shuffle_n=shuffle_n,
            int_values=cfg.get("int_values", False),
            augmentor=augmentor,
            min_duration=cfg.get('min_duration', None),
            max_duration=cfg.get('max_duration', None),
            trim_silence=cfg.get('trim_silence', False),
            is_regression_task=cfg.get('is_regression_task', False),
            delimiter=cfg.get("delimiter", None),
            shard_strategy=cfg.get('tarred_shard_strategy', 'scatter'),
            global_rank=global_rank,
            world_size=world_size,
            normalize_audio_db=cfg.get("normalize_audio_db", None),
        )

        # Repeat the dataset by its bucketing weight so the bucket is upsampled accordingly.
        repeats = bucketing_weights[dataset_idx] if bucketing_weights else 1
        datasets.extend([dataset] * repeats)

    return get_chain_dataset(datasets=datasets, ds_config=cfg, rank=global_rank)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/collections/asr/data/audio_to_text.py DELETED
@@ -1,1389 +0,0 @@
1
- # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- import io
15
- import json
16
- import math
17
- import multiprocessing
18
- import os
19
- from collections.abc import Iterable as IterableABC
20
- from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union
21
-
22
- import braceexpand
23
- import numpy as np
24
- import torch
25
- from torch.utils.data import ChainDataset
26
- from tqdm import tqdm
27
-
28
- from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer
29
- from nemo.collections.asr.parts.preprocessing.segment import ChannelSelectorType
30
- from nemo.collections.asr.parts.preprocessing.segment import available_formats as valid_sf_formats
31
- from nemo.collections.common import tokenizers
32
- from nemo.collections.common.parts.preprocessing import collections, parsers
33
- from nemo.core.classes import Dataset, IterableDataset
34
- from nemo.core.neural_types import *
35
- from nemo.utils import logging
36
- from nemo.utils import webdataset as wds
37
- from nemo.utils.data_utils import DataStoreObject, datastore_object_get, is_datastore_cache_shared, is_datastore_path
38
- from nemo.utils.decorators import deprecated
39
- from nemo.utils.distributed import webdataset_split_by_workers
40
- from nemo.utils.get_rank import is_global_rank_zero
41
-
42
- __all__ = [
43
- 'AudioToCharDataset',
44
- 'AudioToBPEDataset',
45
- 'TarredAudioToCharDataset',
46
- 'TarredAudioToBPEDataset',
47
- ]
48
-
49
- VALID_FILE_FORMATS = ';'.join(['wav', 'mp3', 'flac', 'opus'] + [fmt.lower() for fmt in valid_sf_formats.keys()])
50
-
51
-
52
- def _speech_collate_fn(batch, pad_id):
53
- """collate batch of audio sig, audio len, tokens, tokens len
54
- Args:
55
- batch (Optional[FloatTensor], Optional[LongTensor], LongTensor,
56
- LongTensor): A tuple of tuples of signal, signal lengths,
57
- encoded tokens, and encoded tokens length. This collate func
58
- assumes the signals are 1d torch tensors (i.e. mono audio).
59
- """
60
- packed_batch = list(zip(*batch))
61
- if len(packed_batch) == 5:
62
- _, audio_lengths, _, tokens_lengths, sample_ids = packed_batch
63
- elif len(packed_batch) == 4:
64
- sample_ids = None
65
- _, audio_lengths, _, tokens_lengths = packed_batch
66
- else:
67
- raise ValueError("Expects 4 or 5 tensors in the batch!")
68
- max_audio_len = 0
69
- has_audio = audio_lengths[0] is not None
70
- if has_audio:
71
- max_audio_len = max(audio_lengths).item()
72
- has_tokens = tokens_lengths[0] is not None
73
- if has_tokens:
74
- max_tokens_len = max(tokens_lengths).item()
75
-
76
- audio_signal, tokens = [], []
77
- for b in batch:
78
- if len(b) == 5:
79
- sig, sig_len, tokens_i, tokens_i_len, _ = b
80
- else:
81
- sig, sig_len, tokens_i, tokens_i_len = b
82
- if has_audio:
83
- sig_len = sig_len.item()
84
- if sig_len < max_audio_len:
85
- pad = (0, max_audio_len - sig_len)
86
- sig = torch.nn.functional.pad(sig, pad)
87
- audio_signal.append(sig)
88
- if has_tokens:
89
- tokens_i_len = tokens_i_len.item()
90
- if tokens_i_len < max_tokens_len:
91
- pad = (0, max_tokens_len - tokens_i_len)
92
- tokens_i = torch.nn.functional.pad(tokens_i, pad, value=pad_id)
93
- tokens.append(tokens_i)
94
-
95
- if has_audio:
96
- audio_signal = torch.stack(audio_signal)
97
- audio_lengths = torch.stack(audio_lengths)
98
- else:
99
- audio_signal, audio_lengths = None, None
100
- if has_tokens:
101
- tokens = torch.stack(tokens)
102
- tokens_lengths = torch.stack(tokens_lengths)
103
- else:
104
- tokens = None
105
- tokens_lengths = None
106
- if sample_ids is None:
107
- return audio_signal, audio_lengths, tokens, tokens_lengths
108
- else:
109
- sample_ids = torch.tensor(sample_ids, dtype=torch.int32)
110
- return audio_signal, audio_lengths, tokens, tokens_lengths, sample_ids
111
-
112
-
113
class ASRManifestProcessor:
    """
    Class that processes a manifest json file containing paths to audio files, transcripts, and durations (in seconds).
    Each new line is a different sample. Example below:
    {"audio_filepath": "/path/to/audio.wav", "text_filepath": "/path/to/audio.txt", "duration": 23.147}
    ...
    {"audio_filepath": "/path/to/audio.wav", "text": "the transcription", "offset": 301.75, "duration": 0.82, "utt":
    "utterance_id", "ctm_utt": "en_4156", "side": "A"}

    Args:
        manifest_filepath: Path to manifest json as described above. Can be comma-separated paths.
        parser: Str for a language specific preprocessor or a callable.
        max_duration: If audio exceeds this length, do not include in dataset.
        min_duration: If audio is less than this length, do not include in dataset.
        max_utts: Limit number of utterances.
        bos_id: Id of beginning of sequence symbol to append if not None.
        eos_id: Id of end of sequence symbol to append if not None.
        pad_id: Id of pad symbol. Defaults to 0.
        index_by_file_id: If True, the collection can be looked up by file id as well as index.
        manifest_parse_func: Optional custom function for parsing manifest lines.
    """

    def __init__(
        self,
        manifest_filepath: str,
        parser: Union[str, Callable],
        max_duration: Optional[float] = None,
        min_duration: Optional[float] = None,
        max_utts: int = 0,
        bos_id: Optional[int] = None,
        eos_id: Optional[int] = None,
        pad_id: int = 0,
        index_by_file_id: bool = False,
        manifest_parse_func: Optional[Callable] = None,
    ):
        self.parser = parser

        # The collection filters/tokenizes manifest entries up front.
        self.collection = collections.ASRAudioText(
            manifests_files=manifest_filepath,
            parser=parser,
            min_duration=min_duration,
            max_duration=max_duration,
            max_number=max_utts,
            index_by_file_id=index_by_file_id,
            parse_func=manifest_parse_func,
        )

        self.eos_id = eos_id
        self.bos_id = bos_id
        self.pad_id = pad_id

    def process_text_by_id(self, index: int) -> Tuple[List[int], int]:
        """Return (tokens, length) for the sample at positional index."""
        return self.process_text_by_sample(self.collection[index])

    def process_text_by_file_id(self, file_id: str) -> Tuple[List[int], int]:
        """Return (tokens, length) for the sample identified by its file id."""
        manifest_idx = self.collection.mapping[file_id][0]
        return self.process_text_by_sample(self.collection[manifest_idx])

    def process_text_by_sample(self, sample: collections.ASRAudioText.OUTPUT_TYPE) -> Tuple[List[int], int]:
        """Wrap a sample's token ids with optional BOS/EOS and return (tokens, length)."""
        token_ids = sample.text_tokens
        if self.bos_id is not None:
            token_ids = [self.bos_id] + token_ids
        if self.eos_id is not None:
            token_ids = token_ids + [self.eos_id]
        return token_ids, len(token_ids)
181
-
182
-
183
def expand_sharded_filepaths(sharded_filepaths, shard_strategy: str, world_size: int, global_rank: int):
    """Expand brace notation in shard paths and partition the shards across ranks.

    Args:
        sharded_filepaths: A brace-notation string (alternative brace spellings such as
            '(..)', '[..]', '<..>' and '_OP_.._CL_' are accepted) or an already-expanded list.
        shard_strategy: 'scatter' to split shards evenly across ranks, 'replicate' to give
            every rank the full list.
        world_size: Number of distributed workers.
        global_rank: Rank of this worker.

    Returns:
        The list of shard paths assigned to this rank.

    Raises:
        ValueError: If ``shard_strategy`` is not one of the supported values.
    """
    valid_shard_strategies = ['scatter', 'replicate']
    if shard_strategy not in valid_shard_strategies:
        raise ValueError(f"`shard_strategy` must be one of {valid_shard_strategies}")

    if isinstance(sharded_filepaths, str):
        # Normalize alternative opening-brace spellings to '{'.
        for open_key in ('(', '[', '<', '_OP_'):
            if open_key in sharded_filepaths:
                sharded_filepaths = sharded_filepaths.replace(open_key, "{")

        # Normalize alternative closing-brace spellings to '}'.
        for close_key in (')', ']', '>', '_CL_'):
            if close_key in sharded_filepaths:
                sharded_filepaths = sharded_filepaths.replace(close_key, "}")

    if isinstance(sharded_filepaths, str):
        # Brace expand, set escape=False for Windows compatibility
        sharded_filepaths = list(braceexpand.braceexpand(sharded_filepaths, escape=False))

    # Check for distributed and partition shards accordingly
    if world_size > 1:
        if shard_strategy == 'scatter':
            logging.info("All tarred dataset shards will be scattered evenly across all nodes.")

            if len(sharded_filepaths) % world_size != 0:
                # Leftover shards are dropped by the integer division below.
                logging.warning(
                    f"Number of shards in tarred dataset ({len(sharded_filepaths)}) is not divisible "
                    f"by number of distributed workers ({world_size})."
                )

            shards_per_rank = len(sharded_filepaths) // world_size
            begin_idx = shards_per_rank * global_rank
            end_idx = begin_idx + shards_per_rank
            sharded_filepaths = sharded_filepaths[begin_idx:end_idx]
            logging.info(
                "Partitioning tarred dataset: process (%d) taking shards [%d, %d)", global_rank, begin_idx, end_idx
            )

        elif shard_strategy == 'replicate':
            logging.info("All tarred dataset shards will be replicated across all nodes.")
        else:
            raise ValueError(f"Invalid shard strategy ! Allowed values are : {valid_shard_strategies}")

    return sharded_filepaths
229
-
230
-
231
def cache_datastore_manifests(
    manifest_filepaths: Union[str, List[str]],
    cache_audio: bool = False,
    shared_cache: Optional[bool] = None,
    num_workers: Optional[int] = None,
    max_num_workers: int = 20,
):
    """Cache manifests and audio from an object store.
    It is assumed that remote manifests are using relative paths.

    In a distributed environment only one rank downloads (global rank zero when the
    cache is shared across nodes, local rank zero otherwise) and all ranks meet at a
    barrier afterwards.

    Args:
        manifest_filepaths: list of paths to manifest files (list of strings or a string with `,` as separator)
        cache_audio: If True, audio from manifest will also be cached
        shared_cache: Optional, True if cache is shared across all nodes
        num_workers: Optional, number of workers to be used for download
        max_num_workers: max number of workers to be used for download, used when setting num_workers automatically

    Raises:
        RuntimeError: If some audio files fail to download, or if caching is attempted on a
            non-zero rank without an initialized torch.distributed environment.
    """
    if isinstance(manifest_filepaths, str):
        manifest_filepaths = manifest_filepaths.split(',')

    # Count how many of the manifests actually live on a remote data store;
    # if none do, there is nothing to cache.
    num_datastore_manifests = sum([is_datastore_path(f) for f in manifest_filepaths])

    if num_datastore_manifests > 0:
        # Local utility function
        def cache_data(manifest_filepaths, cache_audio, num_workers, max_num_workers):
            """Cache manifests and audio data from object store."""
            # Determine the number of workers to use
            if num_workers is None:
                num_workers = os.cpu_count() - 1
            num_workers = min(num_workers, max_num_workers)

            # Process each manifest file
            for manifest_file in manifest_filepaths:
                # If manifest is on a data store, then cache it.
                # Otherwise, nothing to do.
                if is_datastore_path(manifest_file):
                    logging.info('Cache manifest file: %s', manifest_file)
                    cached_manifest_file = DataStoreObject(manifest_file).get()
                    logging.info('Cached at: %s', str(cached_manifest_file))

                    if cache_audio:
                        # Each audio file from manifest will be cached.
                        logging.info('Cache audio from manifest file: %s', manifest_file)
                        # Assumes that manifest is using relative paths
                        manifest_dir = os.path.dirname(manifest_file)
                        # Prepare all store objects
                        audio_objects = []
                        with open(cached_manifest_file, 'r') as f:
                            for line in f:
                                item = json.loads(line)
                                store_path = os.path.join(manifest_dir, item['audio_filepath'])
                                audio_objects.append(DataStoreObject(store_path=store_path))

                        if num_workers is not None and num_workers > 1:
                            logging.debug('Using multiprocessing with num_workers: %d.', num_workers)
                            with multiprocessing.Pool(processes=num_workers) as p:
                                # tqdm drives the imap iterator so downloads run eagerly with a progress bar
                                result = list(
                                    tqdm(p.imap(datastore_object_get, audio_objects), total=len(audio_objects))
                                )
                        else:
                            logging.debug('Using a single process.')
                            result = []
                            for audio_object in tqdm(audio_objects):
                                result.append(audio_object.get() is not None)

                        if not all(result):
                            raise RuntimeError('Some files not downloaded successfully')
                        logging.info('Caching complete')

                else:
                    # Nothing to do here
                    logging.debug('Manifest is not on a data store: %s', manifest_file)

        if torch.distributed.is_available() and torch.distributed.is_initialized():
            logging.debug('Distributed environment is available and initialized.')

            # Handle distributed environment
            if shared_cache is None:
                shared_cache = is_datastore_cache_shared()

            if shared_cache:
                logging.debug('Cache is shared among nodes, cache data on global rank zero.')
                is_rank_zero = is_global_rank_zero()
            else:
                logging.debug('Cache is not shared among nodes, cache data on local rank zero.')
                local_rank = int(os.environ.get("LOCAL_RANK", 0))
                is_rank_zero = local_rank == 0

            if is_rank_zero:
                logging.info('Cache data from %s rank 0', 'global' if shared_cache else 'local')
                cache_data(
                    manifest_filepaths=manifest_filepaths,
                    cache_audio=cache_audio,
                    num_workers=num_workers,
                    max_num_workers=max_num_workers,
                )
            # All ranks wait here until the designated rank finishes downloading.
            logging.debug('Reached barrier')
            torch.distributed.barrier()

        elif is_global_rank_zero():
            # Handle non-distributed environment, e.g., if running on a single GPU
            logging.warning(
                'Torch distributed is not initialized and caching may be prone to data race conditions. '
                'Now caching data from global rank 0. If there are other ranks and they pass this '
                'before rank 0, errors might result.'
            )
            cache_data(
                manifest_filepaths=manifest_filepaths,
                cache_audio=cache_audio,
                num_workers=num_workers,
                max_num_workers=max_num_workers,
            )
        else:
            raise RuntimeError(
                'Torch distributed is not initialized and caching on nodes other than global rank zero is disabled '
                'to avoid race condition between different ranks. To ensure distributed environment is '
                'initialized, please update data config to use `defer_setup = True`.'
            )
349
-
350
-
351
def shard_manifests_if_needed(
    manifest_filepaths: Union[str, List[str]],
    shard_strategy: str,
    shard_manifests: bool,
    global_rank: int,
    world_size: int,
):
    """Optionally expand / shard the list of manifests.

    This is made to use the same notation as the sharded audio files.

    Args:
        manifest_filepaths: list of manifest files (the sharded notation)
        shard_strategy: scatter or replicate (scatter by default)
        shard_manifests: bool, if False, no sharding / manifest filepath expansion will be attempted
        global_rank: int, the rank of this worker
        world_size: int, total number of workers

    Returns:
        The (possibly expanded/sharded) manifest filepaths. The input is returned
        unchanged when sharding is disabled, or when torch.distributed is
        unavailable or not initialized.
    """
    # NOTE: the docstring above was previously a bare string literal placed
    # *before* the def, so it was never attached to the function's __doc__.
    if not shard_manifests:
        return manifest_filepaths

    if not torch.distributed.is_available():
        logging.warning("Not running in torch.distributed mode. Manifest sharding not available")
        return manifest_filepaths

    if not torch.distributed.is_initialized():
        logging.warning(
            'Manifest sharding was requested but torch.distributed is not initialized '
            'Did you intend to set the defer_setup flag?'
        )
        return manifest_filepaths

    return expand_sharded_filepaths(
        sharded_filepaths=manifest_filepaths,
        shard_strategy=shard_strategy,
        world_size=world_size,
        global_rank=global_rank,
    )
390
-
391
-
392
class _AudioTextDataset(Dataset):
    """
    Dataset that loads tensors via a json file containing paths to audio files, transcripts, and durations (in seconds).
    Each new line is a different sample. Example below:
    {"audio_filepath": "/path/to/audio.wav", "text_filepath": "/path/to/audio.txt", "duration": 23.147}
    ...
    {"audio_filepath": "/path/to/audio.wav", "text": "the transcription", "offset": 301.75, "duration": 0.82, "utt":
    "utterance_id", "ctm_utt": "en_4156", "side": "A"}

    Args:
        manifest_filepath: Path to manifest json as described above. Can be comma-separated paths.
        parser: Str for a language specific preprocessor or a callable.
        sample_rate (int): Sample rate to resample loaded audio to
        int_values (bool): If true, load samples as 32-bit integers. Defaults to False.
        augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor object used to augment loaded
            audio
        max_duration: If audio exceeds this length, do not include in dataset
        min_duration: If audio is less than this length, do not include in dataset
        max_utts: Limit number of utterances
        trim: whether or not to trim silence. Defaults to False
        bos_id: Id of beginning of sequence symbol to append if not None
        eos_id: Id of end of sequence symbol to append if not None
        pad_id: Id of pad symbol. Defaults to 0
        return_sample_id (bool): whether to return the sample_id as a part of each sample
        channel_selector (int | Iterable[int] | str): select a single channel or a subset of channels from
            multi-channel audio. If set to `'average'`, it performs averaging across channels. Disabled if set to
            `None`. Defaults to `None`. Uses zero-based indexing.
        manifest_parse_func: Optional function to parse manifest entries. Defaults to None.
    """

    @property
    def output_types(self) -> Optional[Dict[str, NeuralType]]:
        """Returns definitions of module output ports."""
        return {
            'audio_signal': NeuralType(('B', 'T'), AudioSignal()),
            'a_sig_length': NeuralType(tuple('B'), LengthsType()),
            'transcripts': NeuralType(('B', 'T'), LabelsType()),
            'transcript_length': NeuralType(tuple('B'), LengthsType()),
            'sample_id': NeuralType(tuple('B'), LengthsType(), optional=True),
        }

    def __init__(
        self,
        manifest_filepath: str,
        parser: Union[str, Callable],
        sample_rate: int,
        int_values: bool = False,
        augmentor: 'nemo.collections.asr.parts.perturb.AudioAugmentor' = None,
        max_duration: Optional[int] = None,
        min_duration: Optional[int] = None,
        max_utts: int = 0,
        trim: bool = False,
        bos_id: Optional[int] = None,
        eos_id: Optional[int] = None,
        pad_id: int = 0,
        return_sample_id: bool = False,
        channel_selector: Optional[ChannelSelectorType] = None,
        manifest_parse_func: Optional[Callable] = None,
    ):
        # A single comma-separated string is accepted for convenience; normalize
        # it to a list of paths. (Was `type(...) == str`; isinstance also accepts
        # str subclasses and is the idiomatic type check.)
        if isinstance(manifest_filepath, str):
            manifest_filepath = manifest_filepath.split(",")

        # If necessary, cache manifests and audio from object store
        cache_datastore_manifests(manifest_filepaths=manifest_filepath, cache_audio=True)

        self.manifest_processor = ASRManifestProcessor(
            manifest_filepath=manifest_filepath,
            parser=parser,
            max_duration=max_duration,
            min_duration=min_duration,
            max_utts=max_utts,
            bos_id=bos_id,
            eos_id=eos_id,
            pad_id=pad_id,
            manifest_parse_func=manifest_parse_func,
        )
        self.featurizer = WaveformFeaturizer(sample_rate=sample_rate, int_values=int_values, augmentor=augmentor)
        self.trim = trim
        self.return_sample_id = return_sample_id
        self.channel_selector = channel_selector

    def get_manifest_sample(self, sample_id):
        """Return the raw manifest entry for the given sample index."""
        return self.manifest_processor.collection[sample_id]

    def __getitem__(self, index):
        # Support both a single index and an iterable of indices.
        if isinstance(index, IterableABC):
            return [self._process_sample(_index) for _index in index]
        else:
            return self._process_sample(index)

    def _process_sample(self, index):
        """Load one utterance: audio features + tokenized transcript (+ index if requested)."""
        sample = self.manifest_processor.collection[index]
        offset = sample.offset

        if offset is None:
            offset = 0

        features = self.featurizer.process(
            sample.audio_file,
            offset=offset,
            duration=sample.duration,
            trim=self.trim,
            orig_sr=sample.orig_sr,
            channel_selector=self.channel_selector,
        )
        f, fl = features, torch.tensor(features.shape[0]).long()

        t, tl = self.manifest_processor.process_text_by_sample(sample=sample)

        if self.return_sample_id:
            output = f, fl, torch.tensor(t).long(), torch.tensor(tl).long(), index
        else:
            output = f, fl, torch.tensor(t).long(), torch.tensor(tl).long()

        return output

    def __len__(self):
        return len(self.manifest_processor.collection)

    def _collate_fn(self, batch):
        return _speech_collate_fn(batch, pad_id=self.manifest_processor.pad_id)
510
-
511
-
512
class AudioToCharDataset(_AudioTextDataset):
    """
    Character-level speech-to-text dataset backed by a JSON manifest.

    Each manifest line is one sample, e.g.:
    {"audio_filepath": "/path/to/audio.wav", "text_filepath": "/path/to/audio.txt", "duration": 23.147}
    ...
    {"audio_filepath": "/path/to/audio.wav", "text": "the transcription", "offset": 301.75,
     "duration": 0.82, "utt": "utterance_id", "ctm_utt": "en_4156", "side": "A"}

    Args:
        manifest_filepath: Path to manifest json as described above. Can be comma-separated paths.
        labels: String containing all the possible characters to map to.
        sample_rate (int): Sample rate to resample loaded audio to.
        int_values (bool): If true, load samples as 32-bit integers. Defaults to False.
        augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor
            object used to augment loaded audio.
        max_duration: If audio exceeds this length, do not include in dataset.
        min_duration: If audio is less than this length, do not include in dataset.
        max_utts: Limit number of utterances.
        blank_index: blank character index, default = -1.
        unk_index: unk_character index, default = -1.
        normalize: whether to normalize transcript text. Defaults to True.
        trim: whether or not to trim silence. Defaults to False.
        bos_id: Id of beginning of sequence symbol to append if not None.
        eos_id: Id of end of sequence symbol to append if not None.
        pad_id: Id of pad symbol. Defaults to 0.
        parser: Str for a language specific preprocessor or a callable.
        return_sample_id (bool): whether to return the sample_id as a part of each sample.
        channel_selector (int | Iterable[int] | str): select a single channel or a subset of
            channels from multi-channel audio. If set to `'average'`, it performs averaging
            across channels. Disabled if set to `None`. Defaults to `None`. Uses zero-based indexing.
        manifest_parse_func: Optional function to parse manifest entries. Defaults to None.
    """

    @property
    def output_types(self) -> Optional[Dict[str, NeuralType]]:
        """Returns definitions of module output ports."""
        return {
            'audio_signal': NeuralType(('B', 'T'), AudioSignal()),
            'a_sig_length': NeuralType(tuple('B'), LengthsType()),
            'transcripts': NeuralType(('B', 'T'), LabelsType()),
            'transcript_length': NeuralType(tuple('B'), LengthsType()),
            'sample_id': NeuralType(tuple('B'), LengthsType(), optional=True),
        }

    def __init__(
        self,
        manifest_filepath: str,
        labels: Union[str, List[str]],
        sample_rate: int,
        int_values: bool = False,
        augmentor: 'nemo.collections.asr.parts.perturb.AudioAugmentor' = None,
        max_duration: Optional[float] = None,
        min_duration: Optional[float] = None,
        max_utts: int = 0,
        blank_index: int = -1,
        unk_index: int = -1,
        normalize: bool = True,
        trim: bool = False,
        bos_id: Optional[int] = None,
        eos_id: Optional[int] = None,
        pad_id: int = 0,
        parser: Union[str, Callable] = 'en',
        return_sample_id: bool = False,
        channel_selector: Optional[ChannelSelectorType] = None,
        manifest_parse_func: Optional[Callable] = None,
    ):
        self.labels = labels

        # Build the character-level text parser; `parser` may name a language
        # preset (e.g. 'en') or be a custom callable.
        char_parser = parsers.make_parser(
            labels=labels,
            name=parser,
            unk_id=unk_index,
            blank_id=blank_index,
            do_normalize=normalize,
        )

        super().__init__(
            manifest_filepath=manifest_filepath,
            parser=char_parser,
            sample_rate=sample_rate,
            int_values=int_values,
            augmentor=augmentor,
            max_duration=max_duration,
            min_duration=min_duration,
            max_utts=max_utts,
            trim=trim,
            bos_id=bos_id,
            eos_id=eos_id,
            pad_id=pad_id,
            return_sample_id=return_sample_id,
            channel_selector=channel_selector,
            manifest_parse_func=manifest_parse_func,
        )
602
-
603
-
604
class AudioToBPEDataset(_AudioTextDataset):
    """
    Subword (BPE) speech-to-text dataset backed by a JSON manifest.

    Each manifest line is one sample, e.g.:
    {"audio_filepath": "/path/to/audio.wav", "text_filepath": "/path/to/audio.txt", "duration": 23.147}
    ...
    {"audio_filepath": "/path/to/audio.wav", "text": "the transcription", "offset": 301.75,
     "duration": 0.82, "utt": "utterance_id", "ctm_utt": "en_4156", "side": "A"}

    In practice, the dataset and manifest used for character encoding and byte pair encoding
    are exactly the same. The only difference lies in how the dataset tokenizes the text in
    the manifest.

    Args:
        manifest_filepath: Path to manifest json as described above. Can be comma-separated paths.
        tokenizer: A subclass of the Tokenizer wrapper found in the common collection,
            nemo.collections.common.tokenizers.TokenizerSpec. ASR Models support a subset of
            all available tokenizers.
        sample_rate (int): Sample rate to resample loaded audio to.
        int_values (bool): If true, load samples as 32-bit integers. Defaults to False.
        augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor
            object used to augment loaded audio.
        max_duration: If audio exceeds this length, do not include in dataset.
        min_duration: If audio is less than this length, do not include in dataset.
        max_utts: Limit number of utterances.
        trim: Whether to trim silence segments.
        use_start_end_token: Boolean which dictates whether to add [BOS] and [EOS]
            tokens to beginning and ending of speech respectively.
        return_sample_id (bool): whether to return the sample_id as a part of each sample.
        channel_selector (int | Iterable[int] | str): select a single channel or a subset of
            channels from multi-channel audio. If set to `'average'`, it performs averaging
            across channels. Disabled if set to `None`. Defaults to `None`. Uses zero-based indexing.
        manifest_parse_func: Optional function to parse manifest entries. Defaults to None.
    """

    @property
    def output_types(self) -> Optional[Dict[str, NeuralType]]:
        """Returns definitions of module output ports."""
        return {
            'audio_signal': NeuralType(('B', 'T'), AudioSignal()),
            'a_sig_length': NeuralType(tuple('B'), LengthsType()),
            'transcripts': NeuralType(('B', 'T'), LabelsType()),
            'transcript_length': NeuralType(tuple('B'), LengthsType()),
            'sample_id': NeuralType(tuple('B'), LengthsType(), optional=True),
        }

    def __init__(
        self,
        manifest_filepath: str,
        tokenizer: 'nemo.collections.common.tokenizers.TokenizerSpec',
        sample_rate: int,
        int_values: bool = False,
        augmentor: 'nemo.collections.asr.parts.perturb.AudioAugmentor' = None,
        max_duration: Optional[int] = None,
        min_duration: Optional[int] = None,
        max_utts: int = 0,
        trim: bool = False,
        use_start_end_token: bool = True,
        return_sample_id: bool = False,
        channel_selector: Optional[ChannelSelectorType] = None,
        manifest_parse_func: Optional[Callable] = None,
    ):
        # Special-token ids are only taken from the tokenizer when the attribute
        # exists and is a positive id; otherwise they are disabled (None / 0).
        if use_start_end_token and getattr(tokenizer, "bos_id", -1) > 0:
            bos_id = tokenizer.bos_id
        else:
            bos_id = None

        if use_start_end_token and getattr(tokenizer, "eos_id", -1) > 0:
            eos_id = tokenizer.eos_id
        else:
            eos_id = None

        if getattr(tokenizer, "pad_id", -1) > 0:
            pad_id = tokenizer.pad_id
        else:
            pad_id = 0

        class TokenizerWrapper:
            """Adapts a TokenizerSpec to the callable-parser interface expected upstream."""

            def __init__(self, tokenizer):
                # Aggregate tokenizers receive per-language spans instead of plain text.
                self.is_aggregate = isinstance(tokenizer, tokenizers.aggregate_tokenizer.AggregateTokenizer)
                self._tokenizer = tokenizer

            def __call__(self, *args):
                if self.is_aggregate and isinstance(args[0], List):
                    # Each span is a dict with 'str' (text) and 'lang' keys.
                    token_ids = []
                    for span in args[0]:
                        token_ids.extend(self._tokenizer.text_to_ids(span['str'], span['lang']))
                    return token_ids

                return self._tokenizer.text_to_ids(*args)

        super().__init__(
            manifest_filepath=manifest_filepath,
            parser=TokenizerWrapper(tokenizer),
            sample_rate=sample_rate,
            int_values=int_values,
            augmentor=augmentor,
            max_duration=max_duration,
            min_duration=min_duration,
            max_utts=max_utts,
            bos_id=bos_id,
            eos_id=eos_id,
            pad_id=pad_id,
            trim=trim,
            return_sample_id=return_sample_id,
            channel_selector=channel_selector,
            manifest_parse_func=manifest_parse_func,
        )
719
-
720
-
721
@deprecated(
    explanation='Webdataset support will be removed in v2.1.0 versions, please use LhotseSpeechToTextBpeDataset class instead'
)
class _TarredAudioToTextDataset(IterableDataset):
    """
    A similar Dataset to the AudioToCharDataset/AudioToBPEDataset, but which loads tarred audio files.

    Accepts a single comma-separated JSON manifest file (in the same style as for the AudioToCharDataset/AudioToBPEDataset),
    as well as the path(s) to the tarball(s) containing the wav files. Each line of the manifest should
    contain the information for one audio file, including at least the transcript and name of the audio
    file within the tarball.

    Valid formats for the audio_tar_filepaths argument include:
    (1) a single string that can be brace-expanded, e.g. 'path/to/audio.tar' or 'path/to/audio_{1..100}.tar.gz', or
    (2) a list of file paths that will not be brace-expanded, e.g. ['audio_1.tar', 'audio_2.tar', ...].

    Note: For brace expansion in (1), there may be cases where `{x..y}` syntax cannot be used due to shell interference.
    This occurs most commonly inside SLURM scripts. Therefore we provide a few equivalent replacements.
    Supported opening braces - { <=> (, [, < and the special tag _OP_.
    Supported closing braces - } <=> ), ], > and the special tag _CL_.
    For SLURM based tasks, we suggest the use of the special tags for ease of use.

    See the WebDataset documentation for more information about accepted data and input formats.

    If using multiple workers the number of shards should be divisible by world_size to ensure an
    even split among workers. If it is not divisible, logging will give a warning but training will proceed.
    In addition, if using mutiprocessing, each shard MUST HAVE THE SAME NUMBER OF ENTRIES after filtering
    is applied. We currently do not check for this, but your program may hang if the shards are uneven!

    Notice that a few arguments are different from the AudioToCharDataset; for example, shuffle (bool) has been
    replaced by shuffle_n (int).

    Additionally, please note that the len() of this DataLayer is assumed to be the length of the manifest
    after filtering. An incorrect manifest length may lead to some DataLoader issues down the line.

    Args:
        audio_tar_filepaths: Either a list of audio tarball filepaths, or a
            string (can be brace-expandable).
        manifest_filepath (str): Path to the manifest.
        parser (callable): A callable which is used to pre-process the text output.
        sample_rate (int): Sample rate to resample loaded audio to
        int_values (bool): If true, load samples as 32-bit integers. Defauts to False.
        augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor
            object used to augment loaded audio
        shuffle_n (int): How many samples to look ahead and load to be shuffled.
            See WebDataset documentation for more details.
            Defaults to 0.
        min_duration (float): Dataset parameter.
            All training files which have a duration less than min_duration
            are dropped. Note: Duration is read from the manifest JSON.
            Defaults to 0.1.
        max_duration (float): Dataset parameter.
            All training files which have a duration more than max_duration
            are dropped. Note: Duration is read from the manifest JSON.
            Defaults to None.
        blank_index (int): Blank character index, defaults to -1.
        unk_index (int): Unknown character index, defaults to -1.
        normalize (bool): Dataset parameter.
            Whether to use automatic text cleaning.
            It is highly recommended to manually clean text for best results.
            Defaults to True.
        trim (bool): Whether to use trim silence from beginning and end
            of audio signal using librosa.effects.trim().
            Defaults to False.
        bos_id (id): Dataset parameter.
            Beginning of string symbol id used for seq2seq models.
            Defaults to None.
        eos_id (id): Dataset parameter.
            End of string symbol id used for seq2seq models.
            Defaults to None.
        pad_id (id): Token used to pad when collating samples in batches.
            If this is None, pads using 0s.
            Defaults to None.
        shard_strategy (str): Tarred dataset shard distribution strategy chosen as a str value during ddp.
            - `scatter`: The default shard strategy applied by WebDataset, where each node gets
                a unique set of shards, which are permanently pre-allocated and never changed at runtime.
            - `replicate`: Optional shard strategy, where each node gets all of the set of shards
                available in the tarred dataset, which are permanently pre-allocated and never changed at runtime.
                The benefit of replication is that it allows each node to sample data points from the entire
                dataset independently of other nodes, and reduces dependence on value of `shuffle_n`.

                .. warning::
                    Replicated strategy allows every node to sample the entire set of available tarfiles,
                    and therefore more than one node may sample the same tarfile, and even sample the same
                    data points! As such, there is no assured guarantee that all samples in the dataset will be
                    sampled at least once during 1 epoch. Scattered strategy, on the other hand, on specific
                    occasions (when the number of shards is not divisible with ``world_size``), will not sample
                    the entire dataset. For these reasons it is not advisable to use tarred datasets as validation
                    or test datasets.
        shard_manifests (bool): Whether or not to try / shard manifests. Defaults to False.
        global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
        world_size (int): Total number of processes, used for partitioning shards. Defaults to 0.
        return_sample_id (bool): whether to return the sample_id as a part of each sample
        manifest_parse_func: Optional function to parse manifest entries. Defaults to None.
    """

    def __init__(
        self,
        audio_tar_filepaths: Union[str, List[str]],
        manifest_filepath: str,
        parser: Callable,
        sample_rate: int,
        int_values: bool = False,
        augmentor: Optional['nemo.collections.asr.parts.perturb.AudioAugmentor'] = None,
        shuffle_n: int = 0,
        min_duration: Optional[float] = None,
        max_duration: Optional[float] = None,
        trim: bool = False,
        bos_id: Optional[int] = None,
        eos_id: Optional[int] = None,
        pad_id: int = 0,
        shard_strategy: str = "scatter",
        shard_manifests: bool = False,
        global_rank: int = 0,
        world_size: int = 0,
        return_sample_id: bool = False,
        manifest_parse_func: Optional[Callable] = None,
    ):
        self.shard_manifests = shard_manifests

        # Shard manifests if necessary and possible and then expand the paths
        manifest_filepath = shard_manifests_if_needed(
            shard_manifests=shard_manifests,
            shard_strategy=shard_strategy,
            manifest_filepaths=manifest_filepath,
            world_size=world_size,
            global_rank=global_rank,
        )

        # If necessary, cache manifests from object store
        cache_datastore_manifests(manifest_filepaths=manifest_filepath)

        self.manifest_processor = ASRManifestProcessor(
            manifest_filepath=manifest_filepath,
            parser=parser,
            max_duration=max_duration,
            min_duration=min_duration,
            max_utts=0,
            bos_id=bos_id,
            eos_id=eos_id,
            pad_id=pad_id,
            index_by_file_id=True,  # Must set this so the manifest lines can be indexed by file ID
            manifest_parse_func=manifest_parse_func,
        )

        # Cached length; in sharded-manifest mode this is summed across ranks.
        self.len = self._compute_len()

        self.featurizer = WaveformFeaturizer(sample_rate=sample_rate, int_values=int_values, augmentor=augmentor)
        self.trim = trim
        self.eos_id = eos_id
        self.bos_id = bos_id
        self.pad_id = pad_id
        self.return_sample_id = return_sample_id

        audio_tar_filepaths = expand_sharded_filepaths(
            sharded_filepaths=audio_tar_filepaths,
            shard_strategy=shard_strategy,
            world_size=world_size,
            global_rank=global_rank,
        )

        # Put together WebDataset pipeline
        self._dataset = wds.DataPipeline(
            wds.SimpleShardList(urls=audio_tar_filepaths),
            webdataset_split_by_workers,
            wds.shuffle(shuffle_n),
            wds.tarfile_to_samples(),
            wds.rename(audio=VALID_FILE_FORMATS, key='__key__'),
            wds.to_tuple('audio', 'key'),
            self._filter,
            self._loop_offsets,
            wds.map(self._build_sample),
        )

    def _filter(self, iterator):
        """This function is used to remove samples that have been filtered out by ASRAudioText already.
        Otherwise, we would get a KeyError as _build_sample attempts to find the manifest entry for a sample
        that was filtered out (e.g. for duration).
        Note that if using multi-GPU training, filtering may lead to an imbalance in samples in each shard,
        which may make your code hang as one process will finish before the other.
        """

        class TarredAudioFilter:
            # Wraps the upstream iterator; `iterator` is captured from the
            # enclosing _filter call via closure.
            def __init__(self, collection):
                self.iterator = iterator
                self.collection = collection

            def __iter__(self):
                return self

            def __next__(self):
                # Skip tar members whose file ID has no manifest entry.
                while True:
                    audio_bytes, audio_filename = next(self.iterator)
                    file_id, _ = os.path.splitext(os.path.basename(audio_filename))
                    if file_id in self.collection.mapping:
                        return audio_bytes, audio_filename

        return TarredAudioFilter(self.manifest_processor.collection)

    def _loop_offsets(self, iterator):
        """This function is used to iterate through utterances with different offsets for each file."""

        class TarredAudioLoopOffsets:
            # Re-emits the same audio bytes once per manifest offset entry, so a
            # single tar member can yield multiple utterances.
            def __init__(self, collection):
                self.iterator = iterator
                self.collection = collection
                self.current_fn = None
                self.current_bytes = None
                self.offset_id = 0

            def __iter__(self):
                return self

            def __next__(self):
                if self.current_fn is None:
                    self.current_bytes, self.current_fn = next(self.iterator)
                    self.offset_id = 0
                else:
                    offset_list = self.collection.mapping[self.current_fn]
                    if len(offset_list) == self.offset_id + 1:
                        # All offsets for this file consumed; advance to the next file.
                        self.current_bytes, self.current_fn = next(self.iterator)
                        self.offset_id = 0
                    else:
                        self.offset_id += 1

                return self.current_bytes, self.current_fn, self.offset_id

        return TarredAudioLoopOffsets(self.manifest_processor.collection)

    def _collate_fn(self, batch):
        return _speech_collate_fn(batch, self.pad_id)

    def _build_sample(self, tup):
        """Builds the training sample by combining the data from the WebDataset with the manifest info."""
        audio_bytes, audio_filename, offset_id = tup

        # Grab manifest entry from self.manifest_preprocessor.collection
        file_id, _ = os.path.splitext(os.path.basename(audio_filename))

        manifest_idx = self.manifest_processor.collection.mapping[file_id][offset_id]
        manifest_entry = self.manifest_processor.collection[manifest_idx]

        offset = manifest_entry.offset
        if offset is None:
            offset = 0

        # Convert audio bytes to IO stream for processing (for SoundFile to read)
        audio_filestream = io.BytesIO(audio_bytes)
        features = self.featurizer.process(
            audio_filestream,
            offset=offset,
            duration=manifest_entry.duration,
            trim=self.trim,
            orig_sr=manifest_entry.orig_sr,
        )
        audio_filestream.close()

        # Audio features
        f, fl = features, torch.tensor(features.shape[0]).long()

        # Text features
        t, tl = manifest_entry.text_tokens, len(manifest_entry.text_tokens)

        # NOTE(review): the return value of this call is discarded — the tokens
        # used below come from manifest_entry.text_tokens. Presumably the call is
        # redundant or kept for a side effect; confirm before removing.
        self.manifest_processor.process_text_by_sample(sample=manifest_entry)

        if self.bos_id is not None:
            t = [self.bos_id] + t
            tl += 1
        if self.eos_id is not None:
            t = t + [self.eos_id]
            tl += 1

        if self.return_sample_id:
            return f, fl, torch.tensor(t).long(), torch.tensor(tl).long(), manifest_idx
        else:
            return f, fl, torch.tensor(t).long(), torch.tensor(tl).long()

    def get_manifest_sample(self, sample_id):
        # Return the raw manifest entry for the given index.
        return self.manifest_processor.collection[sample_id]

    def __iter__(self):
        return self._dataset.__iter__()

    def _compute_len(self):
        # With sharded manifests each rank only sees a subset, so the global
        # length is the all_reduce (sum) of per-rank collection lengths.
        if self.shard_manifests and torch.distributed.is_available() and torch.distributed.is_initialized():
            my_len = torch.tensor(len(self.manifest_processor.collection), dtype=torch.int32).cuda()
            torch.distributed.all_reduce(my_len)
            my_len = my_len.int()
            logging.info(f'Sharded manifests: Total length: {my_len}')
        else:
            my_len = len(self.manifest_processor.collection)

        return my_len

    def __len__(self):
        return self.len
1017
-
1018
-
1019
- class TarredAudioToCharDataset(_TarredAudioToTextDataset):
1020
- """
1021
- A similar Dataset to the AudioToCharDataset, but which loads tarred audio files.
1022
-
1023
- Accepts a single comma-separated JSON manifest file (in the same style as for the AudioToCharDataset),
1024
- as well as the path(s) to the tarball(s) containing the wav files. Each line of the manifest should
1025
- contain the information for one audio file, including at least the transcript and name of the audio
1026
- file within the tarball.
1027
-
1028
- Valid formats for the audio_tar_filepaths argument include:
1029
- (1) a single string that can be brace-expanded, e.g. 'path/to/audio.tar' or 'path/to/audio_{1..100}.tar.gz', or
1030
- (2) a list of file paths that will not be brace-expanded, e.g. ['audio_1.tar', 'audio_2.tar', ...].
1031
-
1032
- See the WebDataset documentation for more information about accepted data and input formats.
1033
-
1034
- If using multiple workers the number of shards should be divisible by world_size to ensure an
1035
- even split among workers. If it is not divisible, logging will give a warning but training will proceed.
1036
- In addition, if using mutiprocessing, each shard MUST HAVE THE SAME NUMBER OF ENTRIES after filtering
1037
- is applied. We currently do not check for this, but your program may hang if the shards are uneven!
1038
-
1039
- Notice that a few arguments are different from the AudioToCharDataset; for example, shuffle (bool) has been
1040
- replaced by shuffle_n (int).
1041
-
1042
- Additionally, please note that the len() of this DataLayer is assumed to be the length of the manifest
1043
- after filtering. An incorrect manifest length may lead to some DataLoader issues down the line.
1044
-
1045
- Args:
1046
- audio_tar_filepaths: Either a list of audio tarball filepaths, or a
1047
- string (can be brace-expandable).
1048
- manifest_filepath (str): Path to the manifest.
1049
- labels (list): List of characters that can be output by the ASR model.
1050
- For Jasper, this is the 28 character set {a-z '}. The CTC blank
1051
- symbol is automatically added later for models using ctc.
1052
- sample_rate (int): Sample rate to resample loaded audio to
1053
- int_values (bool): If true, load samples as 32-bit integers. Defauts to False.
1054
- augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor
1055
- object used to augment loaded audio
1056
- shuffle_n (int): How many samples to look ahead and load to be shuffled.
1057
- See WebDataset documentation for more details.
1058
- Defaults to 0.
1059
- min_duration (float): Dataset parameter.
1060
- All training files which have a duration less than min_duration
1061
- are dropped. Note: Duration is read from the manifest JSON.
1062
- Defaults to 0.1.
1063
- max_duration (float): Dataset parameter.
1064
- All training files which have a duration more than max_duration
1065
- are dropped. Note: Duration is read from the manifest JSON.
1066
- Defaults to None.
1067
- blank_index (int): Blank character index, defaults to -1.
1068
- unk_index (int): Unknown character index, defaults to -1.
1069
- normalize (bool): Dataset parameter.
1070
- Whether to use automatic text cleaning.
1071
- It is highly recommended to manually clean text for best results.
1072
- Defaults to True.
1073
- trim (bool): Whether to use trim silence from beginning and end
1074
- of audio signal using librosa.effects.trim().
1075
- Defaults to False.
1076
- bos_id (id): Dataset parameter.
1077
- Beginning of string symbol id used for seq2seq models.
1078
- Defaults to None.
1079
- eos_id (id): Dataset parameter.
1080
- End of string symbol id used for seq2seq models.
1081
- Defaults to None.
1082
- pad_id (id): Token used to pad when collating samples in batches.
1083
- If this is None, pads using 0s.
1084
- Defaults to None.
1085
- shard_strategy (str): Tarred dataset shard distribution strategy chosen as a str value during ddp.
1086
-
1087
- - `scatter`: The default shard strategy applied by WebDataset, where each node gets
1088
- a unique set of shards, which are permanently pre-allocated and never changed at runtime.
1089
- - `replicate`: Optional shard strategy, where each node gets all of the set of shards
1090
- available in the tarred dataset, which are permanently pre-allocated and never changed at runtime.
1091
- The benefit of replication is that it allows each node to sample data points from the entire
1092
- dataset independently of other nodes, and reduces dependence on value of `shuffle_n`.
1093
-
1094
- .. warning::
1095
-
1096
- Replicated strategy allows every node to sample the entire set of available tarfiles,
1097
- and therefore more than one node may sample the same tarfile, and even sample the same
1098
- data points! As such, there is no assured guarantee that all samples in the dataset will be
1099
- sampled at least once during 1 epoch. Scattered strategy, on the other hand, on specific
1100
- occasions (when the number of shards is not divisible with ``world_size``), will not sample
1101
- the entire dataset. For these reasons it is not advisable to use tarred datasets as validation
1102
- or test datasets.
1103
-
1104
- global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
1105
- world_size (int): Total number of processes, used for partitioning shards. Defaults to 0.
1106
- return_sample_id (bool): whether to return the sample_id as a part of each sample
1107
- manifest_parse_func: Optional function to parse manifest entries. Defaults to None.
1108
- """
1109
-
1110
- def __init__(
1111
- self,
1112
- audio_tar_filepaths: Union[str, List[str]],
1113
- manifest_filepath: str,
1114
- labels: List[str],
1115
- sample_rate: int,
1116
- int_values: bool = False,
1117
- augmentor: Optional['nemo.collections.asr.parts.perturb.AudioAugmentor'] = None,
1118
- shuffle_n: int = 0,
1119
- min_duration: Optional[float] = None,
1120
- max_duration: Optional[float] = None,
1121
- blank_index: int = -1,
1122
- unk_index: int = -1,
1123
- normalize: bool = True,
1124
- trim: bool = False,
1125
- bos_id: Optional[int] = None,
1126
- eos_id: Optional[int] = None,
1127
- parser: Optional[str] = 'en',
1128
- pad_id: int = 0,
1129
- shard_strategy: str = "scatter",
1130
- shard_manifests: bool = False,
1131
- global_rank: int = 0,
1132
- world_size: int = 0,
1133
- return_sample_id: bool = False,
1134
- manifest_parse_func: Optional[Callable] = None,
1135
- ):
1136
- self.labels = labels
1137
-
1138
- parser = parsers.make_parser(
1139
- labels=labels, name=parser, unk_id=unk_index, blank_id=blank_index, do_normalize=normalize
1140
- )
1141
-
1142
- super().__init__(
1143
- audio_tar_filepaths=audio_tar_filepaths,
1144
- manifest_filepath=manifest_filepath,
1145
- parser=parser,
1146
- sample_rate=sample_rate,
1147
- int_values=int_values,
1148
- augmentor=augmentor,
1149
- shuffle_n=shuffle_n,
1150
- min_duration=min_duration,
1151
- max_duration=max_duration,
1152
- trim=trim,
1153
- bos_id=bos_id,
1154
- eos_id=eos_id,
1155
- pad_id=pad_id,
1156
- shard_strategy=shard_strategy,
1157
- shard_manifests=shard_manifests,
1158
- global_rank=global_rank,
1159
- world_size=world_size,
1160
- return_sample_id=return_sample_id,
1161
- manifest_parse_func=manifest_parse_func,
1162
- )
1163
-
1164
-
1165
- class TarredAudioToBPEDataset(_TarredAudioToTextDataset):
1166
- """
1167
- A similar Dataset to the AudioToBPEDataset, but which loads tarred audio files.
1168
-
1169
- Accepts a single comma-separated JSON manifest file (in the same style as for the AudioToBPEDataset),
1170
- as well as the path(s) to the tarball(s) containing the wav files. Each line of the manifest should
1171
- contain the information for one audio file, including at least the transcript and name of the audio
1172
- file within the tarball.
1173
-
1174
- Valid formats for the audio_tar_filepaths argument include:
1175
- (1) a single string that can be brace-expanded, e.g. 'path/to/audio.tar' or 'path/to/audio_{1..100}.tar.gz', or
1176
- (2) a list of file paths that will not be brace-expanded, e.g. ['audio_1.tar', 'audio_2.tar', ...].
1177
-
1178
- See the WebDataset documentation for more information about accepted data and input formats.
1179
-
1180
- If using multiple workers the number of shards should be divisible by world_size to ensure an
1181
- even split among workers. If it is not divisible, logging will give a warning but training will proceed.
1182
- In addition, if using mutiprocessing, each shard MUST HAVE THE SAME NUMBER OF ENTRIES after filtering
1183
- is applied. We currently do not check for this, but your program may hang if the shards are uneven!
1184
-
1185
- Notice that a few arguments are different from the AudioToBPEDataset; for example, shuffle (bool) has been
1186
- replaced by shuffle_n (int).
1187
-
1188
- Additionally, please note that the len() of this DataLayer is assumed to be the length of the manifest
1189
- after filtering. An incorrect manifest length may lead to some DataLoader issues down the line.
1190
-
1191
- Args:
1192
- audio_tar_filepaths: Either a list of audio tarball filepaths, or a
1193
- string (can be brace-expandable).
1194
- manifest_filepath (str): Path to the manifest.
1195
- tokenizer (TokenizerSpec): Either a Word Piece Encoding tokenizer (BERT),
1196
- or a Sentence Piece Encoding tokenizer (BPE). The CTC blank
1197
- symbol is automatically added later for models using ctc.
1198
- sample_rate (int): Sample rate to resample loaded audio to
1199
- int_values (bool): If true, load samples as 32-bit integers. Defauts to False.
1200
- augmentor (nemo.collections.asr.parts.perturb.AudioAugmentor): An AudioAugmentor
1201
- object used to augment loaded audio
1202
- shuffle_n (int): How many samples to look ahead and load to be shuffled.
1203
- See WebDataset documentation for more details.
1204
- Defaults to 0.
1205
- min_duration (float): Dataset parameter.
1206
- All training files which have a duration less than min_duration
1207
- are dropped. Note: Duration is read from the manifest JSON.
1208
- Defaults to 0.1.
1209
- max_duration (float): Dataset parameter.
1210
- All training files which have a duration more than max_duration
1211
- are dropped. Note: Duration is read from the manifest JSON.
1212
- Defaults to None.
1213
- trim (bool): Whether to use trim silence from beginning and end
1214
- of audio signal using librosa.effects.trim().
1215
- Defaults to False.
1216
- use_start_end_token: Boolean which dictates whether to add [BOS] and [EOS]
1217
- tokens to beginning and ending of speech respectively.
1218
- pad_id (id): Token used to pad when collating samples in batches.
1219
- If this is None, pads using 0s.
1220
- Defaults to None.
1221
- shard_strategy (str): Tarred dataset shard distribution strategy chosen as a str value during ddp.
1222
-
1223
- - `scatter`: The default shard strategy applied by WebDataset, where each node gets
1224
- a unique set of shards, which are permanently pre-allocated and never changed at runtime.
1225
- - `replicate`: Optional shard strategy, where each node gets all of the set of shards
1226
- available in the tarred dataset, which are permanently pre-allocated and never changed at runtime.
1227
- The benefit of replication is that it allows each node to sample data points from the entire
1228
- dataset independently of other nodes, and reduces dependence on value of `shuffle_n`.
1229
-
1230
- .. warning::
1231
-
1232
- Replicated strategy allows every node to sample the entire set of available tarfiles,
1233
- and therefore more than one node may sample the same tarfile, and even sample the same
1234
- data points! As such, there is no assured guarantee that all samples in the dataset will be
1235
- sampled at least once during 1 epoch. Scattered strategy, on the other hand, on specific
1236
- occasions (when the number of shards is not divisible with ``world_size``), will not sample
1237
- the entire dataset. For these reasons it is not advisable to use tarred datasets as validation
1238
- or test datasets.
1239
-
1240
- global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
1241
- world_size (int): Total number of processes, used for partitioning shards. Defaults to 0.
1242
- return_sample_id (bool): whether to return the sample_id as a part of each sample
1243
- manifest_parse_func: Optional function to parse manifest entries. Defaults to None.
1244
- """
1245
-
1246
- def __init__(
1247
- self,
1248
- audio_tar_filepaths: Union[str, List[str]],
1249
- manifest_filepath: str,
1250
- tokenizer: 'nemo.collections.common.tokenizers.TokenizerSpec',
1251
- sample_rate: int,
1252
- int_values: bool = False,
1253
- augmentor: Optional['nemo.collections.asr.parts.perturb.AudioAugmentor'] = None,
1254
- shuffle_n: int = 0,
1255
- min_duration: Optional[float] = None,
1256
- max_duration: Optional[float] = None,
1257
- trim: bool = False,
1258
- use_start_end_token: bool = True,
1259
- shard_strategy: str = "scatter",
1260
- shard_manifests: bool = False,
1261
- global_rank: int = 0,
1262
- world_size: int = 0,
1263
- return_sample_id: bool = False,
1264
- manifest_parse_func: Optional[Callable] = None,
1265
- ):
1266
- if use_start_end_token and hasattr(tokenizer, "bos_id") and tokenizer.bos_id > 0:
1267
- bos_id = tokenizer.bos_id
1268
- else:
1269
- bos_id = None
1270
-
1271
- if use_start_end_token and hasattr(tokenizer, "eos_id") and tokenizer.eos_id > 0:
1272
- eos_id = tokenizer.eos_id
1273
- else:
1274
- eos_id = None
1275
-
1276
- if hasattr(tokenizer, "pad_id") and tokenizer.pad_id > 0:
1277
- pad_id = tokenizer.pad_id
1278
- else:
1279
- pad_id = 0
1280
-
1281
- class TokenizerWrapper:
1282
- def __init__(self, tokenizer):
1283
- if isinstance(tokenizer, tokenizers.aggregate_tokenizer.AggregateTokenizer):
1284
- self.is_aggregate = True
1285
- else:
1286
- self.is_aggregate = False
1287
- self._tokenizer = tokenizer
1288
-
1289
- def __call__(self, *args):
1290
- if isinstance(args[0], List) and self.is_aggregate:
1291
- t = []
1292
- for span in args[0]:
1293
- t.extend(self._tokenizer.text_to_ids(span['str'], span['lang']))
1294
- return t
1295
-
1296
- t = self._tokenizer.text_to_ids(*args)
1297
- return t
1298
-
1299
- super().__init__(
1300
- audio_tar_filepaths=audio_tar_filepaths,
1301
- manifest_filepath=manifest_filepath,
1302
- parser=TokenizerWrapper(tokenizer),
1303
- sample_rate=sample_rate,
1304
- int_values=int_values,
1305
- augmentor=augmentor,
1306
- shuffle_n=shuffle_n,
1307
- min_duration=min_duration,
1308
- max_duration=max_duration,
1309
- trim=trim,
1310
- bos_id=bos_id,
1311
- eos_id=eos_id,
1312
- pad_id=pad_id,
1313
- shard_strategy=shard_strategy,
1314
- shard_manifests=shard_manifests,
1315
- global_rank=global_rank,
1316
- world_size=world_size,
1317
- return_sample_id=return_sample_id,
1318
- manifest_parse_func=manifest_parse_func,
1319
- )
1320
-
1321
-
1322
- class BucketingDataset(IterableDataset):
1323
- """
1324
- A Dataset which wraps another IterableDataset and adopts it for bucketing
1325
- Args:
1326
- dataset (IterableDataset): The IterableDataset to get wrapped
1327
- bucketing_batch_size (int): Number of samples to build a batch
1328
- """
1329
-
1330
- def __init__(
1331
- self,
1332
- dataset: IterableDataset,
1333
- bucketing_batch_size: int,
1334
- ):
1335
- self.wrapped_dataset = dataset
1336
- self.bucketing_batch_size = bucketing_batch_size
1337
- super().__init__()
1338
-
1339
- def _collate_fn(self, batch):
1340
- return _speech_collate_fn(batch[0], self.wrapped_dataset.pad_id)
1341
-
1342
- def __iter__(self):
1343
- return BucketingIterator(
1344
- wrapped_ds=self.wrapped_dataset._dataset, bucketing_batch_size=self.bucketing_batch_size
1345
- ).__iter__()
1346
-
1347
- def __len__(self):
1348
- return int(math.ceil(len(self.wrapped_dataset) / float(self.bucketing_batch_size)))
1349
-
1350
-
1351
- class BucketingIterator:
1352
- def __init__(self, wrapped_ds, bucketing_batch_size):
1353
- self.wrapped_ds = wrapped_ds
1354
- self.wrapped_iter = None
1355
- self.bucketing_batch_size = bucketing_batch_size
1356
-
1357
- def __iter__(self):
1358
- self.wrapped_iter = iter(self.wrapped_ds)
1359
- return self
1360
-
1361
- def __next__(self):
1362
- batches = []
1363
- for idx in range(self.bucketing_batch_size):
1364
- try:
1365
- sample = next(self.wrapped_iter)
1366
- except StopIteration:
1367
- break
1368
- batches.append(sample)
1369
- if len(batches) == 0:
1370
- raise StopIteration
1371
- return batches
1372
-
1373
-
1374
- class RandomizedChainDataset(ChainDataset):
1375
- def __init__(self, datasets: Iterable[Dataset], rnd_seed=0) -> None:
1376
- super(RandomizedChainDataset, self).__init__(list(datasets))
1377
- self.rnd_gen = np.random.RandomState(rnd_seed)
1378
-
1379
- def __iter__(self):
1380
- shuffled_order = self.rnd_gen.permutation(len(self.datasets))
1381
- for dataset_idx in shuffled_order:
1382
- d = self.datasets[dataset_idx]
1383
- assert isinstance(d, IterableDataset), "ChainDataset only supports IterableDataset"
1384
- for idx, x in enumerate(d):
1385
- yield x
1386
- # in case d is an infinite dataset, we want to break the loop
1387
- # so that the other datasets get a chance to yield too
1388
- if idx >= len(d) - 1:
1389
- break
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/collections/asr/data/audio_to_text_dali.py DELETED
@@ -1,777 +0,0 @@
1
- # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import math
16
- import operator
17
- import os.path
18
- import time
19
- from collections.abc import Iterator
20
- from typing import Callable, List, Optional, Union
21
-
22
- import torch
23
- from omegaconf import DictConfig
24
-
25
- from nemo.collections.asr.data.audio_to_text import ASRManifestProcessor, expand_sharded_filepaths
26
- from nemo.collections.common.parts.preprocessing import parsers
27
- from nemo.utils import logging, model_utils
28
-
29
- try:
30
- import nvidia.dali as dali
31
- from nvidia.dali.pipeline import Pipeline
32
- from nvidia.dali.plugin.pytorch import DALIGenericIterator as DALIPytorchIterator
33
- from nvidia.dali.plugin.pytorch import LastBatchPolicy as LastBatchPolicy
34
-
35
- HAVE_DALI = True
36
- except (ImportError, ModuleNotFoundError):
37
- HAVE_DALI = False
38
-
39
- __all__ = [
40
- 'AudioToCharDALIDataset',
41
- 'AudioToBPEDALIDataset',
42
- ]
43
-
44
- """
45
- Below minimum version is required to access the "read_idxs" argument in
46
- dali.fn.readers.nemo_asr
47
- """
48
- __DALI_MINIMUM_VERSION__ = "1.11"
49
-
50
- DALI_INSTALLATION_MESSAGE = (
51
- "Could not import `nvidia.dali`.\n"
52
- "Please install DALI by following the steps provided here - \n"
53
- "https://docs.nvidia.com/deeplearning/dali/user-guide/docs/installation.html"
54
- )
55
-
56
-
57
- def is_dali_supported(min_version: str, verbose: bool = False) -> bool:
58
- """
59
- Checks if DALI in installed, and version is >= min_verion.
60
-
61
- Args:
62
- min_version: A semver str that is the minimum requirement.
63
- verbose: Whether to log the installation instructions if DALI is not found.
64
-
65
- Returns:
66
- bool - whether DALI could be imported or not.
67
- """
68
- module_available, _ = model_utils.check_lib_version(
69
- 'nvidia.dali', checked_version=min_version, operator=operator.ge
70
- )
71
-
72
- # If DALI is not installed
73
- if module_available is None:
74
- if verbose:
75
- logging.info(DALI_INSTALLATION_MESSAGE)
76
-
77
- return False
78
-
79
- return module_available
80
-
81
-
82
- class DALIOutputs(object):
83
- def __init__(self, out_dict):
84
- self._has_processed_signal = 'processed_signal' in out_dict and 'processed_signal_len' in out_dict
85
- if not self._has_processed_signal:
86
- assert 'audio' in out_dict and 'audio_len' in out_dict
87
- assert 'transcript' in out_dict and 'transcript_len' in out_dict
88
- if self._has_processed_signal:
89
- self._outs = (
90
- out_dict['processed_signal'],
91
- out_dict['processed_signal_len'].reshape(-1),
92
- out_dict['transcript'],
93
- out_dict['transcript_len'].reshape(-1),
94
- )
95
- else:
96
- self._outs = (
97
- out_dict['audio'],
98
- out_dict['audio_len'].reshape(-1),
99
- out_dict['transcript'],
100
- out_dict['transcript_len'].reshape(-1),
101
- )
102
-
103
- @property
104
- def has_processed_signal(self):
105
- return self._has_processed_signal
106
-
107
- def __getitem__(self, key):
108
- return self._outs[key]
109
-
110
- def __len__(self):
111
- return len(self._outs)
112
-
113
-
114
- class _AudioTextDALIDataset(Iterator):
115
- """
116
- NVIDIA DALI pipeline that loads tensors via one or more manifest files where each line containing a sample descriptor in JSON,
117
- including audio files, transcripts, and durations (in seconds).
118
- Here's an example:
119
- {"audio_filepath": "/path/to/audio.wav", "text_filepath": "/path/to/audio.txt", "duration": 23.147}
120
- ...
121
- {"audio_filepath": "/path/to/audio.wav", "text": "the transcription", "offset": 301.75, "duration": 0.82, "utt":
122
- "utterance_id", "ctm_utt": "en_4156", "side": "A"}
123
-
124
- Args:
125
- manifest_filepath: Path to manifest file with the format described above. Can be comma-separated paths.
126
- device (str): Determines the device type to be used for preprocessing. Allowed values are: 'cpu', 'gpu'.
127
- batch_size (int): Number of samples in a batch.
128
- parser (str, callable): A str for an inbuilt parser, or a callable with signature f(str) -> List[int].
129
- sample_rate (int): Sample rate to resample loaded audio to.
130
- num_threads (int): Number of CPU processing threads to be created by the DALI pipeline.
131
- max_duration (float): Determines the maximum allowed duration, in seconds, of the loaded audio files.
132
- min_duration (float): Determines the minimum allowed duration, in seconds, of the loaded audio files.
133
- bos_id (int): Id of beginning of sequence symbol to append if not None
134
- eos_id (int): Id of end of sequence symbol to append if not None
135
- pad_id (int): Id used to pad the input. Defaults to 0 if not provided.
136
- trim (bool): If True, it will extract the nonsilent region of the loaded audio signal.
137
- shuffle (bool): If set to True, the dataset will shuffled after loading.
138
- drop_last (bool): If set to True, the last batch will be dropped if incomplete. This will be the case when the shard size is not divisible by the batch size.
139
- If set to False and the size of dataset is not divisible by the batch size, then the last batch will be smaller.
140
- device_id (int): Index of the GPU to be used (local_rank). Only applicable when device == 'gpu'. Defaults to 0.
141
- global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
142
- world_size (int): Total number of processes, used for partitioning shards. Defaults to 1.
143
- preprocessor_cfg (DictConfig): Preprocessor configuration. Supports AudioToMelSpectrogramPreprocessor and AudioToMFCCPreprocessor.
144
- return_sample_id (bool): whether to return the sample_id as a part of each sample (not supported yet).
145
- """
146
-
147
- def __init__(
148
- self,
149
- manifest_filepath: str,
150
- device: str,
151
- batch_size: int,
152
- parser: Union[str, Callable],
153
- audio_tar_filepaths: Optional[Union[str, List[str]]] = None,
154
- audio_tar_index_filepaths: Optional[Union[str, List[str]]] = None,
155
- sample_rate: int = 16000,
156
- num_threads: int = 4,
157
- max_duration: float = 0.0,
158
- min_duration: float = 0.0,
159
- bos_id: Optional[int] = None,
160
- eos_id: Optional[int] = None,
161
- pad_id: int = 0,
162
- trim: bool = False,
163
- shuffle: bool = False,
164
- drop_last: bool = False,
165
- shard_strategy: str = "scatter",
166
- device_id: int = 0,
167
- global_rank: int = 0,
168
- world_size: int = 1,
169
- preprocessor_cfg: DictConfig = None,
170
- return_sample_id: bool = False,
171
- ):
172
- self.drop_last = drop_last # used by lr_scheduler
173
- if return_sample_id:
174
- raise ValueError(
175
- "Currently DALI data layers don't support returning the sample_id and return_sample_id can not be enabled."
176
- )
177
- self.return_sample_id = return_sample_id
178
-
179
- if not HAVE_DALI:
180
- raise ModuleNotFoundError(
181
- f"{self} requires NVIDIA DALI to be installed. "
182
- f"See: https://docs.nvidia.com/deeplearning/dali/user-guide/docs/installation.html#id1"
183
- )
184
-
185
- if device not in ('cpu', 'gpu'):
186
- raise ValueError(
187
- f"{self} received an unexpected device argument {device}. Supported values are: 'cpu', 'gpu'"
188
- )
189
-
190
- device_id = device_id if device == 'gpu' else None
191
-
192
- self.batch_size = batch_size # Used by NeMo
193
-
194
- self.device = device
195
- self.device_id = device_id
196
-
197
- if world_size > 1:
198
- self.shard_id = global_rank
199
- self.num_shards = world_size
200
- else:
201
- self.shard_id = None
202
- self.num_shards = None
203
-
204
- self.eos_id = eos_id
205
- self.bos_id = bos_id
206
- self.sample_rate = sample_rate
207
-
208
- self.pipe = Pipeline(
209
- batch_size=batch_size,
210
- num_threads=num_threads,
211
- device_id=self.device_id,
212
- exec_async=True,
213
- exec_pipelined=True,
214
- )
215
-
216
- has_preprocessor = preprocessor_cfg is not None
217
- if has_preprocessor:
218
- if preprocessor_cfg._target_ == "nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor":
219
- feature_type = "mel_spectrogram"
220
- elif preprocessor_cfg._target_ == "nemo.collections.asr.modules.AudioToMFCCPreprocessor":
221
- feature_type = "mfcc"
222
- else:
223
- raise ValueError(
224
- f"{self} received an unexpected preprocessor configuration: {preprocessor_cfg._target_}."
225
- f" Supported preprocessors are: AudioToMelSpectrogramPreprocessor, AudioToMFCCPreprocessor"
226
- )
227
-
228
- # Default values taken from AudioToMelSpectrogramPreprocessor
229
- params = preprocessor_cfg
230
- self.dither = params['dither'] if 'dither' in params else 0.0
231
- self.preemph = params['preemph'] if 'preemph' in params else 0.97
232
- self.window_size_sec = params['window_size'] if 'window_size' in params else 0.02
233
- self.window_stride_sec = params['window_stride'] if 'window_stride' in params else 0.01
234
- self.sample_rate = params['sample_rate'] if 'sample_rate' in params else sample_rate
235
- self.window_size = int(self.window_size_sec * self.sample_rate)
236
- self.window_stride = int(self.window_stride_sec * self.sample_rate)
237
-
238
- normalize = params['normalize'] if 'normalize' in params else 'per_feature'
239
- if normalize == 'per_feature': # Each freq channel independently
240
- self.normalization_axes = (1,)
241
- elif normalize == 'all_features':
242
- self.normalization_axes = (0, 1)
243
- else:
244
- raise ValueError(
245
- f"{self} received {normalize} for the normalize parameter."
246
- f" It must be either 'per_feature' or 'all_features'."
247
- )
248
-
249
- self.window = None
250
- window_name = params['window'] if 'window' in params else 'hann'
251
- torch_windows = {
252
- 'hann': torch.hann_window,
253
- 'hamming': torch.hamming_window,
254
- 'blackman': torch.blackman_window,
255
- 'bartlett': torch.bartlett_window,
256
- 'none': None,
257
- }
258
-
259
- if window_name == 'ones':
260
- window_tensor = torch.ones(self.window_size)
261
- else:
262
- try:
263
- window_fn = torch_windows.get(window_name, None)
264
- except:
265
- raise ValueError(
266
- f"{self} received '{window_name}' for the window parameter."
267
- f" It must be one of: ('hann', 'ones', 'hamming', 'blackman', 'bartlett', None)."
268
- f" None is equivalent to 'hann'."
269
- )
270
- window_tensor = window_fn(self.window_size, periodic=False) if window_fn else None
271
- self.window = window_tensor.numpy().tolist() if window_tensor is not None else None
272
-
273
- self.n_fft = params['n_fft'] if 'n_fft' in params else 2 ** math.ceil(math.log2(self.window_size))
274
- self.n_mels = params['n_mels'] if 'n_mels' in params else 64
275
- self.n_mfcc = params['n_mfcc'] if 'n_mfcc' in params else 64
276
-
277
- features = params['features'] if 'features' in params else 0
278
- if features > 0:
279
- if feature_type == 'mel_spectrogram':
280
- self.n_mels = features
281
- elif feature_type == 'mfcc':
282
- self.n_mfcc = features
283
-
284
- # TODO Implement frame splicing
285
- if 'frame_splicing' in params:
286
- assert params['frame_splicing'] == 1, "Frame splicing is not implemented"
287
-
288
- self.freq_low = params['lowfreq'] if 'lowfreq' in params else 0.0
289
- self.freq_high = params['highfreq'] if 'highfreq' in params else self.sample_rate / 2.0
290
- self.log_features = params['log'] if 'log' in params else True
291
-
292
- # We want to avoid taking the log of zero
293
- # There are two options: either adding or clamping to a small value
294
-
295
- self.log_zero_guard_type = params['log_zero_guard_type'] if 'log_zero_guard_type' in params else 'add'
296
- if self.log_zero_guard_type not in ["add", "clamp"]:
297
- raise ValueError(
298
- f"{self} received {self.log_zero_guard_type} for the "
299
- f"log_zero_guard_type parameter. It must be either 'add' or "
300
- f"'clamp'."
301
- )
302
-
303
- self.log_zero_guard_value = params['log_zero_guard_value'] if 'log_zero_guard_value' in params else 2**-24
304
- if isinstance(self.log_zero_guard_value, str):
305
- if self.log_zero_guard_value == "tiny":
306
- self.log_zero_guard_value = torch.finfo(torch.float32).tiny
307
- elif self.log_zero_guard_value == "eps":
308
- self.log_zero_guard_value = torch.finfo(torch.float32).eps
309
- else:
310
- raise ValueError(
311
- f"{self} received {self.log_zero_guard_value} for the log_zero_guard_type parameter."
312
- f"It must be either a number, 'tiny', or 'eps'"
313
- )
314
-
315
- self.mag_power = params['mag_power'] if 'mag_power' in params else 2
316
- if self.mag_power != 1.0 and self.mag_power != 2.0:
317
- raise ValueError(
318
- f"{self} received {self.mag_power} for the mag_power parameter." f" It must be either 1.0 or 2.0."
319
- )
320
-
321
- self.pad_to = max(params['pad_to'], 1) if 'pad_to' in params else 16
322
- self.pad_value = params['pad_value'] if 'pad_value' in params else 0.0
323
-
324
- with self.pipe:
325
- if audio_tar_filepaths is None and audio_tar_index_filepaths is None:
326
- audio, indices = dali.fn.readers.nemo_asr(
327
- name="Reader",
328
- manifest_filepaths=manifest_filepath.split(','),
329
- dtype=dali.types.FLOAT,
330
- downmix=True,
331
- sample_rate=float(self.sample_rate),
332
- min_duration=min_duration,
333
- max_duration=max_duration,
334
- read_sample_rate=False,
335
- read_text=False,
336
- read_idxs=True,
337
- random_shuffle=shuffle,
338
- shard_id=self.shard_id,
339
- num_shards=self.num_shards,
340
- pad_last_batch=True,
341
- )
342
-
343
- self.is_tarred_dataset = False
344
-
345
- elif audio_tar_filepaths is not None and audio_tar_index_filepaths is not None:
346
- audio_tar_filepaths = expand_sharded_filepaths(
347
- audio_tar_filepaths,
348
- shard_strategy=shard_strategy,
349
- world_size=world_size,
350
- global_rank=global_rank,
351
- )
352
-
353
- audio_tar_index_filepaths = expand_sharded_filepaths(
354
- audio_tar_index_filepaths,
355
- shard_strategy=shard_strategy,
356
- world_size=world_size,
357
- global_rank=global_rank,
358
- )
359
-
360
- if len(audio_tar_filepaths) != len(audio_tar_index_filepaths) and len(audio_tar_index_filepaths) != 0:
361
- raise ValueError(
362
- f"Number of filepaths provided for `audio_tar_filepaths` must match "
363
- f"`audio_tar_index_filepaths`. Got {len(audio_tar_filepaths)} audio_tar_filepaths and "
364
- f"{len(audio_tar_index_filepaths)} audio_tar_index_filepaths."
365
- )
366
-
367
- tar_file = dali.fn.readers.webdataset(
368
- paths=audio_tar_filepaths,
369
- index_paths=audio_tar_index_filepaths,
370
- name="Reader",
371
- ext=["wav"],
372
- missing_component_behavior="error",
373
- random_shuffle=shuffle,
374
- shard_id=self.shard_id,
375
- num_shards=self.num_shards,
376
- pad_last_batch=True,
377
- )
378
- audio, _ = dali.fn.decoders.audio(
379
- tar_file,
380
- dtype=dali.types.FLOAT,
381
- downmix=True,
382
- sample_rate=float(self.sample_rate),
383
- )
384
- indices = dali.fn.get_property(tar_file, key="source_info")
385
- indices = dali.fn.pad(indices)
386
-
387
- self.is_tarred_dataset = True
388
-
389
- else:
390
- raise RuntimeError(
391
- "When using DALI datasets, either `audio_tar_filepaths` "
392
- "and `audio_tar_index_filepaths` should either both be None (sequential dataset)"
393
- "or provided (tarred dataset)."
394
- )
395
-
396
- # Extract nonsilent region, if necessary
397
- if trim:
398
- # Need to extract non-silent region before moving to the GPU
399
- roi_start, roi_len = dali.fn.nonsilent_region(audio, cutoff_db=-60)
400
- audio = audio.gpu() if self.device == 'gpu' else audio
401
- audio = dali.fn.slice(
402
- audio, roi_start, roi_len, normalized_anchor=False, normalized_shape=False, axes=[0]
403
- )
404
- else:
405
- audio = audio.gpu() if self.device == 'gpu' else audio
406
-
407
- if not has_preprocessor:
408
- # No preprocessing, the output is the audio signal
409
- audio_len = dali.fn.shapes(dali.fn.reshape(audio, shape=[-1]))
410
- audio = dali.fn.pad(audio)
411
- self.pipe.set_outputs(audio, audio_len, indices)
412
- else:
413
- # Additive gaussian noise (dither)
414
- if self.dither > 0.0:
415
- gaussian_noise = dali.fn.random.normal(audio)
416
- audio = audio + self.dither * gaussian_noise
417
-
418
- # Preemphasis filter
419
- if self.preemph > 0.0:
420
- audio = dali.fn.preemphasis_filter(audio, preemph_coeff=self.preemph, border='zero')
421
-
422
- # Power spectrogram
423
- spec = dali.fn.spectrogram(
424
- audio,
425
- nfft=self.n_fft,
426
- window_length=self.window_size,
427
- window_step=self.window_stride,
428
- window_fn=self.window,
429
- )
430
-
431
- if feature_type == 'mel_spectrogram' or feature_type == 'mfcc':
432
- # Spectrogram to Mel Spectrogram
433
- spec = dali.fn.mel_filter_bank(
434
- spec,
435
- sample_rate=self.sample_rate,
436
- nfilter=self.n_mels,
437
- normalize=True,
438
- freq_low=self.freq_low,
439
- freq_high=self.freq_high,
440
- )
441
- # Mel Spectrogram to MFCC
442
- if feature_type == 'mfcc':
443
- spec = dali.fn.mfcc(spec, n_mfcc=self.n_mfcc)
444
-
445
- # Logarithm
446
- if self.log_zero_guard_type == 'add':
447
- spec = spec + self.log_zero_guard_value
448
-
449
- spec = dali.fn.to_decibels(
450
- spec, multiplier=math.log(10), reference=1.0, cutoff_db=math.log(self.log_zero_guard_value)
451
- )
452
-
453
- # Normalization
454
- spec = dali.fn.normalize(spec, axes=self.normalization_axes, epsilon=1e-5**2, ddof=1)
455
-
456
- # Extracting the length of the spectrogram
457
- spec_len = dali.fn.slice(dali.fn.shapes(spec), 1, 1, axes=(0,))
458
-
459
- # Pads feature dimension to be a multiple of `pad_to` and the temporal dimension to be as big as the largest sample (shape -1)
460
- spec = dali.fn.pad(spec, fill_value=self.pad_value, axes=(0, 1), align=(self.pad_to, 1), shape=(1, -1))
461
- self.pipe.set_outputs(spec, spec_len, indices)
462
-
463
- x = time.time()
464
- # Building DALI pipeline
465
- self.pipe.build()
466
- y = time.time()
467
-
468
- logging.info(f"Time for pipe.build() : {(y - x)} seconds")
469
-
470
- if has_preprocessor:
471
- output_names = ['processed_signal', 'processed_signal_len', 'manifest_indices']
472
- else:
473
- output_names = ['audio', 'audio_len', 'manifest_indices']
474
-
475
- x = time.time()
476
- last_batch_policy = LastBatchPolicy.DROP if drop_last else LastBatchPolicy.PARTIAL
477
- self._iter = DALIPytorchIterator(
478
- [self.pipe],
479
- output_map=output_names,
480
- reader_name="Reader",
481
- last_batch_policy=last_batch_policy,
482
- dynamic_shape=True,
483
- auto_reset=True,
484
- )
485
- y = time.time()
486
- logging.info(f"Time for DALIPytorchIterator to initialize : {(y - x)} seconds")
487
-
488
- # TODO come up with a better solution
489
- class DummyDataset:
490
- def __init__(self, parent):
491
- self.parent = parent
492
-
493
- def __len__(self):
494
- return self.parent.size
495
-
496
- self.dataset = DummyDataset(self) # Used by NeMo
497
-
498
- x = time.time()
499
- self.manifest_processor = ASRManifestProcessor(
500
- manifest_filepath=manifest_filepath,
501
- parser=parser,
502
- max_duration=max_duration,
503
- min_duration=min_duration,
504
- max_utts=0,
505
- bos_id=bos_id,
506
- eos_id=eos_id,
507
- pad_id=pad_id,
508
- index_by_file_id=self.is_tarred_dataset,
509
- )
510
- y = time.time()
511
- logging.info(f"Time to build nemo manifest processor - {(y - x)} seconds")
512
-
513
- def reset(self):
514
- self._iter.reset()
515
-
516
- def __iter__(self):
517
- return self
518
-
519
- def next(self):
520
- return self.__next__()
521
-
522
- @property
523
- def size(self):
524
- return self._iter.size
525
-
526
- def __len__(self):
527
- return len(self._iter)
528
-
529
- def __next__(self):
530
- outputs = self._iter.next()
531
- assert len(outputs) == 1
532
- dali_out = outputs[0]
533
- manifest_indices = dali_out['manifest_indices'].numpy()
534
-
535
- out = {}
536
- out_names = ['processed_signal', 'processed_signal_len', 'audio', 'audio_len']
537
- for out_name in out_names:
538
- if out_name in dali_out:
539
- out[out_name] = dali_out[out_name].detach().clone()
540
-
541
- text_tokens = []
542
- text_tokens_len = []
543
- max_len = 0
544
- batch_size = manifest_indices.shape[0]
545
- for i, manifest_index in enumerate(manifest_indices):
546
-
547
- if not self.is_tarred_dataset:
548
- # Loose-file dataset. Index is integer based.
549
- manifest_index = manifest_index[0]
550
- text, text_length = self.manifest_processor.process_text_by_id(manifest_index)
551
- else:
552
- # Tarred-file dataset. Index is filename based.
553
- resolved_manifest_indices = manifest_index.tobytes().decode().split(":")
554
- resolved_manifest_index = resolved_manifest_indices[2] # we require just the filename segment
555
- resolved_manifest_index = os.path.splitext(resolved_manifest_index)[0] # we dont need file extension
556
- text, text_length = self.manifest_processor.process_text_by_file_id(resolved_manifest_index)
557
-
558
- text_tokens_len.append(text_length)
559
- text_tokens.append(text)
560
- if text_length > max_len:
561
- max_len = text_length
562
-
563
- transcript_out = torch.full([batch_size, max_len], fill_value=self.manifest_processor.pad_id, dtype=torch.long)
564
- for i, n in enumerate(text_tokens_len):
565
- transcript_out[i, :n] = torch.tensor(text_tokens[i], dtype=torch.long)
566
- transcript_len_out = torch.tensor(text_tokens_len, dtype=torch.long)
567
-
568
- out['transcript'] = transcript_out
569
- out['transcript_len'] = transcript_len_out
570
- return DALIOutputs(out)
571
-
572
-
573
- class AudioToCharDALIDataset(_AudioTextDALIDataset):
574
- """
575
- Character based NVIDIA DALI pipeline that loads tensors via one or more manifest files where each line containing a
576
- sample descriptor in JSON, including audio files, transcripts, and durations (in seconds).
577
- Here's an example:
578
- {"audio_filepath": "/path/to/audio.wav", "text_filepath": "/path/to/audio.txt", "duration": 23.147}
579
- ...
580
- {"audio_filepath": "/path/to/audio.wav", "text": "the transcription", "offset": 301.75, "duration": 0.82, "utt":
581
- "utterance_id", "ctm_utt": "en_4156", "side": "A"}
582
-
583
- Args:
584
- manifest_filepath: Path to manifest file with the format described above. Can be comma-separated paths.
585
- device (str): Determines the device type to be used for preprocessing. Allowed values are: 'cpu', 'gpu'.
586
- batch_size (int): Number of samples in a batch.
587
- labels (List[str]): String containing all the possible characters to map to.
588
- sample_rate (int): Sample rate to resample loaded audio to.
589
- num_threads (int): Number of CPU processing threads to be created by the DALI pipeline.
590
- max_duration (float): Determines the maximum allowed duration, in seconds, of the loaded audio files.
591
- min_duration (float): Determines the minimum allowed duration, in seconds, of the loaded audio files.
592
- blank_index (int): blank character index, default = -1
593
- unk_index (int): unk_character index, default = -1
594
- normalize (bool): whether to normalize transcript text (default): True
595
- bos_id (int): Id of beginning of sequence symbol to append if not None
596
- eos_id (int): Id of end of sequence symbol to append if not None
597
- pad_id (int): Id used to pad the input. Defaults to 0 if not provided.
598
- trim (bool): If True, it will extract the nonsilent region of the loaded audio signal.
599
- shuffle (bool): If set to True, the dataset will shuffled after loading.
600
- drop_last (bool): If set to True, the last batch will be dropped if incomplete. This will be the case when the shard size is not divisible by the batch size.
601
- If set to False and the size of dataset is not divisible by the batch size, then the last batch will be smaller.
602
- parser (str, callable): A str for an inbuilt parser, or a callable with signature f(str) -> List[int].
603
- device_id (int): Index of the GPU to be used (local_rank). Only applicable when device == 'gpu'. Defaults to 0.
604
- global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
605
- world_size (int): Total number of processes, used for partitioning shards. Defaults to 1.
606
- preprocessor_cfg (DictConfig): Preprocessor configuration. Supports AudioToMelSpectrogramPreprocessor and AudioToMFCCPreprocessor.
607
- return_sample_id (bool): whether to return the sample_id as a part of each sample (not supported yet).
608
- """
609
-
610
- def __init__(
611
- self,
612
- manifest_filepath: str,
613
- device: str,
614
- batch_size: int,
615
- labels: Union[str, List[str]],
616
- sample_rate: int = 16000,
617
- audio_tar_filepaths: Optional[Union[str, List[str]]] = None,
618
- audio_tar_index_filepaths: Optional[Union[str, List[str]]] = None,
619
- num_threads: int = 4,
620
- max_duration: float = 0.0,
621
- min_duration: float = 0.0,
622
- blank_index: int = -1,
623
- unk_index: int = -1,
624
- normalize: bool = True,
625
- bos_id: Optional[int] = None,
626
- eos_id: Optional[int] = None,
627
- pad_id: int = 0,
628
- trim: bool = False,
629
- shuffle: bool = False,
630
- drop_last: bool = False,
631
- parser: Union[str, Callable] = 'en',
632
- shard_strategy: str = "scatter",
633
- device_id: int = 0,
634
- global_rank: int = 0,
635
- world_size: int = 1,
636
- preprocessor_cfg: DictConfig = None,
637
- return_sample_id: bool = False,
638
- ):
639
- self.labels = labels
640
-
641
- parser = parsers.make_parser(
642
- labels=labels, name=parser, unk_id=unk_index, blank_id=blank_index, do_normalize=normalize
643
- )
644
-
645
- super().__init__(
646
- manifest_filepath=manifest_filepath,
647
- device=device,
648
- batch_size=batch_size,
649
- audio_tar_filepaths=audio_tar_filepaths,
650
- audio_tar_index_filepaths=audio_tar_index_filepaths,
651
- sample_rate=sample_rate,
652
- num_threads=num_threads,
653
- max_duration=max_duration,
654
- min_duration=min_duration,
655
- bos_id=bos_id,
656
- eos_id=eos_id,
657
- pad_id=pad_id,
658
- trim=trim,
659
- shuffle=shuffle,
660
- drop_last=drop_last,
661
- parser=parser,
662
- shard_strategy=shard_strategy,
663
- device_id=device_id,
664
- global_rank=global_rank,
665
- world_size=world_size,
666
- preprocessor_cfg=preprocessor_cfg,
667
- return_sample_id=return_sample_id,
668
- )
669
-
670
-
671
- class AudioToBPEDALIDataset(_AudioTextDALIDataset):
672
- """
673
- Subword based NVIDIA DALI pipeline that loads tensors via one or more manifest files where each line containing a
674
- sample descriptor in JSON, including audio files, transcripts, and durations (in seconds).
675
- Here's an example:
676
- {"audio_filepath": "/path/to/audio.wav", "text_filepath": "/path/to/audio.txt", "duration": 23.147}
677
- ...
678
- {"audio_filepath": "/path/to/audio.wav", "text": "the transcription", "offset": 301.75, "duration": 0.82, "utt":
679
- "utterance_id", "ctm_utt": "en_4156", "side": "A"}
680
-
681
- Args:
682
- manifest_filepath: Path to manifest file with the format described above. Can be comma-separated paths.
683
- tokenizer (TokenizerSpec): A TokenizerSpec implementation that wraps a tokenization implementation.
684
- device (str): Determines the device type to be used for preprocessing. Allowed values are: 'cpu', 'gpu'.
685
- batch_size (int): Number of samples in a batch.
686
- sample_rate (int): Sample rate to resample loaded audio to.
687
- num_threads (int): Number of CPU processing threads to be created by the DALI pipeline.
688
- max_duration (float): Determines the maximum allowed duration, in seconds, of the loaded audio files.
689
- min_duration (float): Determines the minimum allowed duration, in seconds, of the loaded audio files.
690
- bos_id (int): Id of beginning of sequence symbol to append if not None. Injected from the tokenizer.
691
- eos_id (int): Id of end of sequence symbol to append if not None. Injected from the tokenizer.
692
- pad_id (int): Id used to pad the input. Defaults to 0 if not provided. Injected from the tokenizer.
693
- trim (bool): If True, it will extract the nonsilent region of the loaded audio signal.
694
- shuffle (bool): If set to True, the dataset will shuffled after loading.
695
- drop_last (bool): If set to True, the last batch will be dropped if incomplete. This will be the case when the shard size is not divisible by the batch size.
696
- If set to False and the size of dataset is not divisible by the batch size, then the last batch will be smaller.
697
-
698
- device_id (int): Index of the GPU to be used (local_rank). Only applicable when device == 'gpu'. Defaults to 0.
699
- global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
700
- world_size (int): Total number of processes, used for partitioning shards. Defaults to 1.
701
- preprocessor_cfg (DictConfig): Preprocessor configuration. Supports AudioToMelSpectrogramPreprocessor and AudioToMFCCPreprocessor.
702
- use_start_end_token (bool): Boolean which dictates whether to add [BOS] and [EOS] tokens to beginning and
703
- ending of speech respectively.
704
- return_sample_id (bool): whether to return the sample_id as a part of each sample (not supported yet).
705
- """
706
-
707
- def __init__(
708
- self,
709
- manifest_filepath: str,
710
- tokenizer: 'nemo.collections.common.tokenizers.TokenizerSpec',
711
- device: str,
712
- batch_size: int,
713
- sample_rate: int = 16000,
714
- audio_tar_filepaths: Optional[Union[str, List[str]]] = None,
715
- audio_tar_index_filepaths: Optional[Union[str, List[str]]] = None,
716
- num_threads: int = 4,
717
- max_duration: float = 0.0,
718
- min_duration: float = 0.0,
719
- trim: bool = False,
720
- shuffle: bool = False,
721
- drop_last: bool = False,
722
- shard_strategy: str = "scatter",
723
- device_id: int = 0,
724
- global_rank: int = 0,
725
- world_size: int = 1,
726
- preprocessor_cfg: DictConfig = None,
727
- use_start_end_token: bool = True,
728
- return_sample_id: bool = False,
729
- ):
730
-
731
- if use_start_end_token and hasattr(tokenizer, 'bos_token'):
732
- bos_id = tokenizer.bos_id
733
- else:
734
- bos_id = None
735
-
736
- if use_start_end_token and hasattr(tokenizer, 'eos_token'):
737
- eos_id = tokenizer.eos_id
738
- else:
739
- eos_id = None
740
-
741
- if hasattr(tokenizer, 'pad_token'):
742
- pad_id = tokenizer.pad_id
743
- else:
744
- pad_id = 0
745
-
746
- class TokenizerWrapper:
747
- def __init__(self, tokenizer):
748
- self._tokenizer = tokenizer
749
-
750
- def __call__(self, text):
751
- t = self._tokenizer.text_to_ids(text)
752
- return t
753
-
754
- super().__init__(
755
- manifest_filepath=manifest_filepath,
756
- device=device,
757
- batch_size=batch_size,
758
- sample_rate=sample_rate,
759
- audio_tar_filepaths=audio_tar_filepaths,
760
- audio_tar_index_filepaths=audio_tar_index_filepaths,
761
- num_threads=num_threads,
762
- max_duration=max_duration,
763
- min_duration=min_duration,
764
- bos_id=bos_id,
765
- eos_id=eos_id,
766
- pad_id=pad_id,
767
- trim=trim,
768
- shuffle=shuffle,
769
- drop_last=drop_last,
770
- parser=TokenizerWrapper(tokenizer),
771
- shard_strategy=shard_strategy,
772
- device_id=device_id,
773
- global_rank=global_rank,
774
- world_size=world_size,
775
- preprocessor_cfg=preprocessor_cfg,
776
- return_sample_id=return_sample_id,
777
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
nemo/collections/asr/data/audio_to_text_dataset.py DELETED
@@ -1,997 +0,0 @@
1
- # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import copy
16
- import json
17
- import random
18
- from math import isclose
19
- from typing import Any, List, Optional, Union
20
-
21
- import numpy as np
22
- import torch
23
- from lightning.pytorch import LightningModule
24
- from lightning.pytorch.callbacks import BasePredictionWriter
25
- from omegaconf import DictConfig, OmegaConf, open_dict
26
- from omegaconf.listconfig import ListConfig
27
- from torch.utils.data import ChainDataset
28
-
29
- from nemo.collections.asr.data import audio_to_text, audio_to_text_dali
30
- from nemo.collections.asr.data.huggingface.hf_audio_to_text_dataset import (
31
- get_hf_audio_to_text_bpe_dataset,
32
- get_hf_audio_to_text_char_dataset,
33
- )
34
- from nemo.collections.asr.parts.preprocessing.perturb import AudioAugmentor, process_augmentations
35
- from nemo.collections.common.data.dataset import CodeSwitchedDataset, ConcatDataset
36
- from nemo.collections.common.tokenizers import TokenizerSpec
37
- from nemo.utils import logging
38
-
39
-
40
- def inject_dataloader_value_from_model_config(model_cfg: dict, dataloader_cfg: DictConfig, key: str):
41
- """
42
- Extracts the label set provided at the top level of the model, and propagates it to the dataloader
43
- config.
44
-
45
- Args:
46
- model_cfg: A DictConfig representing the model's config.
47
- dataloader_cfg: A DictConfig representing the individual data loader
48
- key: A str value representing a key in the model_cfg whose value will be propagated to the
49
- dataloader config.
50
- """
51
- if key not in model_cfg:
52
- logging.info(
53
- f"Model level config does not contain `{key}`, please explicitly provide `{key}` to the dataloaders."
54
- )
55
- return
56
-
57
- if not isinstance(dataloader_cfg, DictConfig):
58
- dataloader_cfg = DictConfig(dataloader_cfg)
59
-
60
- # If key exists in the data loader config (either set explicitly or as a placeholder (via None))
61
- if key in dataloader_cfg:
62
- # Dataloader `labels` is provided and is non-null
63
- if dataloader_cfg[key] is not None and model_cfg[key] != dataloader_cfg[key]:
64
- # Model level `labels` dont match Dataloader level `labels`
65
- logging.warning(
66
- f'`{key}` is explicitly provided to the data loader, and is different from '
67
- f'the `{key}` provided at the model level config.\n'
68
- f'If this is incorrect, please set the dataloader\'s `{key}` to None.'
69
- )
70
-
71
- else:
72
- # Dataloader `key` is None or values match
73
- # Propagate from model level `key` (even if they match)
74
- with open_dict(dataloader_cfg):
75
- dataloader_cfg[key] = model_cfg[key]
76
-
77
- else:
78
- # If key key doesnt even exist in dataloader_cfg, inject it explicitly
79
- with open_dict(dataloader_cfg):
80
- dataloader_cfg[key] = model_cfg[key]
81
-
82
-
83
- def get_concat_char_dataset(
84
- config: dict, global_rank: int, world_size: int, augmentor: Optional['AudioAugmentor'] = None
85
- ) -> ConcatDataset:
86
- """
87
- Instantiates an instance of ConcatDataset containing one or more intances of
88
- Character Encoding based AudioToCharDataset.
89
-
90
- Args:
91
- config: Config of the AudioToCharDataset.
92
- global_rank: Global rank of this device.
93
- world_size: Global world size in the training method.
94
- augmentor: Optional AudioAugmentor object for augmentations on audio data.
95
-
96
- Returns:
97
- An instance of ConcatDataset containing one or more instances of AudioToCharDataset.
98
- """
99
- if 'labels' not in config:
100
- logging.warning("dataset does not have explicitly defined labels")
101
-
102
- manifest_filepaths = config['manifest_filepath']
103
- datasets = []
104
-
105
- # needed to support validation Concat Datasets that arrive here as
106
- # [[dataset1,dataset2]] otherwise ModelPT would interfere
107
- if len(manifest_filepaths) == 1 and not isinstance(manifest_filepaths[0], str):
108
- logging.info(f"removing an extra nesting level from {manifest_filepaths}")
109
- manifest_filepaths = config['manifest_filepath'][0]
110
-
111
- for manifest_filepath in manifest_filepaths:
112
- conf = copy.deepcopy(config)
113
- conf['manifest_filepath'] = manifest_filepath
114
-
115
- dataset = get_char_dataset(config=conf, augmentor=augmentor)
116
- datasets.append(dataset)
117
-
118
- dataset = ConcatDataset(
119
- datasets,
120
- sampling_technique=config.get('concat_sampling_technique', 'temperature'),
121
- sampling_temperature=config.get('concat_sampling_temperature', 5),
122
- sampling_scale=config.get('concat_sampling_scale', 1),
123
- sampling_probabilities=config.get('concat_sampling_probabilities', None),
124
- shuffle=config.get('concat_shuffle', True),
125
- seed=config.get('concat_sampling_seed', None),
126
- global_rank=global_rank,
127
- world_size=world_size,
128
- )
129
- return dataset
130
-
131
-
132
- def get_char_dataset(config: dict, augmentor: Optional['AudioAugmentor'] = None) -> audio_to_text.AudioToCharDataset:
133
- """
134
- Instantiates a Character Encoding based AudioToCharDataset.
135
-
136
- Args:
137
- config: Config of the AudioToCharDataset.
138
- augmentor: Optional AudioAugmentor object for augmentations on audio data.
139
-
140
- Returns:
141
- An instance of AudioToCharDataset.
142
- """
143
- if 'labels' not in config:
144
- logging.warning("dataset does not have explicitly defined labels")
145
-
146
- dataset = audio_to_text.AudioToCharDataset(
147
- manifest_filepath=config['manifest_filepath'],
148
- labels=config.get('labels', None),
149
- sample_rate=config['sample_rate'],
150
- int_values=config.get('int_values', False),
151
- augmentor=augmentor,
152
- max_duration=config.get('max_duration', None),
153
- min_duration=config.get('min_duration', None),
154
- max_utts=config.get('max_utts', 0),
155
- blank_index=config.get('blank_index', -1),
156
- unk_index=config.get('unk_index', -1),
157
- normalize=config.get('normalize_transcripts', False),
158
- trim=config.get('trim_silence', False),
159
- parser=config.get('parser', 'en'),
160
- return_sample_id=config.get('return_sample_id', False),
161
- channel_selector=config.get('channel_selector', None),
162
- )
163
- return dataset
164
-
165
-
166
- def get_concat_bpe_dataset(
167
- config: dict,
168
- tokenizer: 'TokenizerSpec',
169
- global_rank: int,
170
- world_size: int,
171
- augmentor: Optional['AudioAugmentor'] = None,
172
- ) -> ConcatDataset:
173
- """
174
- Instantiates a ContactDataset based on several Byte Pair Encoding / Word Piece Encoding based AudioToBPEDatasets.
175
-
176
- Args:
177
- config: Config of the AudioToBPEDataset.
178
- tokenizer: An instance of a TokenizerSpec object.
179
- global_rank: Global rank of this device.
180
- world_size: Global world size in the training method.
181
- augmentor: Optional AudioAugmentor object for augmentations on audio data.
182
-
183
- Returns:
184
- An instance of ConcatDataset containing several instances of AudioToBPEDataset.
185
- """
186
- manifest_filepaths = config['manifest_filepath']
187
- datasets = []
188
-
189
- # needed to support validation Concat Datasets that arrive here as
190
- # [[dataset1,dataset2]] otherwise ModelPT would interfere
191
- if len(manifest_filepaths) == 1 and not isinstance(manifest_filepaths[0], str):
192
- logging.info(f"removing an extra nesting level from {manifest_filepaths}")
193
- manifest_filepaths = config['manifest_filepath'][0]
194
-
195
- for manifest_filepath in manifest_filepaths:
196
- conf = copy.deepcopy(config)
197
- conf['manifest_filepath'] = manifest_filepath
198
- dataset = get_bpe_dataset(config=conf, tokenizer=tokenizer, augmentor=augmentor)
199
- datasets.append(dataset)
200
-
201
- dataset = ConcatDataset(
202
- datasets,
203
- sampling_technique=config.get('concat_sampling_technique', 'temperature'),
204
- sampling_temperature=config.get('concat_sampling_temperature', 5),
205
- sampling_scale=config.get('concat_sampling_scale', 1),
206
- sampling_probabilities=config.get('concat_sampling_probabilities', None),
207
- shuffle=config.get('concat_shuffle', True),
208
- seed=config.get('concat_sampling_seed', None),
209
- global_rank=global_rank,
210
- world_size=world_size,
211
- )
212
- return dataset
213
-
214
-
215
- def get_bpe_dataset(
216
- config: dict, tokenizer: 'TokenizerSpec', augmentor: Optional['AudioAugmentor'] = None
217
- ) -> audio_to_text.AudioToBPEDataset:
218
- """
219
- Instantiates a Byte Pair Encoding / Word Piece Encoding based AudioToBPEDataset.
220
-
221
- Args:
222
- config: Config of the AudioToBPEDataset.
223
- tokenizer: An instance of a TokenizerSpec object.
224
- augmentor: Optional AudioAugmentor object for augmentations on audio data.
225
-
226
- Returns:
227
- An instance of AudioToBPEDataset.
228
- """
229
- dataset = audio_to_text.AudioToBPEDataset(
230
- manifest_filepath=config['manifest_filepath'],
231
- tokenizer=tokenizer,
232
- sample_rate=config['sample_rate'],
233
- int_values=config.get('int_values', False),
234
- augmentor=augmentor,
235
- max_duration=config.get('max_duration', None),
236
- min_duration=config.get('min_duration', None),
237
- max_utts=config.get('max_utts', 0),
238
- trim=config.get('trim_silence', False),
239
- use_start_end_token=config.get('use_start_end_token', True),
240
- return_sample_id=config.get('return_sample_id', False),
241
- channel_selector=config.get('channel_selector', None),
242
- )
243
- return dataset
244
-
245
-
246
- def get_concat_tarred_dataset(
247
- config: dict,
248
- shuffle_n: int,
249
- global_rank: int,
250
- world_size: int,
251
- tokenizer: Optional['TokenizerSpec'] = None,
252
- augmentor: Optional['AudioAugmentor'] = None,
253
- ) -> ConcatDataset:
254
- """
255
- Instantiates a ConcatDataset containing multiple Word Piece/BPE Encoding based TarredAudioToBPEDataset or a char based TarredAudioToCharDataset.
256
-
257
- Args:
258
- config: Config of the TarredAudioToBPEDataset or TarredAudioToCharDataset.
259
- shuffle_n: How many samples to look ahead and load to be shuffled.
260
- See WebDataset documentation for more details.
261
- tokenizer: An instance of a TokenizerSpec object if BPE dataset is needed.
262
- global_rank: Global rank of this device.
263
- world_size: Global world size in the training method.
264
- Passsing None would return a char-based dataset.
265
- augmentor: Optional AudioAugmentor object for augmentations on audio data.
266
-
267
- Returns:
268
- An instance of ConcatDataset containing one or more TarredAudioToBPEDatasets or TarredAudioToCharDatasets.
269
- """
270
-
271
- tarred_audio_filepaths = config['tarred_audio_filepaths']
272
- manifest_filepaths = config['manifest_filepath']
273
- datasets = []
274
- for dataset_idx, (tarred_audio_filepath, manifest_filepath) in enumerate(
275
- zip(tarred_audio_filepaths, manifest_filepaths)
276
- ):
277
- conf = copy.deepcopy(config)
278
- conf['manifest_filepath'] = manifest_filepath
279
- conf['tarred_audio_filepaths'] = tarred_audio_filepath
280
- dataset = get_tarred_dataset(
281
- config=conf,
282
- tokenizer=tokenizer,
283
- shuffle_n=shuffle_n,
284
- global_rank=global_rank,
285
- world_size=world_size,
286
- augmentor=augmentor,
287
- )
288
- datasets.append(dataset)
289
-
290
- dataset = ConcatDataset(
291
- datasets,
292
- sampling_technique=config.get('concat_sampling_technique', 'temperature'),
293
- sampling_temperature=config.get('concat_sampling_temperature', 5),
294
- sampling_scale=config.get('concat_sampling_scale', 1),
295
- sampling_probabilities=config.get('concat_sampling_probabilities', None),
296
- shuffle=config.get('concat_shuffle', True),
297
- seed=config.get('concat_sampling_seed', None),
298
- global_rank=global_rank,
299
- world_size=world_size,
300
- )
301
- return dataset
302
-
303
-
304
def get_tarred_dataset(
    config: dict,
    shuffle_n: int,
    global_rank: int,
    world_size: int,
    tokenizer: Optional['TokenizerSpec'] = None,
    augmentor: Optional['AudioAugmentor'] = None,
) -> Union[audio_to_text.TarredAudioToBPEDataset, audio_to_text.TarredAudioToCharDataset]:
    """
    Instantiates a Word Piece/BPE Encoding based TarredAudioToBPEDataset or a char based TarredAudioToCharDataset.

    Args:
        config: Config of the TarredAudioToBPEDataset or TarredAudioToCharDataset.
        shuffle_n: How many samples to look ahead and load to be shuffled.
            See WebDataset documentation for more details.
        tokenizer: An instance of a TokenizerSpec object if BPE dataset is needed.
            Passing None would return a char-based dataset.
        global_rank: Global rank of this device.
        world_size: Global world size in the training method.
        augmentor: Optional AudioAugmentor object for augmentations on audio data.

    Returns:
        An instance of TarredAudioToBPEDataset or TarredAudioToCharDataset.
    """
    tarred_audio_filepaths = config['tarred_audio_filepaths']
    manifest_filepaths = config['manifest_filepath']
    datasets = []
    # Normalize both path specs to the nested (list-of-lists) layout so
    # single-bucket and multi-bucket configs are handled uniformly below.
    tarred_audio_filepaths = convert_to_config_list(tarred_audio_filepaths)
    manifest_filepaths = convert_to_config_list(manifest_filepaths)

    bucketing_weights = config.get('bucketing_weights', None)  # For upsampling buckets
    if bucketing_weights:
        for weight in bucketing_weights:
            if not isinstance(weight, int) or weight <= 0:
                raise ValueError("bucket weights must be positive integers")

    if len(manifest_filepaths) != len(tarred_audio_filepaths):
        raise ValueError(
            f"manifest_filepaths (length={len(manifest_filepaths)}) and tarred_audio_filepaths (length={len(tarred_audio_filepaths)}) need to have the same number of buckets."
        )

    if 'labels' not in config:
        logging.warning("dataset does not have explicitly defined labels")

    if 'max_utts' in config:
        logging.warning('"max_utts" parameter is not supported for tarred datasets')

    for dataset_idx, (tarred_audio_filepath, manifest_filepath) in enumerate(
        zip(tarred_audio_filepaths, manifest_filepaths)
    ):
        # Unwrap single-entry buckets so the dataset constructors receive a
        # plain path instead of a one-element list.
        if len(tarred_audio_filepath) == 1:
            tarred_audio_filepath = tarred_audio_filepath[0]
        if len(manifest_filepath) == 1:
            manifest_filepath = manifest_filepath[0]

        if tokenizer is None:
            dataset = audio_to_text.TarredAudioToCharDataset(
                audio_tar_filepaths=tarred_audio_filepath,
                manifest_filepath=manifest_filepath,
                labels=config.get('labels', None),
                sample_rate=config['sample_rate'],
                int_values=config.get('int_values', False),
                augmentor=augmentor,
                shuffle_n=shuffle_n,
                max_duration=config.get('max_duration', None),
                min_duration=config.get('min_duration', None),
                blank_index=config.get('blank_index', -1),
                unk_index=config.get('unk_index', -1),
                normalize=config.get('normalize_transcripts', False),
                trim=config.get('trim_silence', False),
                parser=config.get('parser', 'en'),
                shard_strategy=config.get('tarred_shard_strategy', 'scatter'),
                shard_manifests=config.get('shard_manifests', False),
                global_rank=global_rank,
                world_size=world_size,
                return_sample_id=config.get('return_sample_id', False),
            )
        else:
            dataset = audio_to_text.TarredAudioToBPEDataset(
                audio_tar_filepaths=tarred_audio_filepath,
                manifest_filepath=manifest_filepath,
                tokenizer=tokenizer,
                sample_rate=config['sample_rate'],
                int_values=config.get('int_values', False),
                augmentor=augmentor,
                shuffle_n=shuffle_n,
                max_duration=config.get('max_duration', None),
                min_duration=config.get('min_duration', None),
                trim=config.get('trim_silence', False),
                use_start_end_token=config.get('use_start_end_token', True),
                shard_strategy=config.get('tarred_shard_strategy', 'scatter'),
                shard_manifests=config.get('shard_manifests', False),
                global_rank=global_rank,
                world_size=world_size,
                return_sample_id=config.get('return_sample_id', False),
            )
        if bucketing_weights:
            # Upsample this bucket by duplicating it `weight` times; extend()
            # replaces the original side-effect-only list comprehension.
            datasets.extend([dataset] * bucketing_weights[dataset_idx])
        else:
            datasets.append(dataset)

    return get_chain_dataset(datasets=datasets, ds_config=config, rank=global_rank)
406
-
407
-
408
def get_code_switched_dataset(
    config: dict,
    shuffle_n: int,
    global_rank: int,
    world_size: int,
    tokenizer: Optional['TokenizerSpec'] = None,
    augmentor: Optional['AudioAugmentor'] = None,
) -> CodeSwitchedDataset:
    """
    Builds a CodeSwitchedDataset that synthesizes code-switched utterances from
    one monolingual sub-dataset per language.

    Each entry of `config['manifest_filepath']` (optionally paired with
    `config['tarred_audio_filepaths']`) becomes one sub-dataset; tarred entries
    use `get_tarred_dataset`, non-tarred ones use the char/BPE loaders depending
    on whether a tokenizer is given. The `config['code_switched']` sub-dict
    controls the mixing behavior.

    Raises:
        ValueError: if `manifest_filepath` or the `code_switched` group is
            missing, or if the manifest and tarred path lists differ in length.
    """

    if 'manifest_filepath' not in config:
        raise ValueError("`manifest_filepath` must be provided in the dataset config if `is_code_switched=True`")
    if 'code_switched' not in config:
        raise ValueError("`code_switched` param group must be in the dataset config if `is_code_switched=True`")

    manifest_filepaths = config['manifest_filepath']
    tarred_audio_filepaths = config.get('tarred_audio_filepaths', None)

    cs_config = OmegaConf.to_container(config['code_switched'])

    # needed to support validation Datasets that arrive here as
    # [[dataset1,dataset2]] otherwise ModelPT would interfere
    if len(manifest_filepaths) == 1 and not isinstance(manifest_filepaths[0], str):
        manifest_filepaths = config['manifest_filepath'][0]
    if tarred_audio_filepaths is None:
        # No tarred audio at all: pad with Nones so the zip below still pairs
        # every manifest with a (missing) tar spec.
        tarred_audio_filepaths = [None] * len(manifest_filepaths)

    if len(manifest_filepaths) != len(tarred_audio_filepaths):
        raise ValueError(
            f"manifest_filepaths (length={len(manifest_filepaths)}) and tarred_audio_filepaths (length={len(tarred_audio_filepaths)}) need to have the same number of items."
        )

    datasets = []
    for dataset_idx, (tarred_audio_filepath, manifest_filepath) in enumerate(
        zip(tarred_audio_filepaths, manifest_filepaths)
    ):
        # Per-language sub-config: same options as the parent, but restricted
        # to this language's manifest/tar pair. open_dict allows adding the
        # tarred key even on a struct-locked OmegaConf config.
        conf = copy.deepcopy(config)
        conf['manifest_filepath'] = manifest_filepath
        with open_dict(conf):
            conf['tarred_audio_filepaths'] = tarred_audio_filepath
        if tarred_audio_filepath is None or len(tarred_audio_filepath) == 0:
            # Augmentation is applied by the outer CodeSwitchedDataset, so the
            # sub-datasets are built without an augmentor.
            if tokenizer is None:
                dataset = get_char_dataset(config=conf, augmentor=None)
            else:
                dataset = get_bpe_dataset(config=conf, tokenizer=tokenizer, augmentor=None)
        else:
            dataset = get_tarred_dataset(
                config=conf,
                tokenizer=tokenizer,
                shuffle_n=shuffle_n,
                global_rank=global_rank,
                world_size=world_size,
                augmentor=None,
            )
        datasets.append(dataset)

    config = OmegaConf.to_container(config)

    dataset = CodeSwitchedDataset(
        datasets,
        shuffle=cs_config.get('shuffle', True),
        min_duration=cs_config.get('min_duration', 4),
        max_duration=cs_config.get('max_duration', 20),
        min_monolingual=cs_config.get('min_monolingual', 0.3),
        lang_probs=cs_config.get('probs', None),
        db_norm=cs_config.get('db_norm', -25.0),
        pause_start=cs_config.get('pause_start', 0),
        pause_join=cs_config.get('pause_join', 0),
        pause_end=cs_config.get('pause_end', 0),
        sampling_scales=cs_config.get('sampling_scales', None),
        seed=cs_config.get('seed', None),
        global_rank=global_rank,
        world_size=world_size,
        pure_random=cs_config.get('pure_random', False),
        force_monochannel=cs_config.get('force_monochannel', True),
        infinity_mode=cs_config.get('infinity_mode', False),
        sample_rate=config['sample_rate'],
        augmentor=augmentor,
    )

    return dataset
488
-
489
-
490
def get_dali_char_dataset(
    config: dict,
    shuffle: bool,
    device_id: int,
    global_rank: int,
    world_size: int,
    preprocessor_cfg: Optional[DictConfig] = None,
) -> audio_to_text_dali.AudioToCharDALIDataset:
    """
    Instantiates a Character Encoding based AudioToCharDALIDataset.

    Args:
        config: Config of the AudioToCharDALIDataset.
        shuffle: Bool flag whether to shuffle the dataset.
        device_id: Index of the GPU to be used (local_rank). Only applicable when device == 'gpu'. Defaults to 0.
        global_rank: Global rank of this device.
        world_size: Global world size in the training method.
        preprocessor_cfg: Preprocessor configuration. Supports AudioToMelSpectrogramPreprocessor and AudioToMFCCPreprocessor.

    Returns:
        An instance of AudioToCharDALIDataset.
    """
    # DALI pipeline runs on the GPU when one is available, CPU otherwise.
    device = 'gpu' if torch.cuda.is_available() else 'cpu'
    dataset = audio_to_text_dali.AudioToCharDALIDataset(
        manifest_filepath=config['manifest_filepath'],
        device=device,
        batch_size=config['batch_size'],
        labels=config['labels'],
        sample_rate=config['sample_rate'],
        audio_tar_filepaths=config.get('tarred_audio_filepaths', None),
        audio_tar_index_filepaths=config.get('tarred_audio_index_filepaths', None),
        max_duration=config.get('max_duration', None),
        min_duration=config.get('min_duration', None),
        blank_index=config.get('blank_index', -1),
        unk_index=config.get('unk_index', -1),
        normalize=config.get('normalize_transcripts', False),
        trim=config.get('trim_silence', False),
        parser=config.get('parser', 'en'),
        shuffle=shuffle,
        shard_strategy=config.get('tarred_shard_strategy', 'scatter'),
        device_id=device_id,
        global_rank=global_rank,
        world_size=world_size,
        preprocessor_cfg=preprocessor_cfg,
        return_sample_id=config.get('return_sample_id', False),
    )
    return dataset
538
-
539
-
540
def get_dali_bpe_dataset(
    config: dict,
    tokenizer,
    shuffle: bool,
    device_id: int,
    global_rank: int,
    world_size: int,
    preprocessor_cfg: Optional[DictConfig] = None,
) -> audio_to_text_dali.AudioToBPEDALIDataset:
    """
    Instantiates a Subword Encoding based AudioToBPEDALIDataset.

    Args:
        config: Config of the AudioToBPEDALIDataset.
        tokenizer: An implementation of NeMo TokenizerSpec.
        shuffle: Bool flag whether to shuffle the dataset.
        device_id: Index of the GPU to be used (local_rank). Only applicable when device == 'gpu'. Defaults to 0.
        global_rank: Global rank of this device.
        world_size: Global world size in the training method.
        preprocessor_cfg: Preprocessor configuration. Supports AudioToMelSpectrogramPreprocessor and AudioToMFCCPreprocessor.

    Returns:
        An instance of AudioToBPEDALIDataset.
    """
    # DALI pipeline runs on the GPU when one is available, CPU otherwise.
    device = 'gpu' if torch.cuda.is_available() else 'cpu'
    dataset = audio_to_text_dali.AudioToBPEDALIDataset(
        manifest_filepath=config['manifest_filepath'],
        tokenizer=tokenizer,
        device=device,
        batch_size=config['batch_size'],
        sample_rate=config['sample_rate'],
        audio_tar_filepaths=config.get('tarred_audio_filepaths', None),
        audio_tar_index_filepaths=config.get('tarred_audio_index_filepaths', None),
        max_duration=config.get('max_duration', None),
        min_duration=config.get('min_duration', None),
        trim=config.get('trim_silence', False),
        use_start_end_token=config.get('use_start_end_token', True),
        shuffle=shuffle,
        shard_strategy=config.get('tarred_shard_strategy', 'scatter'),
        device_id=device_id,
        global_rank=global_rank,
        world_size=world_size,
        preprocessor_cfg=preprocessor_cfg,
        return_sample_id=config.get('return_sample_id', False),
    )
    return dataset
586
-
587
-
588
def get_audio_to_text_char_dataset_from_config(
    config, local_rank: int, global_rank: int, world_size: int, preprocessor_cfg: Optional[DictConfig] = None
):
    """
    Construct Audio-To-Text Char dataset from a config.
    Args:
        config: dataset config
        local_rank: model local rank
        global_rank: model global rank
        world_size: world size
        preprocessor_cfg: preprocessor config, for DALI dataset

    Returns:
        constructed dataset or None if dataset config is invalid or nothing to load
    """
    if 'augmentor' in config:
        augmentor = process_augmentations(config['augmentor'], global_rank=global_rank, world_size=world_size)
    else:
        augmentor = None

    # HuggingFace datasets take a completely separate construction path.
    if 'hf_data_cfg' in config:
        return get_hf_audio_to_text_char_dataset(
            config=config, global_rank=global_rank, world_size=world_size, augmentor=augmentor
        )

    is_concat = config.get('is_concat', False)
    if is_concat:
        # NOTE(review): a missing `concat_sampling_technique` key raises
        # KeyError two lines below rather than warning — confirm intended.
        if 'concat_sampling_technique' in config and config['concat_sampling_technique'] is None:
            logging.warning(
                f"Concat dataset requires `concat_sampling_technique` but it was not provided. Config: {config}"
            )
            return None
        if config['concat_sampling_technique'] == 'random':
            if 'concat_sampling_probabilities' not in config:
                logging.warning(f"Concat dataset requires `concat_sampling_probabilities` list. Config: {config}")
                return None
            else:
                if not isclose(sum(config['concat_sampling_probabilities']), 1, abs_tol=1e-6):
                    logging.warning(f"`concat_sampling_probabilities` need to sum to 1. Config: {config}")
                    return None

    shuffle = config['shuffle']
    device = 'gpu' if torch.cuda.is_available() else 'cpu'
    if config.get('use_dali', False):
        device_id = local_rank if device == 'gpu' else None
        dataset = get_dali_char_dataset(
            config=config,
            shuffle=shuffle,
            device_id=device_id,
            global_rank=global_rank,
            world_size=world_size,
            preprocessor_cfg=preprocessor_cfg,
        )
        return dataset

    # Instantiate a code-switched dataset if config is present
    if config.get('is_code_switched', False):
        if 'manifest_filepath' in config and config['manifest_filepath'] is None:
            logging.warning(f"Could not load dataset as `manifest_filepath` was None. Provided config : {config}")
            return None
        if not ('code_switched' in config and config['code_switched'] is not None):
            logging.warning(
                f"Code switched dataset requires `*_ds.code_switched.*` dict but it was not provided. Config: {config}"
            )
            return None
        if (
            ('probs' in config['code_switched'])
            and (config['code_switched']['probs'] is not None)
            and (not isclose(sum(config['code_switched']['probs']), 1, abs_tol=1e-6))
        ):
            logging.warning(f"`.code_switched.probs` need to sum to 1. Config: {config['code_switched']}")
            return None

        shuffle_n = config.get('shuffle_n', 4 * config['batch_size']) if shuffle else 0
        dataset = get_code_switched_dataset(
            config=config,
            shuffle_n=shuffle_n,
            global_rank=global_rank,
            world_size=world_size,
            tokenizer=None,
            augmentor=augmentor,
        )
    # Instantiate tarred dataset loader or normal dataset loader
    elif config.get('is_tarred', False):
        if ('tarred_audio_filepaths' in config and config['tarred_audio_filepaths'] is None) or (
            'manifest_filepath' in config and config['manifest_filepath'] is None
        ):
            logging.warning(
                "Could not load dataset as `manifest_filepath` was None or "
                f"`tarred_audio_filepaths` is None. Provided config : {config}"
            )
            return None

        shuffle_n = config.get('shuffle_n', 4 * config['batch_size']) if shuffle else 0
        if is_concat:
            dataset = get_concat_tarred_dataset(
                config=config,
                shuffle_n=shuffle_n,
                global_rank=global_rank,
                world_size=world_size,
                augmentor=augmentor,
            )
        else:
            dataset = get_tarred_dataset(
                config=config,
                shuffle_n=shuffle_n,
                global_rank=global_rank,
                world_size=world_size,
                augmentor=augmentor,
            )
    else:
        if 'manifest_filepath' in config and config['manifest_filepath'] is None:
            logging.warning(f"Could not load dataset as `manifest_filepath` was None. Provided config : {config}")
            return None
        if is_concat:
            dataset = get_concat_char_dataset(
                config=config, global_rank=global_rank, world_size=world_size, augmentor=augmentor
            )
        else:
            dataset = get_char_dataset(config=config, augmentor=augmentor)
    return dataset
709
-
710
-
711
def get_audio_to_text_bpe_dataset_from_config(
    config,
    local_rank: int,
    global_rank: int,
    world_size: int,
    tokenizer,
    preprocessor_cfg: Optional[DictConfig] = None,
):
    """
    Construct Audio-To-Text BPE dataset from a config.
    Args:
        config: BPE dataset config
        local_rank: model local rank
        global_rank: model global rank
        world_size: world size
        tokenizer: BPE tokenizer
        preprocessor_cfg: preprocessor config, for DALI BPE dataset

    Returns:
        constructed dataset or None if dataset config is invalid or nothing to load
    """
    if 'augmentor' in config:
        augmentor = process_augmentations(config['augmentor'], global_rank=global_rank, world_size=world_size)
    else:
        augmentor = None

    # HuggingFace datasets take a completely separate construction path.
    if 'hf_data_cfg' in config:
        return get_hf_audio_to_text_bpe_dataset(
            config=config, global_rank=global_rank, world_size=world_size, tokenizer=tokenizer, augmentor=augmentor
        )

    is_concat = config.get('is_concat', False)
    if is_concat:
        # NOTE(review): a missing `concat_sampling_technique` key raises
        # KeyError two lines below rather than warning — confirm intended.
        if 'concat_sampling_technique' in config and config['concat_sampling_technique'] is None:
            logging.warning(
                f"Concat dataset requires `concat_sampling_technique` but it was not provided. Config: {config}"
            )
            return None

        if config['concat_sampling_technique'] == 'random':
            if 'concat_sampling_probabilities' not in config:
                logging.warning(f"Concat dataset requires `concat_sampling_probabilities` list. Config: {config}")
                return None
            else:
                if not isclose(sum(config['concat_sampling_probabilities']), 1, abs_tol=1e-6):
                    logging.warning(f"`concat_sampling_probabilities` need to sum to 1. Config: {config}")
                    return None

    shuffle = config['shuffle']
    device = 'gpu' if torch.cuda.is_available() else 'cpu'
    if config.get('use_dali', False):
        device_id = local_rank if device == 'gpu' else None
        dataset = get_dali_bpe_dataset(
            config=config,
            tokenizer=tokenizer,
            shuffle=shuffle,
            device_id=device_id,
            global_rank=global_rank,
            world_size=world_size,
            preprocessor_cfg=preprocessor_cfg,
        )
        return dataset

    # Instantiate a code-switched dataset if config is present
    if config.get('is_code_switched', False):
        if 'manifest_filepath' in config and config['manifest_filepath'] is None:
            logging.warning(f"Could not load dataset as `manifest_filepath` was None. Provided config : {config}")
            return None
        if not ('code_switched' in config and config['code_switched'] is not None):
            logging.warning(
                f"Code switched dataset requires `*_ds.code_switched.*` dict but it was not provided. Config: {config}"
            )
            return None
        if (
            ('probs' in config['code_switched'])
            and (config['code_switched']['probs'] is not None)
            and (not isclose(sum(config['code_switched']['probs']), 1, abs_tol=1e-6))
        ):
            logging.warning(f"`.code_switched.probs` need to sum to 1. Config: {config['code_switched']}")
            return None

        shuffle_n = config.get('shuffle_n', 4 * config['batch_size']) if shuffle else 0
        dataset = get_code_switched_dataset(
            config=config,
            shuffle_n=shuffle_n,
            global_rank=global_rank,
            world_size=world_size,
            tokenizer=tokenizer,
            augmentor=augmentor,
        )
    # Instantiate tarred dataset loader or normal dataset loader
    elif config.get('is_tarred', False):
        if ('tarred_audio_filepaths' in config and config['tarred_audio_filepaths'] is None) or (
            'manifest_filepath' in config and config['manifest_filepath'] is None
        ):
            logging.warning(
                "Could not load dataset as `manifest_filepath` was None or "
                f"`tarred_audio_filepaths` is None. Provided config : {config}"
            )
            return None

        shuffle_n = config.get('shuffle_n', 4 * config['batch_size']) if shuffle else 0
        if is_concat:
            dataset = get_concat_tarred_dataset(
                config=config,
                tokenizer=tokenizer,
                shuffle_n=shuffle_n,
                global_rank=global_rank,
                world_size=world_size,
                augmentor=augmentor,
            )
        else:
            dataset = get_tarred_dataset(
                config=config,
                tokenizer=tokenizer,
                shuffle_n=shuffle_n,
                global_rank=global_rank,
                world_size=world_size,
                augmentor=augmentor,
            )
    else:
        if 'manifest_filepath' in config and config['manifest_filepath'] is None:
            logging.warning(f"Could not load dataset as `manifest_filepath` was None. Provided config : {config}")
            return None
        if is_concat:
            dataset = get_concat_bpe_dataset(
                config=config,
                global_rank=global_rank,
                world_size=world_size,
                tokenizer=tokenizer,
                augmentor=augmentor,
            )
        else:
            dataset = get_bpe_dataset(config=config, tokenizer=tokenizer, augmentor=augmentor)
    return dataset
846
-
847
-
848
class ASRPredictionWriter(BasePredictionWriter):
    """Lightning prediction writer that appends one manifest-style JSON line
    per predicted sample to an output file."""

    def __init__(self, dataset, output_file: str):
        # write_interval="batch": write_on_batch_end fires after every predict batch.
        super().__init__(write_interval="batch")
        # Caller is responsible for invoking close_output_file() when done.
        self.outf = open(output_file, 'w', encoding='utf-8')
        self.dataset = dataset
        self.samples_num = 0

    def write_on_batch_end(
        self,
        trainer,
        pl_module: 'LightningModule',
        prediction: Any,
        batch_indices: List[int],
        batch: Any,
        batch_idx: int,
        dataloader_idx: int,
    ):
        """Serialize each (sample_id, hypothesis) pair of the batch as one JSON line."""
        # Imported lazily so lhotse is only required when predictions are written.
        import lhotse

        for sample_id, hypotheses in prediction:
            item = {}
            if isinstance(sample_id, lhotse.cut.Cut):
                # Lhotse path: metadata comes from the Cut object itself.
                sample = sample_id
                if isinstance(sample, lhotse.cut.MixedCut):
                    sample = sample.first_non_padding_cut
                # Fall back to the cut id when the recording has no source path.
                if sample.recording.sources[0].source != '':
                    item["audio_filepath"] = sample.recording.sources[0].source
                else:
                    item["audio_filepath"] = sample.id
                item["offset"] = sample.start
                item["duration"] = sample.duration
                item["text"] = sample.supervisions[0].text or ''
                if hasattr(sample, 'shard_id'):
                    item["shard_id"] = sample.shard_id
                item["pred_text"] = hypotheses.text

            else:
                # Manifest path: look the sample up by id in the source dataset.
                sample = self.dataset.get_manifest_sample(sample_id)
                item["audio_filepath"] = sample.audio_file
                item["offset"] = sample.offset
                item["duration"] = sample.duration
                item["text"] = sample.text_raw
                item["pred_text"] = hypotheses.text

            if hasattr(hypotheses, "timestamp") and isinstance(hypotheses.timestamp, dict):
                for timestamp_type, timestamps in hypotheses.timestamp.items():
                    if timestamp_type in ['char', 'word', 'segment']:
                        # np.int64 is not JSON-serializable; coerce to plain int.
                        item[f'{timestamp_type}_timestamps'] = [
                            {
                                key: int(value) if isinstance(value, np.int64) else value
                                for key, value in offset.items()
                            }
                            for offset in timestamps
                        ]

            self.outf.write(json.dumps(item) + "\n")
            self.samples_num += 1
        return

    def close_output_file(self):
        """Close the output file and return the number of samples written."""
        self.outf.close()
        return self.samples_num
910
-
911
-
912
def convert_to_config_list(initial_list):
    """
    Normalizes a manifest/tarred-filepath config entry into a ListConfig of ListConfigs.

    Accepts a comma-separated string, a flat list of strings, or a list of lists
    (one inner list per bucket) and always returns the nested-list form.

    Raises:
        ValueError: if the input is empty/None or mixes entry types.
    """
    if isinstance(initial_list, str):
        initial_list = initial_list.split(",")
    if initial_list is None or initial_list == []:
        raise ValueError("manifest_filepaths and tarred_audio_filepaths must not be empty.")
    if not isinstance(initial_list, ListConfig):
        initial_list = ListConfig([initial_list])

    # All entries must be homogeneous: either all strings or all lists.
    for list_val in initial_list:
        if type(list_val) is not type(initial_list[0]):
            raise ValueError(
                "manifest_filepaths and tarred_audio_filepaths need to be a list of lists for bucketing or just a list of strings"
            )
    # Wrap a flat list once more so the result is always list-of-lists.
    if type(initial_list[0]) is not ListConfig:
        initial_list = ListConfig([initial_list])
    return initial_list
928
-
929
-
930
def get_chain_dataset(datasets, ds_config, rank=0):
    """Combine bucketed datasets into one chained dataset per the configured
    bucketing strategy, optionally wrapping each bucket for adaptive batching."""
    n_buckets = len(datasets)
    if n_buckets > 1:
        if ds_config.get('bucketing_batch_size', None) is not None:
            # Adaptive batching: each bucket gets its own batch size.
            bucketing_batch_sizes = calc_bucketing_batch_sizes(ds_config, n_buckets)
            logging.info(
                f"Batch bucketing is enabled for {len(datasets)} buckets with adaptive batch sizes of {bucketing_batch_sizes}!"
            )
            for idx, (bucket, bucket_bs) in enumerate(zip(list(datasets), bucketing_batch_sizes)):
                datasets[idx] = audio_to_text.BucketingDataset(dataset=bucket, bucketing_batch_size=bucket_bs)
        else:
            logging.info(
                f"Batch bucketing is enabled for {len(datasets)} buckets with fixed batch size of {ds_config['batch_size']}!"
            )

    # A single bucket needs no chaining at all.
    if n_buckets == 1:
        return datasets[0]

    bucketing_strategy = ds_config.get('bucketing_strategy', 'synced_randomized')
    if bucketing_strategy == 'fixed_order':
        return ChainDataset(datasets)
    if bucketing_strategy == 'synced_randomized':
        # Same seed on every rank keeps bucket order in sync across workers.
        return audio_to_text.RandomizedChainDataset(datasets=datasets, rnd_seed=0)
    if bucketing_strategy == 'fully_randomized':
        # Rank-dependent seed gives each worker an independent bucket order.
        return audio_to_text.RandomizedChainDataset(datasets=datasets, rnd_seed=random.randint(0, 30000) + rank)
    raise ValueError(
        f'bucketing_strategy={bucketing_strategy} is not supported! Supported strategies are [fixed_order, fully_randomized, synced_randomized].'
    )
959
-
960
-
961
def calc_bucketing_batch_sizes(ds_config, datasets_len):
    """
    Computes per-bucket batch sizes for adaptive batch bucketing.

    An integer `bucketing_batch_size` is scaled linearly per bucket
    (shorter-audio buckets get larger batches); a list assigns sizes
    explicitly. When `bucketing_weights` is set, each upsampled (duplicated)
    bucket repeats its batch size `weight` times.

    Args:
        ds_config: dataset config containing `bucketing_batch_size`,
            `batch_size` (must be 1), and optionally `bucketing_weights`.
        datasets_len: total number of buckets (after upsampling).

    Returns:
        A list of batch sizes, one per bucket.

    Raises:
        ValueError: if `batch_size` != 1, `bucketing_batch_size` has an
            unsupported type, or the result length mismatches `datasets_len`.
    """
    bucketing_batch_size = ds_config['bucketing_batch_size']
    bucketing_weights = ds_config.get('bucketing_weights', None)  # To adjust for upsampled buckets

    bucketing_batch_sizes = []

    if ds_config['batch_size'] != 1:
        raise ValueError(
            f"batch_size should be set to one when bucketing_batch_size is set and adaptive bucketing is enabled (batch_size={ds_config['batch_size']}!"
        )
    if isinstance(bucketing_batch_size, int):  # linear scaling
        if bucketing_weights:  # Want same batchsize for the same duplicated bucket
            for idx, weight in enumerate(bucketing_weights):
                scale_factor = datasets_len - idx
                # Repeat the scaled size once per duplicate of this bucket.
                bucketing_batch_sizes.extend([scale_factor * bucketing_batch_size] * weight)
        else:
            for idx in range(datasets_len):
                scale_factor = datasets_len - idx
                bucketing_batch_sizes.append(scale_factor * bucketing_batch_size)
    elif isinstance(bucketing_batch_size, (ListConfig, list)):  # assigned bucket sizes
        if bucketing_weights:  # Want same batchsize for same duplicated bucket
            for idx, weight in enumerate(bucketing_weights):
                bucketing_batch_sizes.extend([bucketing_batch_size[idx]] * weight)
        else:
            bucketing_batch_sizes = bucketing_batch_size
    else:
        raise ValueError(
            f"bucketing_batch_size should be an integer or a list (bucketing_batch_size={bucketing_batch_size})!"
        )

    if len(bucketing_batch_sizes) != datasets_len:
        raise ValueError(
            f"batch_size should have the same length as the number of buckets ({len(bucketing_batch_sizes)}!={datasets_len}) "
        )
    return bucketing_batch_sizes