Martí committed on
Commit
78ec87c
·
unverified ·
2 Parent(s): b4c3899 685f83e

Merge pull request #16 from langtech-bsc/feature/box-translation

Browse files
whisperlivekit/audio_processor.py CHANGED
@@ -384,8 +384,11 @@ class AudioProcessor:
384
  lines[-1]["translation"] = ""
385
  #lines[-1]["translation"] += " " + await self.translate_text(text = token.text)
386
 
387
- text_to_translate = lines[-1]["text"]
388
- lines[-1]["translation"] = await self.translate_text(text=text_to_translate) if len(lines) > 0 else ""
 
 
 
389
  # Handle undiarized text
390
  if undiarized_text:
391
  combined = sep.join(undiarized_text)
 
384
  lines[-1]["translation"] = ""
385
  #lines[-1]["translation"] += " " + await self.translate_text(text = token.text)
386
 
387
+ text_to_translate = ""
388
+ if lines:
389
+ text_to_translate = lines[-1]["text"]
390
+ lines[-1]["translation"] = await self.translate_text(text=text_to_translate) if len(lines) > 0 else ""
391
+
392
  # Handle undiarized text
393
  if undiarized_text:
394
  combined = sep.join(undiarized_text)
whisperlivekit/whisper_streaming_custom/backends.py CHANGED
@@ -92,7 +92,7 @@ class WhisperTimestampedASR(ASRBase):
92
 
93
  class FasterWhisperASR(ASRBase):
94
  """Uses faster-whisper as the backend."""
95
- sep = ""
96
 
97
  def load_model(self, modelsize=None, cache_dir=None, model_dir=None):
98
  from faster_whisper import WhisperModel, BatchedInferencePipeline
 
92
 
93
  class FasterWhisperASR(ASRBase):
94
  """Uses faster-whisper as the backend."""
95
+ sep = " "
96
 
97
  def load_model(self, modelsize=None, cache_dir=None, model_dir=None):
98
  from faster_whisper import WhisperModel, BatchedInferencePipeline