{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "view-in-github"
   },
   "source": [
    "<a href=\"https://colab.research.google.com/github/kauterry/seamless_communication/blob/main/Seamless_Tutorial.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "SbI8G4-0V1OG"
   },
   "source": [
     "# Seamless Tutorial\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "1p2d9R1LHJL2"
   },
   "source": [
    "## Quick Links"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "nLlZgJvBpWxT"
   },
   "source": [
    "1. seamless_communication GitHub repository: https://github.com/facebookresearch/seamless_communication\n",
     "2. fairseq2 GitHub repository: https://github.com/facebookresearch/fairseq2\n",
    "3. HuggingFace: https://huggingface.co/collections/facebook/seamless-communication-6568d486ef451c6ba62c7724\n",
    "4. Seamless demos: https://seamless.metademolab.com/\n",
    "5. Fleurs datasets for evaluation: https://huggingface.co/datasets/google/fleurs/tree/main/data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "YICcqOErh-om"
   },
   "source": [
    "### Set up seamless_communication, fairseq2 and some utilities."
   ]
  },
  {
   "cell_type": "raw",
   "metadata": {
    "id": "1Ei8HSHamsBG",
    "vscode": {
     "languageId": "raw"
    }
   },
   "source": [
    "%%capture\n",
    "!pip install fairseq2\n",
    "!pip install pydub sentencepiece\n",
    "!pip install git+https://github.com/facebookresearch/seamless_communication.git"
   ]
  },
  {
   "cell_type": "raw",
   "metadata": {
    "vscode": {
     "languageId": "raw"
    }
   },
   "source": [
    "%%capture\n",
    "! pip install matplotlib\n",
    "! conda install -c conda-forge libsndfile==1.0.31 -y "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "TWlkq20jms6V"
   },
   "outputs": [],
   "source": [
    "import io\n",
    "import json\n",
    "import matplotlib as mpl\n",
    "import matplotlib.pyplot as plt\n",
    "import mmap\n",
    "import numpy\n",
    "import soundfile\n",
    "import torchaudio\n",
    "import torch\n",
    "\n",
    "from collections import defaultdict\n",
    "from IPython.display import Audio, display\n",
    "from pathlib import Path\n",
    "from pydub import AudioSegment\n",
    "\n",
    "from seamless_communication.inference import Translator\n",
    "from seamless_communication.streaming.dataloaders.s2tt import SileroVADSilenceRemover"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "j25uCSvKHRKu"
   },
   "source": [
    "# SeamlessM4T Inference:"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "06JLP7rIEzfP"
   },
   "source": [
    "## Initialize the models:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import yaml\n",
    "\n",
    "model_yaml = './src/seamless_communication/cards/seamlessM4T_v2_large_local.yaml'\n",
    "vocoder_yaml = './src/seamless_communication/cards/vocoder_v2_local.yaml'\n",
    "\n",
     "# Open the YAML files and load their contents\n",
    "with open(model_yaml, 'r') as f1, open(vocoder_yaml, 'r') as f2:\n",
    "    model_metadata = yaml.safe_load(f1)\n",
    "    vocoder_metadata = yaml.safe_load(f2)\n",
    "    \n",
    "from fairseq2.assets.card import AssetCard\n",
    "\n",
    "model_card = AssetCard(model_metadata)\n",
    "vocoder_card = AssetCard(vocoder_metadata)\n",
    "\n",
    "translator = Translator(\n",
    "    model_card,\n",
    "    vocoder_card,\n",
    "    # device=torch.device(\"cuda:0\"),\n",
    "    device=torch.device(\"cpu\"),\n",
    "    dtype=torch.float16,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "fA4iPYnoMLkK",
    "outputId": "c19ae7c7-c0c9-4b85-be2c-561f57d279f1"
   },
   "outputs": [],
   "source": [
     "# Initialize a Translator object with a multitask model and vocoder on the chosen device.\n",
    "\n",
    "model_name = \"seamlessM4T_v2_large\"\n",
    "vocoder_name = \"vocoder_v2\" if model_name == \"seamlessM4T_v2_large\" else \"vocoder_36langs\"\n",
    "\n",
    "\n",
    "translator = Translator(\n",
    "    model_name,\n",
    "    vocoder_name,\n",
    "    # device=torch.device(\"cuda:0\"),\n",
    "    device=torch.device(\"cpu\"),\n",
    "    dtype=torch.float16,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "BU4xoNRpqEey"
   },
   "outputs": [],
   "source": [
    "%%capture\n",
     "# Download an English audio sample from the LJ Speech dataset for testing purposes.\n",
    "! mkdir -p ./input\n",
    "! wget https://dl.fbaipublicfiles.com/seamlessM4T/LJ037-0171_sr16k.wav -O ./input/LJ_eng.wav"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "PoWClYZ6FP1a"
   },
   "source": [
    "## S2ST inference:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 880
    },
    "id": "J_qeX25RnTr_",
    "outputId": "0828c902-5ae3-49be-ffef-4e73751aaccb"
   },
   "outputs": [],
   "source": [
    "# README:  https://github.com/facebookresearch/seamless_communication/tree/main/src/seamless_communication/cli/m4t/predict\n",
    "# Please use audios with duration under 20 seconds for optimal performance.\n",
    "\n",
     "# Resample the audio to 16 kHz if the sample rate is not 16 kHz already.\n",
    "# torchaudio.functional.resample(audio, orig_freq=orig_freq, new_freq=16_000)\n",
    "\n",
    "print(\"English audio:\")\n",
    "in_file = \"/content/LJ_eng.wav\"\n",
    "display(Audio(in_file, rate=16000, autoplay=False, normalize=True))\n",
    "\n",
    "tgt_langs = (\"spa\", \"fra\", \"deu\", \"ita\", \"hin\", \"cmn\")\n",
    "\n",
    "for tgt_lang in tgt_langs:\n",
    "  text_output, speech_output = translator.predict(\n",
    "      input=in_file,\n",
    "      task_str=\"s2st\",\n",
    "      tgt_lang=tgt_lang,\n",
    "  )\n",
    "\n",
    "  print(f\"Translated text in {tgt_lang}: {text_output[0]}\")\n",
    "  print()\n",
    "\n",
    "  out_file = f\"/content/translated_LJ_{tgt_lang}.wav\"\n",
    "\n",
    "  torchaudio.save(out_file, speech_output.audio_wavs[0][0].to(torch.float32).cpu(), speech_output.sample_rate)\n",
    "\n",
    "  print(f\"Translated audio in {tgt_lang}:\")\n",
    "  audio_play = Audio(out_file, rate=speech_output.sample_rate, autoplay=False, normalize=True)\n",
    "  display(audio_play)\n",
    "  print()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "VTD8l_vXFX5x"
   },
   "source": [
    "## S2TT inference:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "xpvz3VdcFbYA",
    "outputId": "b91d60ca-fa55-414d-c56a-67da1d5eb691"
   },
   "outputs": [],
   "source": [
    "tgt_langs = (\"arb\", \"rus\", \"tgl\", \"ind\", \"tam\", \"kor\")\n",
    "in_file = \"/content/LJ_eng.wav\"\n",
    "\n",
    "for tgt_lang in tgt_langs:\n",
    "\n",
    "  text_output, _ = translator.predict(\n",
    "      input=in_file,\n",
    "      task_str=\"s2tt\",\n",
    "      tgt_lang=tgt_lang,\n",
    "  )\n",
    "\n",
    "  print(f\"Translated text in {tgt_lang}: {text_output[0]}\")\n",
    "  print()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "IBfkgdQlFcRV"
   },
   "source": [
    "## ASR inference:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "E-GkJ-GsFjwM",
    "outputId": "191e3d49-ff61-4d3b-c235-1978ff522521"
   },
   "outputs": [],
   "source": [
    "tgt_langs = (\"spa\", \"fra\", \"deu\", \"ita\", \"hin\", \"cmn\")\n",
    "\n",
    "for tgt_lang in tgt_langs:\n",
    "  in_file = f\"/content/translated_LJ_{tgt_lang}.wav\"\n",
    "\n",
    "  text_output, _ = translator.predict(\n",
    "      input=in_file,\n",
    "      task_str=\"asr\",\n",
    "      tgt_lang=tgt_lang,\n",
    "  )\n",
    "\n",
    "  print(f\"Transcribed text in {tgt_lang}: {text_output[0]}\")\n",
    "  print()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "1g3oeNp_Fj_m"
   },
   "source": [
    "## T2ST inference:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 784
    },
    "id": "KivvCtS9FnH8",
    "outputId": "34392c09-40d0-4da6-95b1-9639a9abe960"
   },
   "outputs": [],
   "source": [
    "tgt_langs = (\"spa\", \"fra\", \"deu\", \"ita\", \"hin\", \"cmn\")\n",
    "\n",
    "for tgt_lang in tgt_langs:\n",
    "\n",
    "  text_output, speech_output = translator.predict(\n",
    "      input=\"Hey everyone! I hope you're all doing well. Thank you for attending our workshop.\",\n",
    "      task_str=\"t2st\",\n",
    "      tgt_lang=tgt_lang,\n",
    "      src_lang=\"eng\",\n",
    "  )\n",
    "\n",
    "  print(f\"Translated text in {tgt_lang}: {text_output[0]}\")\n",
    "  print()\n",
    "\n",
    "  out_file = f\"/content/{tgt_lang}.wav\"\n",
    "\n",
    "  torchaudio.save(out_file, speech_output.audio_wavs[0][0].to(torch.float32).cpu(), speech_output.sample_rate)\n",
    "\n",
    "  print(f\"Translated audio in {tgt_lang}:\")\n",
    "  audio_play = Audio(out_file, rate=speech_output.sample_rate, autoplay=False, normalize=True)\n",
    "  display(audio_play)\n",
    "  print()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "F_hA66F4Fnjk"
   },
   "source": [
    "## T2TT (MT) inference:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "wSrZZCIXFtFp",
    "outputId": "fe1accd0-d038-455f-9781-bdc6893df7b0"
   },
   "outputs": [],
   "source": [
    "tgt_langs = (\"arb\", \"rus\", \"ind\", \"tam\", \"kor\")\n",
    "\n",
    "for tgt_lang in tgt_langs:\n",
    "\n",
    "  text_output, speech_output = translator.predict(\n",
    "      input=\"Hey everyone! I hope you're all doing well. Thank you for attending our workshop.\",\n",
    "      task_str=\"t2tt\",\n",
    "      tgt_lang=tgt_lang,\n",
    "      src_lang=\"eng\",\n",
    "  )\n",
    "\n",
    "  print(f\"Translated text in {tgt_lang}: {text_output[0]}\")\n",
    "  print()\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "N_q-Ek9M9M36"
   },
   "source": [
    "## UnitY2 aligner usage"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "ttDrZ9nh9LhH"
   },
   "outputs": [],
   "source": [
    "from seamless_communication.models.aligner.alignment_extractor import AlignmentExtractor\n",
    "from fairseq2.typing import Device\n",
    "import torch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "D-yY4y129WFD",
    "outputId": "a3678919-f1e4-45e2-bd62-272d99df5424"
   },
   "outputs": [],
   "source": [
    "alignment_extractor = AlignmentExtractor(\n",
    "    aligner_model_name_or_card=\"nar_t2u_aligner\",\n",
    "    unit_extractor_model_name_or_card=\"xlsr2_1b_v2\",\n",
    "    unit_extractor_output_layer=35,\n",
    "    unit_extractor_kmeans_model_uri=\"https://dl.fbaipublicfiles.com/seamlessM4T/models/unit_extraction/kmeans_10k.npy\",\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "N8FYd-Fg-YaE",
    "outputId": "5ad68739-cabf-4561-acde-64fc5ff01be5"
   },
   "outputs": [],
   "source": [
     "# Download the English audio sample\n",
    "! wget https://dl.fbaipublicfiles.com/seamlessM4T/LJ037-0171_sr16k.wav"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 93
    },
    "id": "v5k0B1An_DOd",
    "outputId": "706daf4a-a07b-4977-9a68-a09cd395a1c6"
   },
   "outputs": [],
   "source": [
    "# listen to the audio\n",
    "en_transcription = \"the examination and testimony of the experts enabled the commision to conclude that five shots may have been fired.\"\n",
    "audio_play = Audio(\"LJ037-0171_sr16k.wav\", rate=16_000, autoplay=False, normalize=True)\n",
    "display(audio_play)\n",
    "print(en_transcription)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 221
    },
    "id": "GpmN4Ofs-ySY",
    "outputId": "681d840e-a0c1-4724-aac0-704dfd6ec17d"
   },
   "outputs": [],
   "source": [
    "alignment_durations, _, tokenized_text_tokens = alignment_extractor.extract_alignment(\"LJ037-0171_sr16k.wav\", en_transcription, plot=True, add_trailing_silence=False)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "eQh3GBo_hb5g"
   },
   "source": [
    "## HF transformers:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "6jSyZHFihel5"
   },
   "outputs": [],
   "source": [
    "# Refer to README: https://github.com/facebookresearch/seamless_communication/tree/main/docs/m4t#transformers-usage\n",
    "# HF space: https://huggingface.co/spaces/facebook/seamless-m4t-v2-large"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "10CG60YSw4QB"
   },
   "source": [
    "## m4t_evaluate"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "oQ5GuaQ7w7K8"
   },
   "outputs": [],
   "source": [
    "# Refer to README: https://github.com/facebookresearch/seamless_communication/tree/main/src/seamless_communication/cli/m4t/evaluate"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "GIPdJ3x9tstZ"
   },
   "source": [
    "# SeamlessExpressive Inference:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "l76dn3mRtwxK"
   },
   "outputs": [],
   "source": [
    "# Please follow instructions to download SeamlessExpressive here: https://ai.meta.com/resources/models-and-libraries/seamless-downloads/\n",
    "\n",
    "!wget \"<download_link_in_email>\" -O /content/SeamlessExpressive.tar.gz\n",
    "\n",
    "!tar -xzvf /content/SeamlessExpressive.tar.gz"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "wqkH-Js91cLX",
    "outputId": "09919807-5a69-4639-cd7d-5110f6f5f023"
   },
   "outputs": [],
   "source": [
    "!wget https://dl.fbaipublicfiles.com/seamless/data/samples/expressivity_data.tar.gz -O /content/expressivity_data.tar.gz\n",
    "!tar -xzvf /content/expressivity_data.tar.gz"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 1000
    },
    "id": "L1o7JgU2xiHV",
    "outputId": "3b51ce48-e7ca-4f6c-f8db-5d19dfa1e10f"
   },
   "outputs": [],
   "source": [
    "expressions = (\"default\", \"whisper\", \"confused\", \"enunciated\", \"happy\", \"sad\", \"laughing\")\n",
    "\n",
    "for expression in expressions:\n",
    "  print(f\"English {expression} audio:\")\n",
    "  print()\n",
    "\n",
    "  in_file = f\"ex01_{expression}_00367.wav\"\n",
    "\n",
    "  audio_play = Audio(in_file, rate=16000, autoplay=False, normalize=True)\n",
    "  display(audio_play)\n",
    "\n",
    "  out_file = f\"spa_{expression}.wav\"\n",
    "\n",
    "  !expressivity_predict {in_file} --tgt_lang spa \\\n",
    "    --model_name seamless_expressivity --vocoder_name vocoder_pretssel \\\n",
    "    --gated-model-dir SeamlessExpressive --output_path {out_file}\n",
    "\n",
    "  print()\n",
    "  print(f\"Translated {expression} audio in spa:\")\n",
    "\n",
    "  audio_play = Audio(out_file, rate=16000, autoplay=False, normalize=True)\n",
    "  display(audio_play)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "-qo85CRkgVSW"
   },
   "source": [
    "## Automatic Expressive Evaluation:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "gGg6R8zogfn1"
   },
   "outputs": [],
   "source": [
    "# Refer to README: https://github.com/facebookresearch/seamless_communication/blob/main/docs/expressive/README.md#automatic-evaluation\n",
    "\n",
    "# AutoPCP: https://github.com/facebookresearch/stopes/tree/main/stopes/eval/auto_pcp\n",
    "\n",
    "# VSim: https://github.com/facebookresearch/stopes/tree/main/stopes/eval/vocal_style_similarity\n",
    "\n",
    "# expressivity_evaluate: https://github.com/facebookresearch/seamless_communication#seamlessexpressive-evaluation\n",
    "\n",
    "# HF space: https://huggingface.co/spaces/facebook/seamless-expressive"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "4PNlRLsloKWo"
   },
   "source": [
    "# Streaming Standalone Inference\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "dvM68NSZGK8o"
   },
   "source": [
    "## Utility classes + functions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "ihWc_q0lGcnl",
    "outputId": "05696d30-a68b-494f-e2c8-146b00673aa8"
   },
   "outputs": [],
   "source": [
     "# Download the LJ Speech dataset sample if you didn't already run it above\n",
    "# %%capture\n",
    "!wget https://dl.fbaipublicfiles.com/seamlessM4T/LJ037-0171_sr16k.wav -O /content/LJ_eng.wav"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "R5PPqT9boJ9e"
   },
   "outputs": [],
   "source": [
    "import math\n",
    "from simuleval.data.segments import SpeechSegment, EmptySegment\n",
    "from seamless_communication.streaming.agents.seamless_streaming_s2st import (\n",
    "    SeamlessStreamingS2STVADAgent,\n",
    ")\n",
    "\n",
    "from simuleval.utils.arguments import cli_argument_list\n",
    "from simuleval import options\n",
    "\n",
    "\n",
    "from typing import Union, List\n",
    "from simuleval.data.segments import Segment, TextSegment\n",
    "from simuleval.agents.pipeline import TreeAgentPipeline\n",
    "from simuleval.agents.states import AgentStates\n",
    "\n",
    "\n",
    "SAMPLE_RATE = 16000\n",
    "\n",
    "\n",
    "class AudioFrontEnd:\n",
    "    def __init__(self, wav_file, segment_size) -> None:\n",
    "        self.samples, self.sample_rate = soundfile.read(wav_file)\n",
    "        assert self.sample_rate == SAMPLE_RATE\n",
    "        # print(len(self.samples), self.samples[:100])\n",
    "        self.samples = self.samples  # .tolist()\n",
    "        self.segment_size = segment_size\n",
    "        self.step = 0\n",
    "\n",
    "    def send_segment(self):\n",
    "        \"\"\"\n",
    "        This is the front-end logic in simuleval instance.py\n",
    "        \"\"\"\n",
    "\n",
    "        num_samples = math.ceil(self.segment_size / 1000 * self.sample_rate)\n",
    "\n",
    "        if self.step < len(self.samples):\n",
    "            if self.step + num_samples >= len(self.samples):\n",
    "                samples = self.samples[self.step :]\n",
    "                is_finished = True\n",
    "            else:\n",
    "                samples = self.samples[self.step : self.step + num_samples]\n",
    "                is_finished = False\n",
    "            self.step = min(self.step + num_samples, len(self.samples))\n",
    "\n",
    "            segment = SpeechSegment(\n",
    "                content=samples,\n",
    "                sample_rate=self.sample_rate,\n",
    "                finished=is_finished,\n",
    "            )\n",
    "        else:\n",
    "            # Finish reading this audio\n",
    "            segment = EmptySegment(\n",
    "                finished=True,\n",
    "            )\n",
    "        return segment\n",
    "\n",
    "\n",
    "class OutputSegments:\n",
    "    def __init__(self, segments: Union[List[Segment], Segment]):\n",
    "        if isinstance(segments, Segment):\n",
    "            segments = [segments]\n",
    "        self.segments: List[Segment] = [s for s in segments]\n",
    "\n",
    "    @property\n",
    "    def is_empty(self):\n",
    "        return all(segment.is_empty for segment in self.segments)\n",
    "\n",
    "    @property\n",
    "    def finished(self):\n",
    "        return all(segment.finished for segment in self.segments)\n",
    "\n",
    "\n",
    "def get_audiosegment(samples, sr):\n",
    "    b = io.BytesIO()\n",
    "    soundfile.write(b, samples, samplerate=sr, format=\"wav\")\n",
    "    b.seek(0)\n",
    "    return AudioSegment.from_file(b)\n",
    "\n",
    "\n",
    "def reset_states(system, states):\n",
    "    if isinstance(system, TreeAgentPipeline):\n",
    "        states_iter = states.values()\n",
    "    else:\n",
    "        states_iter = states\n",
    "    for state in states_iter:\n",
    "        state.reset()\n",
    "\n",
    "\n",
    "def get_states_root(system, states) -> AgentStates:\n",
    "    if isinstance(system, TreeAgentPipeline):\n",
    "        # self.states is a dict\n",
    "        return states[system.source_module]\n",
    "    else:\n",
    "        # self.states is a list\n",
    "        return system.states[0]\n",
    "\n",
    "\n",
    "def plot_s2st(source_file, target_samples, target_fs, intervals, delays, prediction_lists):\n",
    "    mpl.rcParams[\"axes.spines.left\"] = False\n",
    "    mpl.rcParams[\"axes.spines.right\"] = False\n",
    "    mpl.rcParams[\"axes.spines.top\"] = False\n",
    "    mpl.rcParams[\"axes.spines.bottom\"] = False\n",
    "\n",
    "    source_samples, source_fs = soundfile.read(source_file)\n",
    "\n",
    "    _, axes = plt.subplots(5, sharex=True, figsize=(25, 5))\n",
    "    for ax in axes:\n",
    "        ax.set_yticks([])\n",
    "\n",
    "    axes[0].plot(\n",
    "        numpy.linspace(0, len(source_samples) / source_fs, len(source_samples)),\n",
    "        source_samples,\n",
    "    )\n",
    "\n",
    "    axes[1].plot(\n",
    "        numpy.linspace(0, len(target_samples) / target_fs, len(target_samples)),\n",
    "        target_samples,\n",
    "    )\n",
    "\n",
    "    start = 0\n",
    "    for seg_index in range(len(intervals)):\n",
    "        start, duration = intervals[seg_index]\n",
    "        offset = delays[\"s2st\"][seg_index]\n",
    "\n",
    "        samples = target_samples[\n",
    "            int((start) / 1000 * target_fs) : int(\n",
    "                (start + duration) / 1000 * target_fs\n",
    "            )\n",
    "        ]\n",
    "\n",
     "        # Plot each segment offset by its model delay (axes[2]) and at its playback start time (axes[4])\n",
    "        axes[2].plot(\n",
    "            offset / 1000 + numpy.linspace(0, len(samples) / target_fs, len(samples)),\n",
    "            -seg_index * 0.05 + numpy.array(samples),\n",
    "        )\n",
    "        axes[4].plot(\n",
    "            start / 1000 + numpy.linspace(0, len(samples) / target_fs, len(samples)),\n",
    "            numpy.array(samples),\n",
    "        )\n",
    "\n",
    "    from pydub import AudioSegment\n",
    "    print(\"Output translation (without input)\")\n",
    "    display(Audio(target_samples, rate=target_fs))\n",
    "    print(\"Output translation (overlay with input)\")\n",
    "    source_seg = get_audiosegment(source_samples, source_fs) + AudioSegment.silent(duration=3000)\n",
    "    target_seg = get_audiosegment(target_samples, target_fs)\n",
    "    output_seg = source_seg.overlay(target_seg)\n",
    "    display(output_seg)\n",
    "\n",
    "    delay_token = defaultdict(list)\n",
    "    d = delays[\"s2tt\"][0]\n",
    "    for token, delay in zip(prediction_lists[\"s2tt\"], delays[\"s2tt\"]):\n",
    "        if delay != d:\n",
    "            d = delay\n",
    "        delay_token[d].append(token)\n",
    "    for key, value in delay_token.items():\n",
    "        axes[3].text(\n",
    "            key / 1000, 0.2, \" \".join(value), rotation=40\n",
    "        )\n",
    "\n",
    "def build_streaming_system(model_configs, agent_class):\n",
    "    parser = options.general_parser()\n",
    "    parser.add_argument(\"-f\", \"--f\", help=\"a dummy argument to fool ipython\", default=\"1\")\n",
    "\n",
    "    agent_class.add_args(parser)\n",
    "    args, _ = parser.parse_known_args(cli_argument_list(model_configs))\n",
    "    system = agent_class.from_args(args)\n",
    "    return system\n",
    "\n",
    "\n",
    "def run_streaming_inference(system, audio_frontend, system_states, tgt_lang):\n",
    "    # NOTE: Here for visualization, we calculate delays offset from audio\n",
    "    # *BEFORE* VAD segmentation.\n",
    "    # In contrast for SimulEval evaluation, we assume audios are pre-segmented,\n",
    "    # and Average Lagging, End Offset metrics are based on those pre-segmented audios.\n",
    "    # Thus, delays here are *NOT* comparable to SimulEval per-segment delays\n",
    "    delays = {\"s2st\": [], \"s2tt\": []}\n",
    "    prediction_lists = {\"s2st\": [], \"s2tt\": []}\n",
    "    speech_durations = []\n",
    "    curr_delay = 0\n",
    "    target_sample_rate = None\n",
    "\n",
    "    while True:\n",
    "        input_segment = audio_frontend.send_segment()\n",
    "        input_segment.tgt_lang = tgt_lang\n",
    "        curr_delay += len(input_segment.content) / SAMPLE_RATE * 1000\n",
    "        if input_segment.finished:\n",
    "            # a hack, we expect a real stream to end with silence\n",
    "            get_states_root(system, system_states).source_finished = True\n",
    "        # Translation happens here\n",
    "        output_segments = OutputSegments(system.pushpop(input_segment, system_states))\n",
    "        if not output_segments.is_empty:\n",
    "            for segment in output_segments.segments:\n",
    "                # NOTE: another difference from SimulEval evaluation -\n",
    "                # delays are accumulated per-token\n",
    "                if isinstance(segment, SpeechSegment):\n",
    "                    pred_duration = 1000 * len(segment.content) / segment.sample_rate\n",
    "                    speech_durations.append(pred_duration)\n",
    "                    delays[\"s2st\"].append(curr_delay)\n",
    "                    prediction_lists[\"s2st\"].append(segment.content)\n",
    "                    target_sample_rate = segment.sample_rate\n",
    "                elif isinstance(segment, TextSegment):\n",
    "                    delays[\"s2tt\"].append(curr_delay)\n",
    "                    prediction_lists[\"s2tt\"].append(segment.content)\n",
    "                    print(curr_delay, segment.content)\n",
    "        if output_segments.finished:\n",
    "            print(\"End of VAD segment\")\n",
    "            reset_states(system, system_states)\n",
    "        if input_segment.finished:\n",
    "            # an assumption of SimulEval agents -\n",
    "            # once source_finished=True, generate until output translation is finished\n",
    "            assert output_segments.finished\n",
    "            break\n",
    "    return delays, prediction_lists, speech_durations, target_sample_rate\n",
    "\n",
    "\n",
    "def get_s2st_delayed_targets(delays, target_sample_rate, prediction_lists, speech_durations):\n",
     "    # Calculate the intervals and durations for the s2st output segments\n",
    "    intervals = []\n",
    "\n",
    "    start = prev_end = prediction_offset = delays[\"s2st\"][0]\n",
    "    target_samples = [0.0] * int(target_sample_rate * prediction_offset / 1000)\n",
    "\n",
    "    for i, delay in enumerate(delays[\"s2st\"]):\n",
    "        start = max(prev_end, delay)\n",
    "\n",
    "        if start > prev_end:\n",
    "            # Wait source speech, add discontinuity with silence\n",
    "            target_samples += [0.0] * int(\n",
    "                target_sample_rate * (start - prev_end) / 1000\n",
    "            )\n",
    "\n",
    "        target_samples += prediction_lists[\"s2st\"][i]\n",
    "        duration = speech_durations[i]\n",
    "        prev_end = start + duration\n",
    "        intervals.append([start, duration])\n",
    "    return target_samples, intervals"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "wGHmMwIPGWgm"
   },
   "source": [
    "## Build SeamlessStreaming S2ST + S2TT agent"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "TZPg2tm3oXGR",
    "outputId": "6c9b3f55-e50f-46f9-8d39-4e43d3b8251a"
   },
   "outputs": [],
   "source": [
    "from seamless_communication.streaming.agents.seamless_streaming_s2st import (\n",
    "    SeamlessStreamingS2STJointVADAgent,\n",
    ")\n",
    "\n",
    "\n",
    "print(\"building system from dir\")\n",
    "\n",
    "agent_class = SeamlessStreamingS2STJointVADAgent\n",
    "tgt_lang = \"spa\"\n",
    "\n",
    "model_configs = dict(\n",
    "    source_segment_size=320,\n",
    "    device=\"cuda:0\",\n",
    "    dtype=\"fp16\",\n",
    "    min_starting_wait_w2vbert=192,\n",
    "    decision_threshold=0.5,\n",
    "    min_unit_chunk_size=50,\n",
    "    no_early_stop=True,\n",
    "    max_len_a=0,\n",
    "    max_len_b=100,\n",
    "    task=\"s2st\",\n",
    "    tgt_lang=tgt_lang,\n",
    "    block_ngrams=True,\n",
    "    detokenize_only=True,\n",
    ")\n",
    "system = build_streaming_system(model_configs, agent_class)\n",
    "print(\"finished building system\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "rWAgPoUlGaQ0"
   },
   "source": [
    "## Initialize states + run inference"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "izpe5S-rom8A",
    "outputId": "be5433bb-258f-4950-a599-61a73577ab15"
   },
   "outputs": [],
   "source": [
    "source_segment_size = 320  # milliseconds\n",
    "audio_frontend = AudioFrontEnd(\n",
    "    wav_file=\"/content/LJ_eng.wav\",\n",
    "    segment_size=source_segment_size,\n",
    ")\n",
    "\n",
    "system_states = system.build_states()\n",
    "\n",
    "# you can pass tgt_lang at inference time to change the output lang.\n",
    "# SeamlessStreaming supports 36 speech output languages, see https://github.com/facebookresearch/seamless_communication/blob/main/docs/m4t/README.md#supported-languages\n",
    "# in the Target column for `Sp` outputs.\n",
    "delays, prediction_lists, speech_durations, target_sample_rate = run_streaming_inference(\n",
    "    system, audio_frontend, system_states, tgt_lang\n",
    ")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "WnHddD4KGgPr"
   },
   "source": [
    "## Visualize streaming outputs"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "Ac3YKDJwISWJ"
   },
   "source": [
    "The top row is the input audio, while the later rows are the output audio (in chunks), as well as output text, offset by the corresponding delays."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 329
    },
    "id": "x08NFlRzoxdT",
    "outputId": "565b921f-1797-44b8-c85a-476d9a1bcc6d"
   },
   "outputs": [],
   "source": [
    "target_samples, intervals = get_s2st_delayed_targets(delays, target_sample_rate, prediction_lists, speech_durations)\n",
    "\n",
    "plot_s2st(\"/content/LJ_eng.wav\", target_samples, target_sample_rate, intervals, delays, prediction_lists)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "Yy12VEzvJ1zo"
   },
   "source": [
    "## Seamless Unified Inference"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "smeOkMUSyLRk",
    "outputId": "061652e9-2ba0-481a-9ebe-d3b09fd00968"
   },
   "outputs": [],
   "source": [
    "# If you haven't already done so above, please follow the instructions to download\n",
    "# SeamlessExpressive here: https://ai.meta.com/resources/models-and-libraries/seamless-downloads/\n",
    "# NOTE: the signed CloudFront URL below is time-limited and may have expired;\n",
    "# if the download fails, request a fresh link from the page above.\n",
    "\n",
    "!wget \"https://d11ywzt2xtszji.cloudfront.net/SeamlessExpressive.tar.gz?Policy=eyJTdGF0ZW1lbnQiOlt7InVuaXF1ZV9oYXNoIjoiZ2sxMzhuZnNkNDQ0dmM2dDhhazgxbWluIiwiUmVzb3VyY2UiOiJodHRwczpcL1wvZDExeXd6dDJ4dHN6amkuY2xvdWRmcm9udC5uZXRcLyoiLCJDb25kaXRpb24iOnsiRGF0ZUxlc3NUaGFuIjp7IkFXUzpFcG9jaFRpbWUiOjE3MDI1NzIxMjl9fX1dfQ__&Signature=npTULjeiKp9U8hUng4f9Njb6QKpK52Rl9pQjRpamsQSNzWgYeshMABRUNjWQJrw5givbbdGhaa6mW2l3UYHi66x3rBLazIS7d7npHu6aTElyNRZtFgjKMlNWSRfZOXh7NsQSZOFwWy0VxJwVZ%7EKtJnBWvgh7Mov3SKeJFeJEdAESDVO%7EWCHO1Z2zIWl%7EIkfpX5OnMqz7ntU9SpzsVpEHgefcyktm5NZ2xIr%7EoOml3YUXwNEUDj5PhLUkeoSHpFXHSzI0S0GHlxp48C162gUS8qK1HtaXalk7GUDem%7ErAGpx-Bo9oPBe33PdSsvpqngT9E32eS33oJoU1am4RGKFysg__&Key-Pair-Id=K15QRJLYKIFSLZ&Download-Request-ID=1024805765443779\" -O /content/SeamlessExpressive.tar.gz\n",
    "!tar -xzvf /content/SeamlessExpressive.tar.gz"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "Q_hyGuCgMy6O"
   },
   "outputs": [],
   "source": [
    "# You may need to delete the previously loaded model to free GPU memory\n",
    "# del system, system_states\n",
    "# import gc\n",
    "\n",
    "# gc.collect()\n",
    "# torch.cuda.empty_cache()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "7LATtfmGJ5hW",
    "outputId": "c1cd5a69-5366-4f99-84f1-e5fa75970878"
   },
   "outputs": [],
   "source": [
    "# NOTE: to run Seamless unified inference, you need to download the gated model\n",
    "# and specify gated_model_dir (here we use `SeamlessExpressive`)\n",
    "from seamless_communication.streaming.agents.seamless_s2st import (\n",
    "    SeamlessS2STJointVADAgent,\n",
    ")\n",
    "\n",
    "print(\"building system from dir\")\n",
    "\n",
    "agent_class = SeamlessS2STJointVADAgent\n",
    "tgt_lang = \"spa\"\n",
    "\n",
    "model_configs = dict(\n",
    "    source_segment_size=320,\n",
    "    device=\"cuda:0\",\n",
    "    dtype=\"fp16\",\n",
    "    min_starting_wait_w2vbert=192,\n",
    "    decision_threshold=0.5,\n",
    "    min_unit_chunk_size=50,\n",
    "    no_early_stop=True,\n",
    "    max_len_a=0,\n",
    "    max_len_b=100,\n",
    "    task=\"s2st\",\n",
    "    tgt_lang=tgt_lang,\n",
    "    block_ngrams=True,\n",
    "    upstream_idx=1,\n",
    "    detokenize_only=True,\n",
    "    gated_model_dir=\"SeamlessExpressive\",\n",
    ")\n",
    "system = build_streaming_system(model_configs, agent_class)\n",
    "print(\"finished building system\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "1Go-cO6OKS3q",
    "outputId": "80323e57-3849-44e9-b125-2edb57e563e9"
   },
   "outputs": [],
   "source": [
    "source_segment_size = 320  # milliseconds\n",
    "audio_frontend = AudioFrontEnd(\n",
    "    wav_file=\"/content/LJ_eng.wav\",\n",
    "    segment_size=source_segment_size,\n",
    ")\n",
    "\n",
    "system_states = system.build_states()\n",
    "# you can pass tgt_lang at inference time to change the output lang.\n",
    "# Seamless unified supports 6 output languages (eng, spa, fra, cmn, deu, ita)\n",
    "delays, prediction_lists, speech_durations, target_sample_rate = run_streaming_inference(\n",
    "    system, audio_frontend, system_states, tgt_lang\n",
    ")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 341
    },
    "id": "ptr3nXlQKYed",
    "outputId": "e3c91ea0-9f46-4aa4-86fb-25bc1125eae8"
   },
   "outputs": [],
   "source": [
    "target_samples, intervals = get_s2st_delayed_targets(delays, target_sample_rate, prediction_lists, speech_durations)\n",
    "\n",
    "plot_s2st(\"/content/LJ_eng.wav\", target_samples, target_sample_rate, intervals, delays, prediction_lists)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "Jr0kcQ_mGj8s"
   },
   "source": [
    "## Streaming HF space:\n",
    "Try out the streaming HuggingFace space at: https://huggingface.co/spaces/facebook/seamless-streaming"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "OzNNvD5aGr8i"
   },
   "source": [
    "# Unity.cpp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "FFGHgLbaKQ00"
   },
   "outputs": [],
   "source": [
    "# unity.cpp\n",
    "%mkdir -p ggml/build\n",
    "%cd ggml/build\n",
    "!cmake -DGGML_OPENBLAS=ON -DBUILD_SHARED_LIBS=On -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_FLAGS=\"-g2 -fno-omit-frame-pointer\" ..\n",
    "!make -j4 unity\n",
    "# Download seamless_M4T_medium model, converted to ggml format\n",
    "# Conversion script: https://github.com/facebookresearch/seamless_communication/blob/main/ggml/ggml_convert.py\n",
    "!wget https://dl.fbaipublicfiles.com/seamless/models/seamlessM4T_medium.ggml\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "ktkvmng1KTKE"
   },
   "outputs": [],
   "source": [
    "# Launch the unity console (note the `!` shell-escape prefix — without it this\n",
    "# line is a Python SyntaxError). Google Colab does not support interactive\n",
    "# console input for C programs, so this is for reference only.\n",
    "!./bin/unity --model seamlessM4T_medium.ggml -t 8"
   ]
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "colab": {
   "gpuType": "T4",
   "include_colab_link": true,
   "provenance": []
  },
  "kernelspec": {
   "display_name": "seamless",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
