{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d2f0771f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# | default_exp _helpers"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "510c8e0d",
   "metadata": {},
   "source": [
    "# Helpers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4af114fa",
   "metadata": {},
   "outputs": [],
   "source": [
    "# | export\n",
    "\n",
    "import asyncio\n",
    "import inspect\n",
    "import json\n",
    "import textwrap\n",
    "from datetime import datetime, timedelta\n",
    "from typing import *\n",
    "\n",
    "import aiohttp\n",
    "import anyio\n",
    "from fastkafka._aiokafka_imports import AIOKafkaProducer, AIOKafkaConsumer\n",
    "from aiokafka.helpers import create_ssl_context\n",
    "from aiokafka.structs import RecordMetadata\n",
    "from IPython.display import Markdown\n",
    "\n",
    "from fastkafka._components.helpers import in_notebook\n",
    "from fastkafka._components.logger import get_logger\n",
    "from fastkafka._components.meta import delegates"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b77c1814",
   "metadata": {},
   "outputs": [],
   "source": [
    "from fastkafka._components.logger import suppress_timestamps\n",
    "from fastkafka.testing import ApacheKafkaBroker"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ae342228",
   "metadata": {},
   "outputs": [],
   "source": [
    "# | export\n",
    "\n",
    "if in_notebook():\n",
    "    from tqdm.notebook import tqdm\n",
    "else:\n",
    "    from tqdm import tqdm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d9f26762",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pytest\n",
    "from pydantic import BaseModel"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cf2b594b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# | export\n",
    "\n",
    "logger = get_logger(__name__)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "74640d74",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[INFO] __main__: ok\n"
     ]
    }
   ],
   "source": [
    "suppress_timestamps()\n",
    "logger = get_logger(__name__, level=20)\n",
    "logger.info(\"ok\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "685d0ba3",
   "metadata": {},
   "source": [
    "## Configuration conversions between Confluent and AIOKafka formats"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2b85a92f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# | export\n",
    "\n",
    "\n",
    "@delegates(AIOKafkaProducer)\n",
    "def aiokafka2confluent(**kwargs: Dict[str, Any]) -> Dict[str, Any]:\n",
    "    \"\"\"Converts AIOKafka styled config dictionary into Confluence styled one\n",
    "\n",
    "    Returns (Dict[str, Any]):\n",
    "        Confluence styled config dictionary\n",
    "\n",
    "    Args:\n",
    "        bootstrap_servers (str, list(str)): a ``host[:port]`` string or list of\n",
    "            ``host[:port]`` strings that the producer should contact to\n",
    "            bootstrap initial cluster metadata. This does not have to be the\n",
    "            full node list.  It just needs to have at least one broker that will\n",
    "            respond to a Metadata API Request. Default port is 9092. If no\n",
    "            servers are specified, will default to ``localhost:9092``.\n",
    "        client_id (str): a name for this client. This string is passed in\n",
    "            each request to servers and can be used to identify specific\n",
    "            server-side log entries that correspond to this client.\n",
    "            Default: ``aiokafka-producer-#`` (appended with a unique number\n",
    "            per instance)\n",
    "        key_serializer (Callable): used to convert user-supplied keys to bytes\n",
    "            If not :data:`None`, called as ``f(key),`` should return\n",
    "            :class:`bytes`.\n",
    "            Default: :data:`None`.\n",
    "        value_serializer (Callable): used to convert user-supplied message\n",
    "            values to :class:`bytes`. If not :data:`None`, called as\n",
    "            ``f(value)``, should return :class:`bytes`.\n",
    "            Default: :data:`None`.\n",
    "        acks (Any): one of ``0``, ``1``, ``all``. The number of acknowledgments\n",
    "            the producer requires the leader to have received before considering a\n",
    "            request complete. This controls the durability of records that are\n",
    "            sent. The following settings are common:\n",
    "\n",
    "            * ``0``: Producer will not wait for any acknowledgment from the server\n",
    "              at all. The message will immediately be added to the socket\n",
    "              buffer and considered sent. No guarantee can be made that the\n",
    "              server has received the record in this case, and the retries\n",
    "              configuration will not take effect (as the client won't\n",
    "              generally know of any failures). The offset given back for each\n",
    "              record will always be set to -1.\n",
    "            * ``1``: The broker leader will write the record to its local log but\n",
    "              will respond without awaiting full acknowledgement from all\n",
    "              followers. In this case should the leader fail immediately\n",
    "              after acknowledging the record but before the followers have\n",
    "              replicated it then the record will be lost.\n",
    "            * ``all``: The broker leader will wait for the full set of in-sync\n",
    "              replicas to acknowledge the record. This guarantees that the\n",
    "              record will not be lost as long as at least one in-sync replica\n",
    "              remains alive. This is the strongest available guarantee.\n",
    "\n",
    "            If unset, defaults to ``acks=1``. If `enable_idempotence` is\n",
    "            :data:`True` defaults to ``acks=all``\n",
    "        compression_type (str): The compression type for all data generated by\n",
    "            the producer. Valid values are ``gzip``, ``snappy``, ``lz4``, ``zstd``\n",
    "            or :data:`None`.\n",
    "            Compression is of full batches of data, so the efficacy of batching\n",
    "            will also impact the compression ratio (more batching means better\n",
    "            compression). Default: :data:`None`.\n",
    "        max_batch_size (int): Maximum size of buffered data per partition.\n",
    "            After this amount :meth:`send` coroutine will block until batch is\n",
    "            drained.\n",
    "            Default: 16384\n",
    "        linger_ms (int): The producer groups together any records that arrive\n",
    "            in between request transmissions into a single batched request.\n",
    "            Normally this occurs only under load when records arrive faster\n",
    "            than they can be sent out. However in some circumstances the client\n",
    "            may want to reduce the number of requests even under moderate load.\n",
    "            This setting accomplishes this by adding a small amount of\n",
    "            artificial delay; that is, if first request is processed faster,\n",
    "            than `linger_ms`, producer will wait ``linger_ms - process_time``.\n",
    "            Default: 0 (i.e. no delay).\n",
    "        partitioner (Callable): Callable used to determine which partition\n",
    "            each message is assigned to. Called (after key serialization):\n",
    "            ``partitioner(key_bytes, all_partitions, available_partitions)``.\n",
    "            The default partitioner implementation hashes each non-None key\n",
    "            using the same murmur2 algorithm as the Java client so that\n",
    "            messages with the same key are assigned to the same partition.\n",
    "            When a key is :data:`None`, the message is delivered to a random partition\n",
    "            (filtered to partitions with available leaders only, if possible).\n",
    "        max_request_size (int): The maximum size of a request. This is also\n",
    "            effectively a cap on the maximum record size. Note that the server\n",
    "            has its own cap on record size which may be different from this.\n",
    "            This setting will limit the number of record batches the producer\n",
    "            will send in a single request to avoid sending huge requests.\n",
    "            Default: 1048576.\n",
    "        metadata_max_age_ms (int): The period of time in milliseconds after\n",
    "            which we force a refresh of metadata even if we haven't seen any\n",
    "            partition leadership changes to proactively discover any new\n",
    "            brokers or partitions. Default: 300000\n",
    "        request_timeout_ms (int): Produce request timeout in milliseconds.\n",
    "            As it's sent as part of\n",
    "            :class:`~kafka.protocol.produce.ProduceRequest` (it's a blocking\n",
    "            call), maximum waiting time can be up to ``2 *\n",
    "            request_timeout_ms``.\n",
    "            Default: 40000.\n",
    "        retry_backoff_ms (int): Milliseconds to backoff when retrying on\n",
    "            errors. Default: 100.\n",
    "        api_version (str): specify which kafka API version to use.\n",
    "            If set to ``auto``, will attempt to infer the broker version by\n",
    "            probing various APIs. Default: ``auto``\n",
    "        security_protocol (str): Protocol used to communicate with brokers.\n",
    "            Valid values are: ``PLAINTEXT``, ``SSL``. Default: ``PLAINTEXT``.\n",
    "            Default: ``PLAINTEXT``.\n",
    "        ssl_context (ssl.SSLContext): pre-configured :class:`~ssl.SSLContext`\n",
    "            for wrapping socket connections. Directly passed into asyncio's\n",
    "            :meth:`~asyncio.loop.create_connection`. For more\n",
    "            information see :ref:`ssl_auth`.\n",
    "            Default: :data:`None`\n",
    "        connections_max_idle_ms (int): Close idle connections after the number\n",
    "            of milliseconds specified by this config. Specifying :data:`None` will\n",
    "            disable idle checks. Default: 540000 (9 minutes).\n",
    "        enable_idempotence (bool): When set to :data:`True`, the producer will\n",
    "            ensure that exactly one copy of each message is written in the\n",
    "            stream. If :data:`False`, producer retries due to broker failures,\n",
    "            etc., may write duplicates of the retried message in the stream.\n",
    "            Note that enabling idempotence acks to set to ``all``. If it is not\n",
    "            explicitly set by the user it will be chosen. If incompatible\n",
    "            values are set, a :exc:`ValueError` will be thrown.\n",
    "            New in version 0.5.0.\n",
    "        sasl_mechanism (str): Authentication mechanism when security_protocol\n",
    "            is configured for ``SASL_PLAINTEXT`` or ``SASL_SSL``. Valid values\n",
    "            are: ``PLAIN``, ``GSSAPI``, ``SCRAM-SHA-256``, ``SCRAM-SHA-512``,\n",
    "            ``OAUTHBEARER``.\n",
    "            Default: ``PLAIN``\n",
    "        sasl_plain_username (str): username for SASL ``PLAIN`` authentication.\n",
    "            Default: :data:`None`\n",
    "        sasl_plain_password (str): password for SASL ``PLAIN`` authentication.\n",
    "            Default: :data:`None`\n",
    "        sasl_oauth_token_provider (: class:`~aiokafka.abc.AbstractTokenProvider`):\n",
    "            OAuthBearer token provider instance. (See\n",
    "            :mod:`kafka.oauth.abstract`).\n",
    "            Default: :data:`None`\n",
    "    \"\"\"\n",
    "    confluent_config = {k.replace(\"_\", \".\"): v for k, v in kwargs.items()}\n",
    "    for k1, k2 in zip(\n",
    "        [\"sasl.plain.username\", \"sasl.plain.password\"],\n",
    "        [\"sasl.username\", \"sasl.password\"],\n",
    "    ):\n",
    "        if k1 in confluent_config:\n",
    "            confluent_config[k2] = confluent_config.pop(k1)\n",
    "\n",
    "    if \"ssl.context\" in confluent_config:\n",
    "        confluent_config.pop(\"ssl.context\")\n",
    "\n",
    "    return confluent_config"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fe42a4bb",
   "metadata": {},
   "outputs": [],
   "source": [
    "ssl_context = create_ssl_context()\n",
    "\n",
    "aiokafka_config = {\n",
    "    \"bootstrap_servers\": f\"kafka.staging.airt:9092\",\n",
    "    \"group_id\": \"kafka_consume_group\",\n",
    "    \"auto_offset_reset\": \"earliest\",\n",
    "    \"security_protocol\": \"SASL_SSL\",\n",
    "    \"sasl_mechanism\": \"PLAIN\",\n",
    "    \"sasl_plain_username\": \"myname\",\n",
    "    \"sasl_plain_password\": \"*************\",\n",
    "    \"ssl_context\": create_ssl_context(),\n",
    "}\n",
    "\n",
    "expected = {\n",
    "    \"bootstrap.servers\": \"kafka.staging.airt:9092\",\n",
    "    \"group.id\": \"kafka_consume_group\",\n",
    "    \"auto.offset.reset\": \"earliest\",\n",
    "    \"security.protocol\": \"SASL_SSL\",\n",
    "    \"sasl.mechanism\": \"PLAIN\",\n",
    "    \"sasl.username\": \"myname\",\n",
    "    \"sasl.password\": \"*************\",\n",
    "}\n",
    "\n",
    "confluent_config = aiokafka2confluent(**aiokafka_config)\n",
    "assert confluent_config == expected"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b016537f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# | export\n",
    "\n",
    "\n",
    "def confluent2aiokafka(confluent_config: Dict[str, Any]) -> Dict[str, Any]:\n",
    "    \"\"\"Converts AIOKafka styled config dictionary into Confluence styled one\n",
    "\n",
    "    Args:\n",
    "        confluent_config: Confluence styled config dictionary\n",
    "\n",
    "    Returns:\n",
    "        AIOKafka styled config dictionary\n",
    "    \"\"\"\n",
    "\n",
    "    aiokafka_config = {k.replace(\".\", \"_\"): v for k, v in confluent_config.items()}\n",
    "    for k1, k2 in zip(\n",
    "        [\"sasl_username\", \"sasl_password\"],\n",
    "        [\"sasl_plain_username\", \"sasl_plain_password\"],\n",
    "    ):\n",
    "        if k1 in aiokafka_config:\n",
    "            aiokafka_config[k2] = aiokafka_config.pop(k1)\n",
    "\n",
    "    if \"sasl_plain_username\" in aiokafka_config:\n",
    "        aiokafka_config[\"ssl.context\"] = (create_ssl_context(),)\n",
    "\n",
    "    return aiokafka_config"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0787bb4f",
   "metadata": {},
   "outputs": [],
   "source": [
    "confluent_config = {\n",
    "    \"bootstrap.servers\": \"kafka.staging.airt:9092\",\n",
    "    \"group.id\": \"kafka_consume_group\",\n",
    "    \"auto.offset.reset\": \"earliest\",\n",
    "    \"security.protocol\": \"SASL_SSL\",\n",
    "    \"sasl.mechanism\": \"PLAIN\",\n",
    "    \"sasl.username\": \"myname\",\n",
    "    \"sasl.password\": \"*************\",\n",
    "}\n",
    "\n",
    "expected = {\n",
    "    \"bootstrap_servers\": \"kafka.staging.airt:9092\",\n",
    "    \"group_id\": \"kafka_consume_group\",\n",
    "    \"auto_offset_reset\": \"earliest\",\n",
    "    \"security_protocol\": \"SASL_SSL\",\n",
    "    \"sasl_mechanism\": \"PLAIN\",\n",
    "    \"sasl_plain_username\": \"myname\",\n",
    "    \"sasl_plain_password\": \"*************\",\n",
    "}\n",
    "\n",
    "aiokafka_config = confluent2aiokafka(confluent_config)\n",
    "\n",
    "aiokafka_config.pop(\"ssl.context\")\n",
    "\n",
    "assert aiokafka_config == expected"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f29bfa31",
   "metadata": {},
   "source": [
    "## Producing and consuming messages"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cd699bdd",
   "metadata": {},
   "outputs": [],
   "source": [
    "# | export\n",
    "\n",
    "\n",
    "@delegates(AIOKafkaProducer)\n",
    "async def produce_messages(  # type: ignore\n",
    "    *,\n",
    "    topic: str,\n",
    "    msgs: List[Any],\n",
    "    **kwargs: Dict[str, Any],\n",
    ") -> List[RecordMetadata]:\n",
    "    \"\"\"Produces messages to Kafka topic\n",
    "\n",
    "    Args:\n",
    "        topic: Topic name\n",
    "        msgs: a list of messages to produce\n",
    "        bootstrap_servers (str, list(str)): a ``host[:port]`` string or list of\n",
    "            ``host[:port]`` strings that the producer should contact to\n",
    "            bootstrap initial cluster metadata. This does not have to be the\n",
    "            full node list.  It just needs to have at least one broker that will\n",
    "            respond to a Metadata API Request. Default port is 9092. If no\n",
    "            servers are specified, will default to ``localhost:9092``.\n",
    "        client_id (str): a name for this client. This string is passed in\n",
    "            each request to servers and can be used to identify specific\n",
    "            server-side log entries that correspond to this client.\n",
    "            Default: ``aiokafka-producer-#`` (appended with a unique number\n",
    "            per instance)\n",
    "        key_serializer (Callable): used to convert user-supplied keys to bytes\n",
    "            If not :data:`None`, called as ``f(key),`` should return\n",
    "            :class:`bytes`.\n",
    "            Default: :data:`None`.\n",
    "        value_serializer (Callable): used to convert user-supplied message\n",
    "            values to :class:`bytes`. If not :data:`None`, called as\n",
    "            ``f(value)``, should return :class:`bytes`.\n",
    "            Default: :data:`None`.\n",
    "        acks (Any): one of ``0``, ``1``, ``all``. The number of acknowledgments\n",
    "            the producer requires the leader to have received before considering a\n",
    "            request complete. This controls the durability of records that are\n",
    "            sent. The following settings are common:\n",
    "\n",
    "            * ``0``: Producer will not wait for any acknowledgment from the server\n",
    "              at all. The message will immediately be added to the socket\n",
    "              buffer and considered sent. No guarantee can be made that the\n",
    "              server has received the record in this case, and the retries\n",
    "              configuration will not take effect (as the client won't\n",
    "              generally know of any failures). The offset given back for each\n",
    "              record will always be set to -1.\n",
    "            * ``1``: The broker leader will write the record to its local log but\n",
    "              will respond without awaiting full acknowledgement from all\n",
    "              followers. In this case should the leader fail immediately\n",
    "              after acknowledging the record but before the followers have\n",
    "              replicated it then the record will be lost.\n",
    "            * ``all``: The broker leader will wait for the full set of in-sync\n",
    "              replicas to acknowledge the record. This guarantees that the\n",
    "              record will not be lost as long as at least one in-sync replica\n",
    "              remains alive. This is the strongest available guarantee.\n",
    "\n",
    "            If unset, defaults to ``acks=1``. If `enable_idempotence` is\n",
    "            :data:`True` defaults to ``acks=all``\n",
    "        compression_type (str): The compression type for all data generated by\n",
    "            the producer. Valid values are ``gzip``, ``snappy``, ``lz4``, ``zstd``\n",
    "            or :data:`None`.\n",
    "            Compression is of full batches of data, so the efficacy of batching\n",
    "            will also impact the compression ratio (more batching means better\n",
    "            compression). Default: :data:`None`.\n",
    "        max_batch_size (int): Maximum size of buffered data per partition.\n",
    "            After this amount :meth:`send` coroutine will block until batch is\n",
    "            drained.\n",
    "            Default: 16384\n",
    "        linger_ms (int): The producer groups together any records that arrive\n",
    "            in between request transmissions into a single batched request.\n",
    "            Normally this occurs only under load when records arrive faster\n",
    "            than they can be sent out. However in some circumstances the client\n",
    "            may want to reduce the number of requests even under moderate load.\n",
    "            This setting accomplishes this by adding a small amount of\n",
    "            artificial delay; that is, if first request is processed faster,\n",
    "            than `linger_ms`, producer will wait ``linger_ms - process_time``.\n",
    "            Default: 0 (i.e. no delay).\n",
    "        partitioner (Callable): Callable used to determine which partition\n",
    "            each message is assigned to. Called (after key serialization):\n",
    "            ``partitioner(key_bytes, all_partitions, available_partitions)``.\n",
    "            The default partitioner implementation hashes each non-None key\n",
    "            using the same murmur2 algorithm as the Java client so that\n",
    "            messages with the same key are assigned to the same partition.\n",
    "            When a key is :data:`None`, the message is delivered to a random partition\n",
    "            (filtered to partitions with available leaders only, if possible).\n",
    "        max_request_size (int): The maximum size of a request. This is also\n",
    "            effectively a cap on the maximum record size. Note that the server\n",
    "            has its own cap on record size which may be different from this.\n",
    "            This setting will limit the number of record batches the producer\n",
    "            will send in a single request to avoid sending huge requests.\n",
    "            Default: 1048576.\n",
    "        metadata_max_age_ms (int): The period of time in milliseconds after\n",
    "            which we force a refresh of metadata even if we haven't seen any\n",
    "            partition leadership changes to proactively discover any new\n",
    "            brokers or partitions. Default: 300000\n",
    "        request_timeout_ms (int): Produce request timeout in milliseconds.\n",
    "            As it's sent as part of\n",
    "            :class:`~kafka.protocol.produce.ProduceRequest` (it's a blocking\n",
    "            call), maximum waiting time can be up to ``2 *\n",
    "            request_timeout_ms``.\n",
    "            Default: 40000.\n",
    "        retry_backoff_ms (int): Milliseconds to backoff when retrying on\n",
    "            errors. Default: 100.\n",
    "        api_version (str): specify which kafka API version to use.\n",
    "            If set to ``auto``, will attempt to infer the broker version by\n",
    "            probing various APIs. Default: ``auto``\n",
    "        security_protocol (str): Protocol used to communicate with brokers.\n",
    "            Valid values are: ``PLAINTEXT``, ``SSL``. Default: ``PLAINTEXT``.\n",
    "            Default: ``PLAINTEXT``.\n",
    "        ssl_context (ssl.SSLContext): pre-configured :class:`~ssl.SSLContext`\n",
    "            for wrapping socket connections. Directly passed into asyncio's\n",
    "            :meth:`~asyncio.loop.create_connection`. For more\n",
    "            information see :ref:`ssl_auth`.\n",
    "            Default: :data:`None`\n",
    "        connections_max_idle_ms (int): Close idle connections after the number\n",
    "            of milliseconds specified by this config. Specifying :data:`None` will\n",
    "            disable idle checks. Default: 540000 (9 minutes).\n",
    "        enable_idempotence (bool): When set to :data:`True`, the producer will\n",
    "            ensure that exactly one copy of each message is written in the\n",
    "            stream. If :data:`False`, producer retries due to broker failures,\n",
    "            etc., may write duplicates of the retried message in the stream.\n",
    "            Note that enabling idempotence acks to set to ``all``. If it is not\n",
    "            explicitly set by the user it will be chosen. If incompatible\n",
    "            values are set, a :exc:`ValueError` will be thrown.\n",
    "            New in version 0.5.0.\n",
    "        sasl_mechanism (str): Authentication mechanism when security_protocol\n",
    "            is configured for ``SASL_PLAINTEXT`` or ``SASL_SSL``. Valid values\n",
    "            are: ``PLAIN``, ``GSSAPI``, ``SCRAM-SHA-256``, ``SCRAM-SHA-512``,\n",
    "            ``OAUTHBEARER``.\n",
    "            Default: ``PLAIN``\n",
    "        sasl_plain_username (str): username for SASL ``PLAIN`` authentication.\n",
    "            Default: :data:`None`\n",
    "        sasl_plain_password (str): password for SASL ``PLAIN`` authentication.\n",
    "            Default: :data:`None`\n",
    "        sasl_oauth_token_provider (: class:`~aiokafka.abc.AbstractTokenProvider`):\n",
    "            OAuthBearer token provider instance. (See\n",
    "            :mod:`kafka.oauth.abstract`).\n",
    "            Default: :data:`None`\n",
    "    \"\"\"\n",
    "    p = AIOKafkaProducer(**kwargs)\n",
    "    await p.start()\n",
    "\n",
    "    try:\n",
    "\n",
    "        def prepare_msg(msg: Any) -> bytes:\n",
    "            if isinstance(msg, bytes):\n",
    "                return msg\n",
    "            elif isinstance(msg, str):\n",
    "                return msg.encode(\"utf-8\")\n",
    "            elif hasattr(msg, \"json\"):\n",
    "                return msg.json().encode(\"utf-8\")  # type: ignore\n",
    "            return json.dumps(msg).encode(\"utf-8\")\n",
    "\n",
    "        fx = [\n",
    "            await p.send(topic, prepare_msg(msg))\n",
    "            for msg in tqdm(msgs, desc=f\"producing to '{topic}'\")\n",
    "        ]\n",
    "        delivery = [await f for f in fx]\n",
    "        return delivery\n",
    "    finally:\n",
    "        await p.stop()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "122e4eb1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# print(combine_params(produce_messages, AIOKafkaProducer).__doc__)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "338d0a92",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[INFO] fastkafka._components.test_dependencies: Java is already installed.\n",
      "[INFO] fastkafka._components.test_dependencies: But not exported to PATH, exporting...\n",
      "[INFO] fastkafka._components.test_dependencies: Kafka is installed.\n",
      "[INFO] fastkafka._components.test_dependencies: But not exported to PATH, exporting...\n",
      "[INFO] fastkafka._testing.apache_kafka_broker: Starting zookeeper...\n",
      "[INFO] fastkafka._testing.apache_kafka_broker: Starting kafka...\n",
      "[INFO] fastkafka._testing.apache_kafka_broker: Local Kafka broker up and running on 127.0.0.1:9092\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "25c5bbb2b11f44f3a3ea78dac14454a4",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "producing to 'test_produce_messages':   0%|          | 0/120000 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[INFO] fastkafka._components._subprocess: terminate_asyncio_process(): Terminating the process 2864...\n",
      "[INFO] fastkafka._components._subprocess: terminate_asyncio_process(): Process 2864 terminated.\n",
      "[INFO] fastkafka._components._subprocess: terminate_asyncio_process(): Terminating the process 2490...\n",
      "[INFO] fastkafka._components._subprocess: terminate_asyncio_process(): Process 2490 terminated.\n"
     ]
    }
   ],
   "source": [
    "class Hello(BaseModel):\n",
    "    msg: str\n",
    "\n",
    "\n",
    "msgs_count = 120_000\n",
    "msgs = (\n",
    "    [b\"Hello world bytes\" for _ in range(msgs_count // 3)]\n",
    "    + [f\"Hello world as string for the {i+1}. time!\" for i in range(msgs_count // 3)]\n",
    "    + [\n",
    "        Hello(msg=\"Hello workd as Pydantic object for the {i+1}. time!\")\n",
    "        for i in range(msgs_count // 3)\n",
    "    ]\n",
    ")\n",
    "async with ApacheKafkaBroker(topics=[\"test_produce_messages\"], listener_port=9992) as bootstrap_server:\n",
    "    delivery_report = await produce_messages(\n",
    "        msgs=msgs, topic=\"test_produce_messages\", bootstrap_servers=bootstrap_server\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7ae30572",
   "metadata": {},
   "outputs": [],
   "source": [
    "# | export\n",
    "\n",
    "\n",
    "@delegates(AIOKafkaConsumer)\n",
    "async def consumes_messages(\n",
    "    *,\n",
    "    topic: str,\n",
    "    msgs_count: int,\n",
    "    **kwargs: Dict[str, Any],\n",
    ") -> None:\n",
    "    \"\"\"Consumes messages\n",
    "    Args:\n",
    "        topic: Topic name\n",
    "        msgs_count: number of messages to consume before returning\n",
    "        *topics (list(str)): optional list of topics to subscribe to. If not set,\n",
    "            call :meth:`.subscribe` or :meth:`.assign` before consuming records.\n",
    "            Passing topics directly is same as calling :meth:`.subscribe` API.\n",
    "        bootstrap_servers (str, list(str)): a ``host[:port]`` string (or list of\n",
    "            ``host[:port]`` strings) that the consumer should contact to bootstrap\n",
    "            initial cluster metadata.\n",
    "\n",
    "            This does not have to be the full node list.\n",
    "            It just needs to have at least one broker that will respond to a\n",
    "            Metadata API Request. Default port is 9092. If no servers are\n",
    "            specified, will default to ``localhost:9092``.\n",
    "        client_id (str): a name for this client. This string is passed in\n",
    "            each request to servers and can be used to identify specific\n",
    "            server-side log entries that correspond to this client. Also\n",
    "            submitted to :class:`~.consumer.group_coordinator.GroupCoordinator`\n",
    "            for logging with respect to consumer group administration. Default:\n",
    "            ``aiokafka-{version}``\n",
    "        group_id (str or None): name of the consumer group to join for dynamic\n",
    "            partition assignment (if enabled), and to use for fetching and\n",
    "            committing offsets. If None, auto-partition assignment (via\n",
    "            group coordinator) and offset commits are disabled.\n",
    "            Default: None\n",
    "        key_deserializer (Callable): Any callable that takes a\n",
    "            raw message key and returns a deserialized key.\n",
    "        value_deserializer (Callable, Optional): Any callable that takes a\n",
    "            raw message value and returns a deserialized value.\n",
    "        fetch_min_bytes (int): Minimum amount of data the server should\n",
    "            return for a fetch request, otherwise wait up to\n",
    "            `fetch_max_wait_ms` for more data to accumulate. Default: 1.\n",
    "        fetch_max_bytes (int): The maximum amount of data the server should\n",
    "            return for a fetch request. This is not an absolute maximum, if\n",
    "            the first message in the first non-empty partition of the fetch\n",
    "            is larger than this value, the message will still be returned\n",
    "            to ensure that the consumer can make progress. NOTE: consumer\n",
    "            performs fetches to multiple brokers in parallel so memory\n",
    "            usage will depend on the number of brokers containing\n",
    "            partitions for the topic.\n",
    "            Supported Kafka version >= 0.10.1.0. Default: 52428800 (50 Mb).\n",
    "        fetch_max_wait_ms (int): The maximum amount of time in milliseconds\n",
    "            the server will block before answering the fetch request if\n",
    "            there isn't sufficient data to immediately satisfy the\n",
    "            requirement given by fetch_min_bytes. Default: 500.\n",
    "        max_partition_fetch_bytes (int): The maximum amount of data\n",
    "            per-partition the server will return. The maximum total memory\n",
    "            used for a request ``= #partitions * max_partition_fetch_bytes``.\n",
    "            This size must be at least as large as the maximum message size\n",
    "            the server allows or else it is possible for the producer to\n",
    "            send messages larger than the consumer can fetch. If that\n",
    "            happens, the consumer can get stuck trying to fetch a large\n",
    "            message on a certain partition. Default: 1048576.\n",
    "        max_poll_records (int): The maximum number of records returned in a\n",
    "            single call to :meth:`.getmany`. Defaults ``None``, no limit.\n",
    "        request_timeout_ms (int): Client request timeout in milliseconds.\n",
    "            Default: 40000.\n",
    "        retry_backoff_ms (int): Milliseconds to backoff when retrying on\n",
    "            errors. Default: 100.\n",
    "        auto_offset_reset (str): A policy for resetting offsets on\n",
    "            :exc:`.OffsetOutOfRangeError` errors: ``earliest`` will move to the oldest\n",
    "            available message, ``latest`` will move to the most recent, and\n",
    "            ``none`` will raise an exception so you can handle this case.\n",
    "            Default: ``latest``.\n",
    "        enable_auto_commit (bool): If true the consumer's offset will be\n",
    "            periodically committed in the background. Default: True.\n",
    "        auto_commit_interval_ms (int): milliseconds between automatic\n",
    "            offset commits, if enable_auto_commit is True. Default: 5000.\n",
    "        check_crcs (bool): Automatically check the CRC32 of the records\n",
    "            consumed. This ensures no on-the-wire or on-disk corruption to\n",
    "            the messages occurred. This check adds some overhead, so it may\n",
    "            be disabled in cases seeking extreme performance. Default: True\n",
    "        metadata_max_age_ms (int): The period of time in milliseconds after\n",
    "            which we force a refresh of metadata even if we haven't seen any\n",
    "            partition leadership changes to proactively discover any new\n",
    "            brokers or partitions. Default: 300000\n",
    "        partition_assignment_strategy (list): List of objects to use to\n",
    "            distribute partition ownership amongst consumer instances when\n",
    "            group management is used. This preference is implicit in the order\n",
    "            of the strategies in the list. When assignment strategy changes:\n",
    "            to support a change to the assignment strategy, new versions must\n",
    "            enable support both for the old assignment strategy and the new\n",
    "            one. The coordinator will choose the old assignment strategy until\n",
    "            all members have been updated. Then it will choose the new\n",
    "            strategy. Default: [:class:`.RoundRobinPartitionAssignor`]\n",
    "        max_poll_interval_ms (int): Maximum allowed time between calls to\n",
    "            consume messages (e.g., :meth:`.getmany`). If this interval\n",
    "            is exceeded the consumer is considered failed and the group will\n",
    "            rebalance in order to reassign the partitions to another consumer\n",
    "            group member. If API methods block waiting for messages, that time\n",
    "            does not count against this timeout. See `KIP-62`_ for more\n",
    "            information. Default 300000\n",
    "        rebalance_timeout_ms (int): The maximum time server will wait for this\n",
    "            consumer to rejoin the group in a case of rebalance. In Java client\n",
    "            this behaviour is bound to `max.poll.interval.ms` configuration,\n",
    "            but as ``aiokafka`` will rejoin the group in the background, we\n",
    "            decouple this setting to allow finer tuning by users that use\n",
    "            :class:`.ConsumerRebalanceListener` to delay rebalancing. Defaults\n",
    "            to ``session_timeout_ms``\n",
    "        session_timeout_ms (int): Client group session and failure detection\n",
    "            timeout. The consumer sends periodic heartbeats\n",
    "            (`heartbeat.interval.ms`) to indicate its liveness to the broker.\n",
    "            If no heartbeats are received by the broker for a group member within\n",
    "            the session timeout, the broker will remove the consumer from the\n",
    "            group and trigger a rebalance. The allowed range is configured with\n",
    "            the **broker** configuration properties\n",
    "            `group.min.session.timeout.ms` and `group.max.session.timeout.ms`.\n",
    "            Default: 10000\n",
    "        heartbeat_interval_ms (int): The expected time in milliseconds\n",
    "            between heartbeats to the consumer coordinator when using\n",
    "            Kafka's group management feature. Heartbeats are used to ensure\n",
    "            that the consumer's session stays active and to facilitate\n",
    "            rebalancing when new consumers join or leave the group. The\n",
    "            value must be set lower than `session_timeout_ms`, but typically\n",
    "            should be set no higher than 1/3 of that value. It can be\n",
    "            adjusted even lower to control the expected time for normal\n",
    "            rebalances. Default: 3000\n",
    "        consumer_timeout_ms (int): maximum wait timeout for background fetching\n",
    "            routine. Mostly defines how fast the system will see rebalance and\n",
    "            request new data for new partitions. Default: 200\n",
    "        api_version (str): specify which kafka API version to use.\n",
    "            :class:`AIOKafkaConsumer` supports Kafka API versions >=0.9 only.\n",
    "            If set to ``auto``, will attempt to infer the broker version by\n",
    "            probing various APIs. Default: ``auto``\n",
    "        security_protocol (str): Protocol used to communicate with brokers.\n",
    "            Valid values are: ``PLAINTEXT``, ``SSL``. Default: ``PLAINTEXT``.\n",
    "        ssl_context (ssl.SSLContext): pre-configured :class:`~ssl.SSLContext`\n",
    "            for wrapping socket connections. Directly passed into asyncio's\n",
    "            :meth:`~asyncio.loop.create_connection`. For more information see\n",
    "            :ref:`ssl_auth`. Default: None.\n",
    "        exclude_internal_topics (bool): Whether records from internal topics\n",
    "            (such as offsets) should be exposed to the consumer. If set to True\n",
    "            the only way to receive records from an internal topic is\n",
    "            subscribing to it. Requires 0.10+ Default: True\n",
    "        connections_max_idle_ms (int): Close idle connections after the number\n",
    "            of milliseconds specified by this config. Specifying `None` will\n",
    "            disable idle checks. Default: 540000 (9 minutes).\n",
    "        isolation_level (str): Controls how to read messages written\n",
    "            transactionally.\n",
    "\n",
    "            If set to ``read_committed``, :meth:`.getmany` will only return\n",
    "            transactional messages which have been committed.\n",
    "            If set to ``read_uncommitted`` (the default), :meth:`.getmany` will\n",
    "            return all messages, even transactional messages which have been\n",
    "            aborted.\n",
    "\n",
    "            Non-transactional messages will be returned unconditionally in\n",
    "            either mode.\n",
    "\n",
    "            Messages will always be returned in offset order. Hence, in\n",
    "            `read_committed` mode, :meth:`.getmany` will only return\n",
    "            messages up to the last stable offset (LSO), which is the one less\n",
    "            than the offset of the first open transaction. In particular any\n",
    "            messages appearing after messages belonging to ongoing transactions\n",
    "            will be withheld until the relevant transaction has been completed.\n",
    "            As a result, `read_committed` consumers will not be able to read up\n",
    "            to the high watermark when there are in flight transactions.\n",
    "            Further, when in `read_committed` the seek_to_end method will\n",
    "            return the LSO. See method docs below. Default: ``read_uncommitted``\n",
    "        sasl_mechanism (str): Authentication mechanism when security_protocol\n",
    "            is configured for ``SASL_PLAINTEXT`` or ``SASL_SSL``. Valid values are:\n",
    "            ``PLAIN``, ``GSSAPI``, ``SCRAM-SHA-256``, ``SCRAM-SHA-512``,\n",
    "            ``OAUTHBEARER``.\n",
    "            Default: ``PLAIN``\n",
    "        sasl_plain_username (str): username for SASL ``PLAIN`` authentication.\n",
    "            Default: None\n",
    "        sasl_plain_password (str): password for SASL ``PLAIN`` authentication.\n",
    "            Default: None\n",
    "        sasl_oauth_token_provider (:class:`~aiokafka.abc.AbstractTokenProvider`): OAuthBearer token provider instance. (See :mod:`kafka.oauth.abstract`).\n",
    "            Default: None\n",
    "    \"\"\"\n",
    "    consumer = AIOKafkaConsumer(topic, **kwargs)\n",
    "    await consumer.start()\n",
    "    try:\n",
    "        with tqdm(total=msgs_count, desc=f\"consuming from '{topic}'\") as pbar:\n",
    "            async for msg in consumer:\n",
    "                pbar.update(1)\n",
    "                if pbar.n >= pbar.total:\n",
    "                    break\n",
    "    finally:\n",
    "        await consumer.stop()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "29cddabb",
   "metadata": {},
   "outputs": [],
   "source": [
    "# print(combine_params(consumes_messages, AIOKafkaConsumer).__doc__)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d33f1b9a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[INFO] fastkafka._components.test_dependencies: Java is already installed.\n",
      "[INFO] fastkafka._components.test_dependencies: Kafka is installed.\n",
      "[INFO] fastkafka._testing.apache_kafka_broker: Starting zookeeper...\n",
      "[INFO] fastkafka._testing.apache_kafka_broker: Starting kafka...\n",
      "[INFO] fastkafka._testing.apache_kafka_broker: Local Kafka broker up and running on 127.0.0.1:9092\n",
      "[INFO] aiokafka.consumer.subscription_state: Updating subscribed topics to: frozenset({'test_consume_messages'})\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "27758f3f1c484200b92d0a71907d9563",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "producing to 'test_consume_messages':   0%|          | 0/120000 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[INFO] aiokafka.consumer.group_coordinator: Metadata for topic has changed from {} to {'test_consume_messages': 1}. \n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "85c976608f344a4bbfbb862447afa55f",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "consuming from 'test_consume_messages':   0%|          | 0/108000 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[INFO] fastkafka._components._subprocess: terminate_asyncio_process(): Terminating the process 4073...\n",
      "[INFO] fastkafka._components._subprocess: terminate_asyncio_process(): Process 4073 terminated.\n",
      "[INFO] fastkafka._components._subprocess: terminate_asyncio_process(): Terminating the process 3701...\n",
      "[INFO] fastkafka._components._subprocess: terminate_asyncio_process(): Process 3701 terminated.\n"
     ]
    }
   ],
   "source": [
    "# Spin up a local broker, then run the producer and the consumer\n",
    "# concurrently in one task group. The consumer waits for only 90% of the\n",
    "# produced messages so the cell finishes even while some are in flight.\n",
    "async with ApacheKafkaBroker(topics=[\"test_consume_messages\"], listener_port=9992) as bootstrap_server:\n",
    "    produce_kwargs = dict(\n",
    "        msgs=msgs,\n",
    "        topic=\"test_consume_messages\",\n",
    "        bootstrap_servers=bootstrap_server,\n",
    "    )\n",
    "    consume_kwargs = dict(\n",
    "        msgs_count=int(len(msgs) * 0.9),\n",
    "        topic=\"test_consume_messages\",\n",
    "        bootstrap_servers=bootstrap_server,\n",
    "    )\n",
    "    async with anyio.create_task_group() as tg:\n",
    "        tg.start_soon(lambda d: produce_messages(**d), produce_kwargs)\n",
    "        tg.start_soon(lambda d: consumes_messages(**d), consume_kwargs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "17d70e10",
   "metadata": {},
   "outputs": [],
   "source": [
    "# | export\n",
    "\n",
    "\n",
    "@delegates(AIOKafkaConsumer)\n",
    "@delegates(AIOKafkaProducer, keep=True)\n",
    "async def produce_and_consume_messages(\n",
    "    *,\n",
    "    produce_topic: str,\n",
    "    consume_topic: str,\n",
    "    msgs: List[Any],\n",
    "    msgs_count: int,\n",
    "    **kwargs: Dict[str, Any],\n",
    ") -> None:\n",
    "    \"\"\"produce_and_consume_messages\n",
    "\n",
    "    Args:\n",
    "        produce_topic: Topic name for producing messages\n",
    "        consume_topic: Topic name for consuming messages\n",
    "        msgs: a list of messages to produce\n",
    "        msgs_count: number of messages to consume before returning\n",
    "        bootstrap_servers (str, list(str)): a ``host[:port]`` string (or list of\n",
    "            ``host[:port]`` strings) that the consumer should contact to bootstrap\n",
    "            initial cluster metadata.\n",
    "\n",
    "            This does not have to be the full node list.\n",
    "            It just needs to have at least one broker that will respond to a\n",
    "            Metadata API Request. Default port is 9092. If no servers are\n",
    "            specified, will default to ``localhost:9092``.\n",
    "        client_id (str): a name for this client. This string is passed in\n",
    "            each request to servers and can be used to identify specific\n",
    "            server-side log entries that correspond to this client. Also\n",
    "            submitted to :class:`~.consumer.group_coordinator.GroupCoordinator`\n",
    "            for logging with respect to consumer group administration. Default:\n",
    "            ``aiokafka-{version}``\n",
    "        group_id (str or None): name of the consumer group to join for dynamic\n",
    "            partition assignment (if enabled), and to use for fetching and\n",
    "            committing offsets. If None, auto-partition assignment (via\n",
    "            group coordinator) and offset commits are disabled.\n",
    "            Default: None\n",
    "        key_deserializer (Callable): Any callable that takes a\n",
    "            raw message key and returns a deserialized key.\n",
    "        value_deserializer (Callable, Optional): Any callable that takes a\n",
    "            raw message value and returns a deserialized value.\n",
    "        fetch_min_bytes (int): Minimum amount of data the server should\n",
    "            return for a fetch request, otherwise wait up to\n",
    "            `fetch_max_wait_ms` for more data to accumulate. Default: 1.\n",
    "        fetch_max_bytes (int): The maximum amount of data the server should\n",
    "            return for a fetch request. This is not an absolute maximum, if\n",
    "            the first message in the first non-empty partition of the fetch\n",
    "            is larger than this value, the message will still be returned\n",
    "            to ensure that the consumer can make progress. NOTE: consumer\n",
    "            performs fetches to multiple brokers in parallel so memory\n",
    "            usage will depend on the number of brokers containing\n",
    "            partitions for the topic.\n",
    "            Supported Kafka version >= 0.10.1.0. Default: 52428800 (50 Mb).\n",
    "        fetch_max_wait_ms (int): The maximum amount of time in milliseconds\n",
    "            the server will block before answering the fetch request if\n",
    "            there isn't sufficient data to immediately satisfy the\n",
    "            requirement given by fetch_min_bytes. Default: 500.\n",
    "        max_partition_fetch_bytes (int): The maximum amount of data\n",
    "            per-partition the server will return. The maximum total memory\n",
    "            used for a request ``= #partitions * max_partition_fetch_bytes``.\n",
    "            This size must be at least as large as the maximum message size\n",
    "            the server allows or else it is possible for the producer to\n",
    "            send messages larger than the consumer can fetch. If that\n",
    "            happens, the consumer can get stuck trying to fetch a large\n",
    "            message on a certain partition. Default: 1048576.\n",
    "        max_poll_records (int): The maximum number of records returned in a\n",
    "            single call to :meth:`.getmany`. Defaults ``None``, no limit.\n",
    "        request_timeout_ms (int): Client request timeout in milliseconds.\n",
    "            Default: 40000.\n",
    "        retry_backoff_ms (int): Milliseconds to backoff when retrying on\n",
    "            errors. Default: 100.\n",
    "        auto_offset_reset (str): A policy for resetting offsets on\n",
    "            :exc:`.OffsetOutOfRangeError` errors: ``earliest`` will move to the oldest\n",
    "            available message, ``latest`` will move to the most recent, and\n",
    "            ``none`` will raise an exception so you can handle this case.\n",
    "            Default: ``latest``.\n",
    "        enable_auto_commit (bool): If true the consumer's offset will be\n",
    "            periodically committed in the background. Default: True.\n",
    "        auto_commit_interval_ms (int): milliseconds between automatic\n",
    "            offset commits, if enable_auto_commit is True. Default: 5000.\n",
    "        check_crcs (bool): Automatically check the CRC32 of the records\n",
    "            consumed. This ensures no on-the-wire or on-disk corruption to\n",
    "            the messages occurred. This check adds some overhead, so it may\n",
    "            be disabled in cases seeking extreme performance. Default: True\n",
    "        metadata_max_age_ms (int): The period of time in milliseconds after\n",
    "            which we force a refresh of metadata even if we haven't seen any\n",
    "            partition leadership changes to proactively discover any new\n",
    "            brokers or partitions. Default: 300000\n",
    "        partition_assignment_strategy (list): List of objects to use to\n",
    "            distribute partition ownership amongst consumer instances when\n",
    "            group management is used. This preference is implicit in the order\n",
    "            of the strategies in the list. When assignment strategy changes:\n",
    "            to support a change to the assignment strategy, new versions must\n",
    "            enable support both for the old assignment strategy and the new\n",
    "            one. The coordinator will choose the old assignment strategy until\n",
    "            all members have been updated. Then it will choose the new\n",
    "            strategy. Default: [:class:`.RoundRobinPartitionAssignor`]\n",
    "        max_poll_interval_ms (int): Maximum allowed time between calls to\n",
    "            consume messages (e.g., :meth:`.getmany`). If this interval\n",
    "            is exceeded the consumer is considered failed and the group will\n",
    "            rebalance in order to reassign the partitions to another consumer\n",
    "            group member. If API methods block waiting for messages, that time\n",
    "            does not count against this timeout. See `KIP-62`_ for more\n",
    "            information. Default 300000\n",
    "        rebalance_timeout_ms (int): The maximum time server will wait for this\n",
    "            consumer to rejoin the group in a case of rebalance. In Java client\n",
    "            this behaviour is bound to `max.poll.interval.ms` configuration,\n",
    "            but as ``aiokafka`` will rejoin the group in the background, we\n",
    "            decouple this setting to allow finer tuning by users that use\n",
    "            :class:`.ConsumerRebalanceListener` to delay rebalancing. Defaults\n",
    "            to ``session_timeout_ms``\n",
    "        session_timeout_ms (int): Client group session and failure detection\n",
    "            timeout. The consumer sends periodic heartbeats\n",
    "            (`heartbeat.interval.ms`) to indicate its liveness to the broker.\n",
    "            If no heartbeats are received by the broker for a group member within\n",
    "            the session timeout, the broker will remove the consumer from the\n",
    "            group and trigger a rebalance. The allowed range is configured with\n",
    "            the **broker** configuration properties\n",
    "            `group.min.session.timeout.ms` and `group.max.session.timeout.ms`.\n",
    "            Default: 10000\n",
    "        heartbeat_interval_ms (int): The expected time in milliseconds\n",
    "            between heartbeats to the consumer coordinator when using\n",
    "            Kafka's group management feature. Heartbeats are used to ensure\n",
    "            that the consumer's session stays active and to facilitate\n",
    "            rebalancing when new consumers join or leave the group. The\n",
    "            value must be set lower than `session_timeout_ms`, but typically\n",
    "            should be set no higher than 1/3 of that value. It can be\n",
    "            adjusted even lower to control the expected time for normal\n",
    "            rebalances. Default: 3000\n",
    "        consumer_timeout_ms (int): maximum wait timeout for background fetching\n",
    "            routine. Mostly defines how fast the system will see rebalance and\n",
    "            request new data for new partitions. Default: 200\n",
    "        api_version (str): specify which kafka API version to use.\n",
    "            :class:`AIOKafkaConsumer` supports Kafka API versions >=0.9 only.\n",
    "            If set to ``auto``, will attempt to infer the broker version by\n",
    "            probing various APIs. Default: ``auto``\n",
    "        security_protocol (str): Protocol used to communicate with brokers.\n",
    "            Valid values are: ``PLAINTEXT``, ``SSL``. Default: ``PLAINTEXT``.\n",
    "        ssl_context (ssl.SSLContext): pre-configured :class:`~ssl.SSLContext`\n",
    "            for wrapping socket connections. Directly passed into asyncio's\n",
    "            :meth:`~asyncio.loop.create_connection`. For more information see\n",
    "            :ref:`ssl_auth`. Default: None.\n",
    "        exclude_internal_topics (bool): Whether records from internal topics\n",
    "            (such as offsets) should be exposed to the consumer. If set to True\n",
    "            the only way to receive records from an internal topic is\n",
    "            subscribing to it. Requires 0.10+ Default: True\n",
    "        connections_max_idle_ms (int): Close idle connections after the number\n",
    "            of milliseconds specified by this config. Specifying `None` will\n",
    "            disable idle checks. Default: 540000 (9 minutes).\n",
    "        isolation_level (str): Controls how to read messages written\n",
    "            transactionally.\n",
    "\n",
    "            If set to ``read_committed``, :meth:`.getmany` will only return\n",
    "            transactional messages which have been committed.\n",
    "            If set to ``read_uncommitted`` (the default), :meth:`.getmany` will\n",
    "            return all messages, even transactional messages which have been\n",
    "            aborted.\n",
    "\n",
    "            Non-transactional messages will be returned unconditionally in\n",
    "            either mode.\n",
    "\n",
    "            Messages will always be returned in offset order. Hence, in\n",
    "            `read_committed` mode, :meth:`.getmany` will only return\n",
    "            messages up to the last stable offset (LSO), which is the one less\n",
    "            than the offset of the first open transaction. In particular any\n",
    "            messages appearing after messages belonging to ongoing transactions\n",
    "            will be withheld until the relevant transaction has been completed.\n",
    "            As a result, `read_committed` consumers will not be able to read up\n",
    "            to the high watermark when there are in flight transactions.\n",
    "            Further, when in `read_committed` the seek_to_end method will\n",
    "            return the LSO. See method docs below. Default: ``read_uncommitted``\n",
    "        sasl_mechanism (str): Authentication mechanism when security_protocol\n",
    "            is configured for ``SASL_PLAINTEXT`` or ``SASL_SSL``. Valid values are:\n",
    "            ``PLAIN``, ``GSSAPI``, ``SCRAM-SHA-256``, ``SCRAM-SHA-512``,\n",
    "            ``OAUTHBEARER``.\n",
    "            Default: ``PLAIN``\n",
    "        sasl_plain_username (str): username for SASL ``PLAIN`` authentication.\n",
    "            Default: None\n",
    "        sasl_plain_password (str): password for SASL ``PLAIN`` authentication.\n",
    "            Default: None\n",
    "        sasl_oauth_token_provider (:class:`~aiokafka.abc.AbstractTokenProvider`): OAuthBearer token provider instance. (See :mod:`kafka.oauth.abstract`).\n",
    "            Default: None\n",
    "        key_serializer (Callable): used to convert user-supplied keys to bytes\n",
    "            If not :data:`None`, called as ``f(key),`` should return\n",
    "            :class:`bytes`.\n",
    "            Default: :data:`None`.\n",
    "        value_serializer (Callable): used to convert user-supplied message\n",
    "            values to :class:`bytes`. If not :data:`None`, called as\n",
    "            ``f(value)``, should return :class:`bytes`.\n",
    "            Default: :data:`None`.\n",
    "        acks (Any): one of ``0``, ``1``, ``all``. The number of acknowledgments\n",
    "            the producer requires the leader to have received before considering a\n",
    "            request complete. This controls the durability of records that are\n",
    "            sent. The following settings are common:\n",
    "\n",
    "            * ``0``: Producer will not wait for any acknowledgment from the server\n",
    "              at all. The message will immediately be added to the socket\n",
    "              buffer and considered sent. No guarantee can be made that the\n",
    "              server has received the record in this case, and the retries\n",
    "              configuration will not take effect (as the client won't\n",
    "              generally know of any failures). The offset given back for each\n",
    "              record will always be set to -1.\n",
    "            * ``1``: The broker leader will write the record to its local log but\n",
    "              will respond without awaiting full acknowledgement from all\n",
    "              followers. In this case should the leader fail immediately\n",
    "              after acknowledging the record but before the followers have\n",
    "              replicated it then the record will be lost.\n",
    "            * ``all``: The broker leader will wait for the full set of in-sync\n",
    "              replicas to acknowledge the record. This guarantees that the\n",
    "              record will not be lost as long as at least one in-sync replica\n",
    "              remains alive. This is the strongest available guarantee.\n",
    "\n",
    "            If unset, defaults to ``acks=1``. If `enable_idempotence` is\n",
    "            :data:`True` defaults to ``acks=all``\n",
    "        compression_type (str): The compression type for all data generated by\n",
    "            the producer. Valid values are ``gzip``, ``snappy``, ``lz4``, ``zstd``\n",
    "            or :data:`None`.\n",
    "            Compression is of full batches of data, so the efficacy of batching\n",
    "            will also impact the compression ratio (more batching means better\n",
    "            compression). Default: :data:`None`.\n",
    "        max_batch_size (int): Maximum size of buffered data per partition.\n",
    "            After this amount :meth:`send` coroutine will block until batch is\n",
    "            drained.\n",
    "            Default: 16384\n",
    "        linger_ms (int): The producer groups together any records that arrive\n",
    "            in between request transmissions into a single batched request.\n",
    "            Normally this occurs only under load when records arrive faster\n",
    "            than they can be sent out. However in some circumstances the client\n",
    "            may want to reduce the number of requests even under moderate load.\n",
    "            This setting accomplishes this by adding a small amount of\n",
    "            artificial delay; that is, if first request is processed faster,\n",
    "            than `linger_ms`, producer will wait ``linger_ms - process_time``.\n",
    "            Default: 0 (i.e. no delay).\n",
    "        partitioner (Callable): Callable used to determine which partition\n",
    "            each message is assigned to. Called (after key serialization):\n",
    "            ``partitioner(key_bytes, all_partitions, available_partitions)``.\n",
    "            The default partitioner implementation hashes each non-None key\n",
    "            using the same murmur2 algorithm as the Java client so that\n",
    "            messages with the same key are assigned to the same partition.\n",
    "            When a key is :data:`None`, the message is delivered to a random partition\n",
    "            (filtered to partitions with available leaders only, if possible).\n",
    "        max_request_size (int): The maximum size of a request. This is also\n",
    "            effectively a cap on the maximum record size. Note that the server\n",
    "            has its own cap on record size which may be different from this.\n",
    "            This setting will limit the number of record batches the producer\n",
    "            will send in a single request to avoid sending huge requests.\n",
    "            Default: 1048576.\n",
    "        enable_idempotence (bool): When set to :data:`True`, the producer will\n",
    "            ensure that exactly one copy of each message is written in the\n",
    "            stream. If :data:`False`, producer retries due to broker failures,\n",
    "            etc., may write duplicates of the retried message in the stream.\n",
    "            Note that enabling idempotence acks to set to ``all``. If it is not\n",
    "            explicitly set by the user it will be chosen. If incompatible\n",
    "            values are set, a :exc:`ValueError` will be thrown.\n",
    "            New in version 0.5.0.\n",
    "        sasl_oauth_token_provider (: class:`~aiokafka.abc.AbstractTokenProvider`):\n",
    "            OAuthBearer token provider instance. (See\n",
    "            :mod:`kafka.oauth.abstract`).\n",
    "            Default: :data:`None`\n",
    "        *topics (list(str)): optional list of topics to subscribe to. If not set,\n",
    "            call :meth:`.subscribe` or :meth:`.assign` before consuming records.\n",
    "            Passing topics directly is same as calling :meth:`.subscribe` API.\n",
    "    \"\"\"\n",
    "    async with anyio.create_task_group() as tg:\n",
    "        tg.start_soon(\n",
    "            lambda d: produce_messages(**d),\n",
    "            dict(msgs=msgs, topic=produce_topic, **kwargs),\n",
    "        )\n",
    "        tg.start_soon(\n",
    "            lambda d: consumes_messages(**d),\n",
    "            dict(\n",
    "                msgs_count=msgs_count,\n",
    "                topic=consume_topic,\n",
    "                **kwargs,\n",
    "            ),\n",
    "        )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a5bae758",
   "metadata": {},
   "outputs": [],
   "source": [
    "# print(combine_params(combine_params(produce_and_consume_messages, AIOKafkaProducer), AIOKafkaConsumer).__doc__)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a5f5f9c7",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[INFO] fastkafka._components.test_dependencies: Java is already installed.\n",
      "[INFO] fastkafka._components.test_dependencies: Kafka is installed.\n",
      "[INFO] fastkafka._testing.apache_kafka_broker: Starting zookeeper...\n",
      "[INFO] fastkafka._testing.apache_kafka_broker: Starting kafka...\n",
      "[INFO] fastkafka._testing.apache_kafka_broker: Local Kafka broker up and running on 127.0.0.1:9092\n",
      "[INFO] aiokafka.consumer.subscription_state: Updating subscribed topics to: frozenset({'produce_and_consume_messages'})\n",
      "[INFO] aiokafka.consumer.group_coordinator: Metadata for topic has changed from {} to {'produce_and_consume_messages': 1}. \n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "036194e4ba554428911cd5ba1f585146",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "producing to 'produce_and_consume_messages':   0%|          | 0/120000 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "37027e75a7a1462389ee8060129b144c",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "consuming from 'produce_and_consume_messages':   0%|          | 0/114000 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[INFO] fastkafka._components._subprocess: terminate_asyncio_process(): Terminating the process 5277...\n",
      "[INFO] fastkafka._components._subprocess: terminate_asyncio_process(): Process 5277 terminated.\n",
      "[INFO] fastkafka._components._subprocess: terminate_asyncio_process(): Terminating the process 4904...\n",
      "[INFO] fastkafka._components._subprocess: terminate_asyncio_process(): Process 4904 terminated.\n"
     ]
    }
   ],
   "source": [
    "async with ApacheKafkaBroker(\n",
    "    topics=[\"produce_and_consume_messages\"], listener_port=9992\n",
    ") as bootstrap_server:\n",
    "    await produce_and_consume_messages(\n",
    "        produce_topic=\"produce_and_consume_messages\",\n",
    "        consume_topic=\"produce_and_consume_messages\",\n",
    "        msgs=msgs,\n",
    "        msgs_count=int(len(msgs) * 0.95),\n",
    "        bootstrap_servers=bootstrap_server,\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3a544d7b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# | export\n",
    "\n",
    "\n",
    "def get_collapsible_admonition(\n",
    "    code_block: str, *, name: Optional[str] = None\n",
    ") -> Markdown:\n",
    "    \"\"\"\n",
    "    Generate a collapsible admonition containing a code block as an example.\n",
    "\n",
    "    Args:\n",
    "        code_block: The code block to be included in the example.\n",
    "        name: Optional name or title for the example.\n",
    "            Default is None.\n",
    "\n",
    "    Returns:\n",
    "        A Markdown object representing the collapsible admonition\n",
    "        with the provided code block.\n",
    "    \"\"\"\n",
    "    alt_name = \"\" if name is None else name\n",
    "    intro = f'This example contains the content of the file \"{alt_name}\":'\n",
    "    return Markdown(\n",
    "        f\"??? Example \\n\\n    {intro}\\n\\n\"\n",
    "        + textwrap.indent(f\"```python\\n{code_block}\\n```\", prefix=\"    \")\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2f3f3cbe",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/markdown": [
       "??? Example \n",
       "\n",
       "    This example contains the content of the file \"server.py\":\n",
       "\n",
       "    ```python\n",
       "    print('hello')\n",
       "    ```"
      ],
      "text/plain": [
       "<IPython.core.display.Markdown object>"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "get_collapsible_admonition(\"print('hello')\", name=\"server.py\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2383b6b5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# | export\n",
    "\n",
    "\n",
    "def source2markdown(o: Union[str, Callable[..., Any]]) -> Markdown:\n",
    "    \"\"\"Converts source code into Markdown for displaying it with Jupyter notebook\n",
    "\n",
    "    Args:\n",
    "        o: source code\n",
    "    \"\"\"\n",
    "    s = inspect.getsource(o) if callable(o) else o\n",
    "    return Markdown(\n",
    "        f\"\"\"\n",
    "```python\n",
    "{s}\n",
    "```\n",
    "\"\"\"\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "30ecd10b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/markdown": [
       "\n",
       "```python\n",
       "def f():\n",
       "    pass\n",
       "\n",
       "```\n"
      ],
      "text/plain": [
       "<IPython.core.display.Markdown object>"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def f():\n",
    "    pass\n",
    "\n",
    "\n",
    "source2markdown(f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "317e3f0c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# | export\n",
    "\n",
    "\n",
    "async def wait_for_get_url(\n",
    "    url: str, timeout: Optional[int] = None, **kwargs: Dict[str, Any]\n",
    ") -> aiohttp.ClientResponse:\n",
    "    \"\"\"\n",
    "    Asynchronously wait for a GET request to a specified URL with an optional timeout.\n",
    "\n",
    "    Args:\n",
    "        url: The URL to send the GET request to.\n",
    "        timeout: Optional maximum number of seconds to wait\n",
    "            for a response. If not provided, there is no timeout. Default is None.\n",
    "        **kwargs: Additional keyword arguments to be passed to the tqdm progress bar,\n",
    "            if a timeout is provided.\n",
    "\n",
    "    Returns:\n",
    "        The aiohttp.ClientResponse response object for the GET request.\n",
    "\n",
    "    Raises:\n",
    "        TimeoutError: If the timeout is reached and the URL couldn't be fetched within\n",
    "            the specified time.\n",
    "    \"\"\"\n",
    "    t0 = datetime.now()\n",
    "    if timeout is not None:\n",
    "        pbar = tqdm(total=timeout, **kwargs)\n",
    "    try:\n",
    "        async with aiohttp.ClientSession() as session:\n",
    "            while True:\n",
    "                try:\n",
    "                    async with session.get(url) as response:\n",
    "                        if timeout is not None:\n",
    "                            pbar.update(pbar.total - pbar.n)\n",
    "                        return response\n",
    "                except aiohttp.ClientConnectorError as e:\n",
    "                    if timeout is not None:\n",
    "                        if pbar.total - pbar.n > 1:\n",
    "                            pbar.update(1)\n",
    "                    await asyncio.sleep(1)\n",
    "\n",
    "                if timeout is not None and datetime.now() - t0 >= timedelta(\n",
    "                    seconds=timeout\n",
    "                ):\n",
    "                    raise TimeoutError(\n",
    "                        f\"Could not fetch url '{url}' for more than {timeout} seconds\"\n",
    "                    )\n",
    "    finally:\n",
    "        if timeout is not None:\n",
    "            pbar.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6f284dbc",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "62d27726c5ae46cfbf655d30826dff78",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "should pass:   0%|          | 0/5 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "4f2e4486bcb94cbdaf3eb374e613a8b7",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "expected to fail:   0%|          | 0/5 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "<ExceptionInfo TimeoutError(\"Could not fetch url 'https://0.0.0.0:4000' for more than 5 seconds\") tblen=2>"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "await wait_for_get_url(\"https://python.org\", timeout=5, desc=\"should pass\")\n",
    "\n",
    "with pytest.raises(TimeoutError) as e:\n",
    "    await wait_for_get_url(\"https://0.0.0.0:4000\", timeout=5, desc=\"expected to fail\")\n",
    "e"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a54b935c",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "17ebec05",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "python3",
   "language": "python",
   "name": "python3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
