{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import asyncio\n",
    "from random import random"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "async def echo(index: int):\n",
    "    \"\"\"Return ``index`` unchanged after a fixed 0.1 s async delay.\"\"\"\n",
    "    await asyncio.sleep(0.1)\n",
    "    return index\n",
    "\n",
    "\n",
    "async def echo_random_latency(index: int):\n",
    "    \"\"\"Return ``index`` after a random delay in [0, 1) s, so tasks complete out of order.\"\"\"\n",
    "    await asyncio.sleep(random())\n",
    "    return index"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Test Async Utils and Executor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from ragas.async_utils import as_completed, is_event_loop_running"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "assert is_event_loop_running() is True, \"is_event_loop_running() returned False\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "async def _run():\n",
    "    \"\"\"Await as_completed over three echo tasks and collect their results.\"\"\"\n",
    "    results = []\n",
    "    for task in as_completed([echo(1), echo(2), echo(3)], 3):\n",
    "        r = await task\n",
    "        results.append(r)\n",
    "    return results\n",
    "\n",
    "\n",
    "results = await _run()\n",
    "\n",
    "expected = [1, 2, 3]\n",
    "assert results == expected, f\"got: {results}, expected: {expected}\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Test Executor"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "_**NOTE**: Requires `ipywidgets` installed_"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from ragas.executor import Executor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# test that results come back in submission order\n",
    "executor = Executor(raise_exceptions=True)\n",
    "for i in range(10):\n",
    "    executor.submit(echo, i, name=f\"echo_{i}\")\n",
    "\n",
    "results = executor.results()  # await executor.aresults()\n",
    "assert results == list(range(10))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# repeat of the previous cell: a fresh Executor must again return results\n",
    "# in submission order (guards against state leaking between instances)\n",
    "executor = Executor(raise_exceptions=True)\n",
    "for i in range(10):\n",
    "    executor.submit(echo, i, name=f\"echo_{i}\")\n",
    "\n",
    "results = executor.results()  # await executor.aresults()\n",
    "assert results == list(range(10))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# test that even when tasks may complete out of order (random latency),\n",
    "# results() still returns them in submission order\n",
    "executor = Executor(batch_size=None)\n",
    "\n",
    "# add jobs to the executor\n",
    "for i in range(10):\n",
    "    executor.submit(echo_random_latency, i, name=f\"echo_order_{i}\")\n",
    "\n",
    "# Act\n",
    "results = executor.results()  # await executor.aresults()\n",
    "# Assert\n",
    "assert results == list(range(10))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Test that output order is preserved when jobs run in batches of 3\n",
    "executor = Executor(batch_size=3)\n",
    "\n",
    "# add jobs to the executor\n",
    "for i in range(10):\n",
    "    executor.submit(echo_random_latency, i, name=f\"echo_order_{i}\")\n",
    "\n",
    "# Act\n",
    "results = executor.results()  # await executor.aresults()\n",
    "# Assert\n",
    "assert results == list(range(10))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Test that disabling the progress display still returns ordered results\n",
    "executor = Executor(show_progress=False)\n",
    "\n",
    "# add jobs to the executor\n",
    "for i in range(10):\n",
    "    executor.submit(echo_random_latency, i, name=f\"echo_order_{i}\")\n",
    "\n",
    "# Act\n",
    "results = executor.results()  # await executor.aresults()\n",
    "# Assert\n",
    "assert results == list(range(10))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Test multiple submission sets on the same Executor instance\n",
    "executor = Executor(raise_exceptions=True)\n",
    "for i in range(1000):\n",
    "    executor.submit(asyncio.sleep, 0.01)\n",
    "\n",
    "results = executor.results()  # await executor.aresults()\n",
    "# NOTE(review): only asserts non-empty; if each run is known to yield\n",
    "# [None] * 1000, asserting that exactly would be a stronger check\n",
    "assert results, \"Results should be list of None\"\n",
    "\n",
    "# submit a second batch after the first has been collected\n",
    "for i in range(1000):\n",
    "    executor.submit(asyncio.sleep, 0.01)\n",
    "\n",
    "results = executor.results()  # await executor.aresults()\n",
    "assert results, \"Results should be list of None\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Test Metric"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import typing as t\n",
    "from dataclasses import dataclass, field\n",
    "\n",
    "from ragas.dataset_schema import SingleTurnSample\n",
    "from ragas.metrics.base import MetricType, SingleTurnMetric\n",
    "\n",
    "\n",
    "@dataclass\n",
    "class FakeMetric(SingleTurnMetric):\n",
    "    \"\"\"Minimal SingleTurnMetric stub that always scores 0.0 (for plumbing tests).\"\"\"\n",
    "\n",
    "    name: str = \"fake_metric\"\n",
    "    _required_columns: t.Dict[MetricType, t.Set[str]] = field(\n",
    "        default_factory=lambda: {MetricType.SINGLE_TURN: {\"user_input\", \"response\"}}\n",
    "    )\n",
    "\n",
    "    def init(self, run_config=None):\n",
    "        \"\"\"No-op: this stub needs no model or run-config initialization.\"\"\"\n",
    "        pass\n",
    "\n",
    "    async def _single_turn_ascore(self, sample: SingleTurnSample, callbacks) -> float:\n",
    "        \"\"\"Always return a fixed score of 0.0.\"\"\"\n",
    "        return 0.0\n",
    "\n",
    "\n",
    "fm = FakeMetric()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "score = await fm.single_turn_ascore(SingleTurnSample(user_input=\"a\", response=\"b\"))\n",
    "assert score == 0.0"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Test run_async_tasks"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from ragas.async_utils import run_async_tasks"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# run tasks unbatched\n",
    "tasks = [echo_random_latency(i) for i in range(10)]\n",
    "results = run_async_tasks(tasks, batch_size=None, show_progress=True)\n",
    "# Assert (sorted: this test does not pin the ordering of results)\n",
    "assert sorted(results) == list(range(10))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# run tasks batched in groups of 3\n",
    "tasks = [echo_random_latency(i) for i in range(10)]\n",
    "results = run_async_tasks(tasks, batch_size=3, show_progress=True)\n",
    "# Assert (sorted: this test does not pin the ordering of results)\n",
    "assert sorted(results) == list(range(10))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Test with the progress display disabled\n",
    "tasks = [echo_random_latency(i) for i in range(10)]\n",
    "results = run_async_tasks(tasks, batch_size=3, show_progress=False)\n",
    "# Assert (sorted: this test does not pin the ordering of results)\n",
    "assert sorted(results) == list(range(10))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}