import asyncio
import uuid
from datetime import timedelta
from itertools import count

import pytest

from pgqueuer import helpers as pg_helpers
from pgqueuer.buffers import JobStatusLogBuffer
from pgqueuer.models import JOB_STATUS, Job, TracebackRecord
from test.helpers import mocked_job


def job_faker(
    cnt: count = count(),
    queue_manager_id: uuid.UUID = uuid.uuid4(),
) -> Job:
    """Create a picked test Job with a unique, monotonically increasing id.

    NOTE: the mutable/call-at-def-time default arguments are deliberate.
    The single ``count()`` instance is shared across all calls, so every
    faked job gets a distinct id, and one ``uuid4()`` is generated at
    import time so all faked jobs share the same queue manager id.
    """
    return mocked_job(
        id=next(cnt),
        status="picked",
        entrypoint="foo",
        priority=0,
        payload=None,
        queue_manager_id=queue_manager_id,
    )


@pytest.mark.parametrize("max_size", (1, 2, 3, 5, 64))
async def test_job_buffer_max_size(max_size: int) -> None:
    """Nothing is flushed below max_size; everything is delivered once reached."""
    collected: list = []

    async def collect(batch: list) -> None:
        collected.extend(batch)

    async with JobStatusLogBuffer(
        max_size=max_size,
        timeout=timedelta(seconds=100),
        callback=collect,
    ) as buffer:
        # Stay one item below the threshold; no flush must happen yet.
        for _ in range(max_size - 1):
            await buffer.add((job_faker(), "successful", None))
            assert not collected

        await buffer.add((job_faker(), "successful", None))

    # Exiting the context manager forces a flush of anything outstanding.
    assert len(collected) == max_size


@pytest.mark.parametrize("N", (5, 64))
@pytest.mark.parametrize("timeout", (timedelta(seconds=0.01), timedelta(seconds=0.001)))
async def test_job_buffer_timeout(
    N: int,
    timeout: timedelta,
) -> None:
    """Items below max_size are still flushed once the timeout elapses."""
    received: list = []

    async def sink(batch: list) -> None:
        received.extend(batch)

    async with JobStatusLogBuffer(
        max_size=N * 2,  # large enough that size alone never triggers a flush
        timeout=timeout,
        callback=sink,
    ) as buffer:
        for _ in range(N):
            await buffer.add((job_faker(), "successful", None))
            assert not received

        # Sleep slightly past the timeout so the time-based flush can fire.
        await asyncio.sleep(timeout.total_seconds() * 1.1)

    assert len(received) == N


@pytest.mark.parametrize("max_size", (2, 3, 5, 64))  # Adjusted to max_size >=2
async def test_job_buffer_flush_on_exit(max_size: int) -> None:
    """
    Test that the buffer flushes all remaining items upon exiting the context,
    even if max_size is not reached and timeout hasn't occurred.
    """
    flushed: list = []

    async def on_flush(batch: list) -> None:
        flushed.extend(batch)

    async with JobStatusLogBuffer(
        max_size=max_size,
        timeout=timedelta(seconds=100),
        callback=on_flush,
    ) as buffer:
        # Add fewer items than required to trigger a size-based flush.
        for _ in range(max_size - 2):
            await buffer.add((job_faker(), "successful", None))
            assert not flushed

    # Exiting the context must flush whatever is left over.
    assert len(flushed) == max_size - 2


@pytest.mark.parametrize("max_size", (1, 2, 3, 5, 64))
@pytest.mark.parametrize("flushes", (1, 2, 3, 5, 64))
async def test_job_buffer_multiple_flushes(max_size: int, flushes: int) -> None:
    """
    Test that the buffer can handle multiple flushes when more items than max_size are added.
    """
    batches: list = []

    async def on_flush(batch: list) -> None:
        batches.append(batch)

    async with JobStatusLogBuffer(
        max_size=max_size,
        timeout=timedelta(seconds=100),
        callback=on_flush,
    ) as buffer:
        for _ in range(flushes):
            # Fill to capacity, then flush explicitly.
            for _ in range(max_size):
                await buffer.add((job_faker(), "successful", None))
            await buffer.flush()

    # Exactly one callback invocation per flush round.
    assert len(batches) == flushes


@pytest.mark.parametrize("max_size", (1, 2, 3, 5, 64))
async def test_job_buffer_flush_on_exception(max_size: int) -> None:
    """
    Test that the buffer handles exceptions in the callback gracefully and retries.
    """
    delivered: list = []
    attempts = 0

    async def flaky_callback(batch: list) -> None:
        nonlocal attempts
        attempts += 1
        # Only the very first flush attempt fails.
        if attempts < 2:
            raise RuntimeError("Simulated flush failure")
        delivered.extend(batch)

    async with JobStatusLogBuffer(
        max_size=max_size,
        timeout=timedelta(seconds=0.01),
        callback=flaky_callback,
    ) as buffer:
        for _ in range(max_size):
            await buffer.add((job_faker(), "successful", None))

        # Give the buffer time to attempt the flush and retry it.
        await asyncio.sleep(0.02)

    # First attempt failed, the retry succeeded.
    assert attempts == 2
    assert len(delivered) == max_size


@pytest.mark.parametrize("max_size", (1, 2, 3, 5, 64))
async def test_job_buffer_flush_order(max_size: int) -> None:
    """
    Test that items are flushed in the order they were added.

    Items are full (job, status, traceback) 3-tuples matching the shape the
    buffer expects, so no ``type: ignore`` is needed on ``add``.
    """
    helper_buffer = []

    async def helper(x: list) -> None:
        helper_buffer.extend(x)

    async with JobStatusLogBuffer(
        max_size=max_size,
        timeout=timedelta(seconds=100),
        callback=helper,
    ) as buffer:
        items = [(job_faker(), "successful", None) for _ in range(max_size)]
        for item in items:
            await buffer.add(item)

    # Flushed items must preserve insertion order.
    assert helper_buffer == items


@pytest.mark.parametrize("max_size", (1, 2, 3, 5, 64))
async def test_job_buffer_concurrent_adds(max_size: int) -> None:
    """
    Test that the buffer can handle concurrent additions without losing items.
    """
    collected: list = []

    async def collect(batch: list) -> None:
        collected.extend(batch)

    async with JobStatusLogBuffer(
        max_size=max_size,
        timeout=timedelta(seconds=100),
        callback=collect,
    ) as buffer:

        async def producer(n: int) -> None:
            for _ in range(n):
                await buffer.add((job_faker(), "successful", None))

        # Four concurrent producers, each adding max_size // 2 items.
        await asyncio.gather(*(producer(max_size // 2) for _ in range(4)))

    # No item added by any producer may be lost.
    assert len(collected) == (max_size // 2) * 4


async def test_job_buffer_empty_flush() -> None:
    """
    Test that flushing an empty buffer does not cause any issues.
    """
    seen: list = []

    async def record(batch: list) -> None:
        seen.extend(batch)

    async with JobStatusLogBuffer(
        max_size=10,
        timeout=timedelta(seconds=0.1),
        callback=record,
    ):
        # Add nothing; the teardown flush should be a harmless no-op.
        pass

    assert not seen


@pytest.mark.parametrize("max_size", (1, 2, 3, 5, 64))
async def test_job_buffer_reuse_after_flush(max_size: int) -> None:
    """
    Test that the buffer can be reused after a flush has occurred.
    """
    drained: list = []

    async def drain(batch: list) -> None:
        drained.extend(batch)

    async with JobStatusLogBuffer(
        max_size=max_size,
        timeout=timedelta(seconds=100),
        callback=drain,
    ) as buffer:
        # Fill and flush twice; the second round must behave like the first.
        for _ in range(2):
            for _ in range(max_size):
                await buffer.add((job_faker(), "successful", None))
            await buffer.flush()
            assert len(drained) == max_size
            drained.clear()


@pytest.mark.parametrize("max_size", (1, 2, 3, 5, 64))
async def test_job_buffer_exception_during_flush(max_size: int) -> None:
    """
    Test that the buffer handles exceptions during flush without losing items.
    """
    delivered: list = []
    attempts = 0

    async def flaky_callback(batch: list) -> None:
        nonlocal attempts
        attempts += 1
        # The very first flush attempt fails; subsequent ones succeed.
        if attempts == 1:
            raise RuntimeError("Simulated flush failure")
        delivered.extend(batch)

    async with JobStatusLogBuffer(
        max_size=max_size,
        timeout=timedelta(seconds=0.01),
        callback=flaky_callback,
    ) as buffer:
        for _ in range(max_size):
            await buffer.add((job_faker(), "successful", None))

        # Allow time for the failed flush to be retried.
        await asyncio.sleep(0.02)

    # One failure followed by one successful retry; nothing is lost.
    assert attempts == 2
    assert len(delivered) == max_size


@pytest.mark.parametrize("max_size", (1, 2, 3, 5, 64))
async def test_job_buffer_callback_called_correctly(max_size: int) -> None:
    """
    Test that the callback is called with the correct items.

    Items are full (job, status, traceback) 3-tuples matching the shape the
    buffer expects, so no ``type: ignore`` is needed on ``add``.
    """
    items = [(job_faker(), "successful", None) for _ in range(max_size)]
    received_items = []

    async def helper(x: list) -> None:
        received_items.extend(x)

    async with JobStatusLogBuffer(
        max_size=max_size,
        timeout=timedelta(seconds=100),
        callback=helper,
    ) as buffer:
        for item in items:
            await buffer.add(item)

    # Every item must be forwarded to the callback, unchanged and in order.
    assert received_items == items


async def test_job_buffer_callback_exception_during_teardown() -> None:
    """Items remain queued when the flush callback keeps failing at teardown."""
    N = 10
    items: list[tuple[Job, JOB_STATUS, None]] = [
        (job_faker(), "successful", None) for _ in range(N)
    ]

    async def always_failing(_: object) -> None:
        raise ValueError

    async with JobStatusLogBuffer(
        max_size=N**2,  # max size must be gt. N.
        timeout=timedelta(seconds=60),  # must be gt. run time of 'for loop' in the with block.
        callback=always_failing,
    ) as buffer:
        for item in items:
            await buffer.add(item)

    # Flush at exit was unable to deliver; the buffer must still hold everything.
    assert buffer.events.qsize() == N


async def test_job_buffer_retry_uses_jitter(monkeypatch: pytest.MonkeyPatch) -> None:
    """The retry delay comes from timeout_with_jitter and is passed to asyncio.sleep."""
    recorded_sleeps: list[float] = []

    async def capture_sleep(delay: float) -> None:
        recorded_sleeps.append(delay)

    jitter_inputs: list[timedelta] = []

    def stub_jitter(delay: timedelta) -> timedelta:
        jitter_inputs.append(delay)
        return timedelta(milliseconds=5)

    monkeypatch.setattr(asyncio, "sleep", capture_sleep)
    monkeypatch.setattr(pg_helpers, "timeout_with_jitter", stub_jitter)

    async def failing_callback(_: list[LogEntry]) -> None:
        raise RuntimeError("flush failed")

    buffer = JobStatusLogBuffer(
        max_size=2,
        timeout=timedelta(seconds=1),
        callback=failing_callback,
    )

    await buffer.add((job_faker(), "successful", None))
    await buffer.flush()

    assert jitter_inputs
    # The stubbed jitter returns 5 ms, which must be forwarded to asyncio.sleep.
    assert recorded_sleeps == [pytest.approx(timedelta(milliseconds=5).total_seconds())]


async def test_job_buffer_retry_skips_jitter_on_shutdown(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Once shutdown is set, the retry path sleeps 0 and never computes jitter."""
    recorded_sleeps: list[float] = []

    async def capture_sleep(delay: float) -> None:
        recorded_sleeps.append(delay)

    def forbidden_jitter(_: timedelta) -> timedelta:
        raise AssertionError("timeout_with_jitter should not be used when shutdown")

    monkeypatch.setattr(asyncio, "sleep", capture_sleep)
    monkeypatch.setattr(pg_helpers, "timeout_with_jitter", forbidden_jitter)

    async def failing_callback(_: list[LogEntry]) -> None:
        raise RuntimeError("flush failed")

    buffer = JobStatusLogBuffer(
        max_size=2,
        timeout=timedelta(seconds=1),
        callback=failing_callback,
    )

    await buffer.add((job_faker(), "successful", None))
    buffer.shutdown.set()
    await buffer.flush()

    assert recorded_sleeps == [0]


async def test_job_buffer_flush_returns_when_lock_held() -> None:
    """flush() must bail out without draining while another holder owns the lock."""
    callback_batches: list[list[LogEntry]] = []

    async def record(items: list[LogEntry]) -> None:
        callback_batches.append(items)

    buffer = JobStatusLogBuffer(
        max_size=1,
        timeout=timedelta(seconds=1),
        callback=record,
    )

    await buffer.add((job_faker(), "successful", None))
    # Hold the buffer's lock for the duration of the flush call.
    async with buffer.lock:
        await buffer.flush()

    # Nothing was delivered and the queued item is still pending.
    assert callback_batches == []
    assert buffer.events.qsize() == 1


# Shape of one buffered entry: (job, final status, optional traceback).
# NOTE(review): this alias lives at the bottom of the module yet is referenced
# in annotations of tests above. That only works because those annotations sit
# on inner `def` statements, which are evaluated when the tests run — after
# the module has fully loaded. Consider moving this to the top of the file.
LogEntry = tuple[Job, JOB_STATUS, TracebackRecord | None]
