#!/usr/bin/env python3

import asyncio
import aiohttp
import random
import time
import sys

from sys import argv

# REST endpoints exposed by the local node under test.
BLOCK_HEIGHT_URL = "http://localhost:3030/v2/testnet/block/height/latest"
GET_BLOCK_BASE_URL = "http://localhost:3030/v2/testnet/block"
# Inclusive range of block heights assumed to exist on the target node.
MIN_BLOCK = 1
MAX_BLOCK = 250
NUM_WORKERS = 8  # NOTE(review): appears unused — worker count comes from argv[2]; confirm before removing

# Statistics
# Shared counters, incremented by make_request() and read when the run ends.
stats = {
    'successful_requests': 0,
    'failed_requests': 0,
}


def write_results(mode, total_wait, endpoint):
    """Print a benchmark summary and append one JSON fragment to results.json.

    Reads the success count from the module-level `stats` dict and the
    snapshot description from `info.txt` (flattened to a single line).
    """
    num_ops = stats['successful_requests']
    throughput = num_ops / total_wait

    print(f'🎉 REST benchmark "{mode}" done! It took {total_wait} seconds'
          f' for {num_ops} ops. Throughput was {throughput} ops/s.')

    # Snapshot metadata produced earlier in the run; newlines stripped so the
    # text can be embedded inside the JSON "extra" string below.
    with open('info.txt', 'r') as info_file:
        snapshot_info = info_file.read().replace('\n', '')

    record = (f'{{ "name": "rest-{mode}", "unit": "ops/s", '
              f'"value": {throughput}, "extra": "num_ops={num_ops}, '
              f'total_wait={total_wait}, endpoint={endpoint}, '
              f'{snapshot_info}" }},\n')

    # Appended (not overwritten) so multiple benchmark modes accumulate.
    with open("results.json", "a") as results_file:
        results_file.write(record)


async def make_request(session, worker_id, mode):
    """Issue one GET for the given benchmark *mode* and update `stats`.

    Returns True on an HTTP 200 response, False on any other status,
    timeout, or exception (failures are counted and logged, never raised).
    """
    if mode == "get-block":
        # Checks that any block can be retrieved in a reasonable time.
        height = random.randint(MIN_BLOCK, MAX_BLOCK)
        url = f"{GET_BLOCK_BASE_URL}/{height}"
    elif mode == "get-latest-block":
        # Tests that the most recent block(s) are cached and can be retrieved even quicker.
        url = f"{GET_BLOCK_BASE_URL}/{MAX_BLOCK}"
    elif mode == "block-height":
        # Fetches the current block height as a baseline for the REST API speed.
        url = BLOCK_HEIGHT_URL
    else:
        raise RuntimeError(f'Unknown REST mode "{mode}"')

    request_timeout = aiohttp.ClientTimeout(total=100)

    try:
        async with session.get(url, timeout=request_timeout) as response:
            body = await response.read()

            if response.status != 200:
                print(f"Request failed: {body}")
                stats['failed_requests'] += 1
                return False

            stats['successful_requests'] += 1
            return True

    except asyncio.TimeoutError:
        print("ERROR: Request timed out!")
        stats['failed_requests'] += 1
        return False

    except Exception as err:
        print(f"ERROR: Got exception: {err}")
        stats['failed_requests'] += 1
        return False


async def worker(session, worker_id, mode, reqs_per_worker):
    """Issue up to `reqs_per_worker` sequential requests, stopping at the
    first failure. Returns a (successful, failed) tuple for this worker."""
    print(f"Worker {worker_id} starting...")
    succeeded = 0
    failed = 0

    for attempt in range(1, reqs_per_worker + 1):
        if not await make_request(session, worker_id, mode):
            # Abort this worker on the first failed request.
            failed += 1
            break

        succeeded += 1
        # Log every 10th request to show progress without flooding output.
        if attempt % 10 == 0:
            print(f'Worker {worker_id}: Finished {attempt} of '
                  f'{reqs_per_worker} requests')

    return succeeded, failed


async def main(mode, num_workers, reqs_per_worker):
    """Run the benchmark: spawn `num_workers` concurrent workers against the
    endpoint selected by `mode`, then report results.

    Exits the process: status 0 on success (after appending to results.json),
    status 1 if any request failed or any worker crashed.
    """

    if mode in ["get-block", "get-latest-block"]:
        base_url = GET_BLOCK_BASE_URL
    elif mode == "block-height":
        base_url = BLOCK_HEIGHT_URL
    else:
        raise RuntimeError(f'Unknown REST mode "{mode}"')

    # Fixed: the original split f-string produced a double space after the comma.
    print(f'Starting {num_workers} async workers for "{mode}", '
          f'each making {reqs_per_worker} requests...')
    print(f"Target endpoint: {base_url}")
    print("")

    start_time = time.time()

    # Create HTTP session with connection pooling
    connector = aiohttp.TCPConnector(
        limit=100,  # Total connection pool size
        limit_per_host=50,  # Max connections per host
        keepalive_timeout=30,
        enable_cleanup_closed=True
    )

    worker_crashed = False

    async with aiohttp.ClientSession(connector=connector) as session:
        # Create and run all workers concurrently.
        tasks = [worker(session, i + 1, mode, reqs_per_worker)
                 for i in range(num_workers)]
        results = await asyncio.gather(*tasks, return_exceptions=True)

        # A worker raising means its requests did not all complete; treat that
        # as a benchmark failure instead of silently reporting success.
        for i, result in enumerate(results):
            if isinstance(result, Exception):
                print(f"Worker {i+1} failed with exception: {result}")
                worker_crashed = True

    duration = time.time() - start_time

    if stats['failed_requests'] > 0 or worker_crashed:
        print(f'REST benchmark "{mode}" failed')
        sys.exit(1)

    write_results(mode, duration, base_url)
    sys.exit(0)

if __name__ == "__main__":
    # CLI arguments: benchmark mode, number of client tasks, and the number
    # of requests each client task makes.
    bench_mode, workers, per_worker = argv[1], int(argv[2]), int(argv[3])

    # Drive the async entry point.
    asyncio.run(main(bench_mode, workers, per_worker))
