#!/usr/bin/env python3
"""
ODBC benchmark for KWDB using pyodbc
This represents the traditional JDBC/ODBC baseline performance
"""

import pyodbc
import time
import threading
import queue
import argparse
import datetime
import os
import random

def worker(thread_id, n_rows, batch_size, conn_str, qps_counter, error_counter):
    """Worker thread for concurrent ODBC writes.

    Opens its own connection, writes ``n_rows`` rows in batches of at most
    ``batch_size``, and reports per-batch outcomes through the two queues:
    successfully written row counts go to ``qps_counter``, failed row counts
    to ``error_counter``. Failed batches are skipped (not retried) so the
    thread always makes forward progress.
    """
    cn = None
    try:
        cn = pyodbc.connect(conn_str, autocommit=False)
        cur = cn.cursor()

        # Enable fast_executemany once, up front (previously re-checked on
        # every batch). Not all pyodbc drivers expose it, hence the hasattr.
        if hasattr(cur, 'fast_executemany'):
            cur.fast_executemany = True

        # Hoisted loop-invariant SQL.
        # NOTE(review): table name is hard-coded here while main() parses a
        # --table option that is never used — confirm whether it should be
        # threaded through.
        sql = "INSERT INTO sdk_bench(ts, host, region, usage, temperature) VALUES (?, ?, ?, ?, ?)"

        # Use monotonic timestamp (same strategy as SDK benchmark)
        # Base: epoch 1727700000 (2024-09-30) + thread_id * 100000 seconds
        # Large per-thread offset and 1 second per point avoid any collision.
        base_epoch = 1727700000 + (thread_id * 100000)
        point_counter = 0

        rows_written = 0
        while rows_written < n_rows:
            this_batch = min(batch_size, n_rows - rows_written)
            data = []

            for _ in range(this_batch):
                # Advance 1 second per point to keep timestamps unique.
                ts_epoch = base_epoch + point_counter
                ts = datetime.datetime.fromtimestamp(ts_epoch, tz=datetime.timezone.utc)

                host = f"bench-host-{thread_id % 4}"
                region = f"bench-region-{thread_id % 2}"
                usage = random.uniform(0.0, 100.0)
                temperature = random.uniform(0.0, 100.0)

                data.append((ts, host, region, usage, temperature))
                point_counter += 1

            try:
                cur.executemany(sql, data)
                cn.commit()
                qps_counter.put(this_batch)
            except Exception as e:
                print(f"[thread-{thread_id}] Batch error: {str(e)[:100]}")
                error_counter.put(this_batch)
                # Best-effort rollback; the connection may already be dead.
                try:
                    cn.rollback()
                except Exception:
                    pass

            # Advance in both success and failure paths: failed batches are
            # skipped rather than retried.
            rows_written += this_batch

    except Exception as e:
        print(f"[thread-{thread_id}] Connection error: {str(e)}")
        # Report the whole quota as failed — we cannot tell how far we got.
        error_counter.put(n_rows)
    finally:
        # Always release the connection, even when an exception escaped the
        # write loop (the original leaked it on failure).
        if cn is not None:
            try:
                cn.close()
            except Exception:
                pass

def main():
    """Run the ODBC write benchmark and print a summary.

    Returns 0 on full success, 1 if any points failed to write (suitable
    as a process exit code).
    """
    parser = argparse.ArgumentParser(description='ODBC Benchmark for KWDB')
    parser.add_argument('--host', default=os.getenv('KWDB_HOST', '127.0.0.1'))
    parser.add_argument('--port', default=os.getenv('KWDB_SQL_PORT', '26257'))
    parser.add_argument('--database', default=os.getenv('KWDB_DB', 'kwdb'))
    parser.add_argument('--user', default=os.getenv('KWDB_USER', 'root'))
    parser.add_argument('--password', default=os.getenv('KWDB_PWD', ''))
    parser.add_argument('--threads', type=int, default=4)
    parser.add_argument('--batch', type=int, default=500)
    parser.add_argument('--total', type=int, default=100000)
    # NOTE(review): --table is accepted but never used; worker() hard-codes
    # the table name 'sdk_bench' — confirm whether this should be wired up.
    parser.add_argument('--table', default='sdk_bench')

    args = parser.parse_args()

    # Build ODBC connection string (PostgreSQL wire-protocol driver).
    driver_path = '/usr/lib/x86_64-linux-gnu/odbc/psqlodbcw.so'
    conn_str = (
        f"DRIVER={{{driver_path}}};"
        f"SERVER={args.host};"
        f"PORT={args.port};"
        f"DATABASE={args.database};"
        f"UID={args.user};"
        f"PWD={args.password};"
        "SSLmode=disable;"
    )

    # Round down so every thread writes the same number of rows; the actual
    # total may therefore be slightly less than --total.
    per_thread = args.total // args.threads
    actual_total = per_thread * args.threads

    print("=== ODBC Benchmark ===")
    print(f"Total points:  {actual_total}")
    print(f"Batch size:    {args.batch}")
    print(f"Concurrency:   {args.threads}")
    print(f"Per thread:    {per_thread}")
    print(f"Connection:    {args.host}:{args.port}/{args.database}")
    print()

    success_queue = queue.Queue()
    error_queue = queue.Queue()

    threads = []
    # perf_counter is the right clock for measuring elapsed time: it is
    # monotonic and unaffected by system clock adjustments (time.time is not).
    start_time = time.perf_counter()

    for i in range(args.threads):
        t = threading.Thread(
            target=worker,
            args=(i, per_thread, args.batch, conn_str, success_queue, error_queue)
        )
        t.start()
        threads.append(t)

    for t in threads:
        t.join()

    elapsed = time.perf_counter() - start_time

    def _drain(q):
        """Sum all counts currently in the queue (workers have joined)."""
        total = 0
        while True:
            try:
                total += q.get_nowait()
            except queue.Empty:
                return total

    success_count = _drain(success_queue)
    error_count = _drain(error_queue)

    qps = success_count / elapsed if elapsed > 0 else 0
    # Approximate per-batch latency: total thread-seconds divided by the
    # number of successful batches, in milliseconds.
    avg_latency = (elapsed * 1000 * args.threads) / (success_count / args.batch) if success_count > 0 else 0

    print("=== Results ===")
    print(f"Duration:      {elapsed:.2f} seconds")
    print(f"Success:       {success_count} points")
    print(f"Errors:        {error_count} points")
    print(f"QPS:           {qps:.2f} points/sec")
    print(f"Avg latency:   {avg_latency:.2f} ms/batch")
    print()

    if error_count > 0:
        print(f"⚠ Warning: {error_count} points failed to write")
        return 1

    print("✓ Benchmark completed successfully")
    return 0

if __name__ == '__main__':
    # raise SystemExit instead of the builtin exit(): exit() is injected by
    # the site module and is not guaranteed to exist (e.g. under python -S).
    raise SystemExit(main())
