import os
import logging
import traceback
import time

from tool_util import create_conn
from tool_util import get_resource_info
from tool_util import prometheus_push

from info_config import con_timeout
from info_config import pg_response_time_prometheus_metrics_name
from info_config import prometheus_url
from info_config import time_interval
# Get the current script path
script_path = os.path.dirname(os.path.abspath(__file__))
# Keep the log file next to the script so every launch (cron, shell, systemd)
# writes to the same place regardless of the working directory.
log_file = os.path.join(script_path, "pg_response_monitor_all.log")

# Root-logger config: all module logging.* calls below land in log_file.
logging.basicConfig(
    filename=log_file,
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
)


def find_large_table(pg_host, pg_port):
    """Locate one table larger than 2 GB anywhere on the instance.

    Connects to the maintenance ``postgres`` database to enumerate user
    databases, then probes each database for an ordinary table whose total
    relation size exceeds 2 GB.

    Returns:
        tuple: ``(dbname, table_name)`` for the first match, or
        ``(None, None)`` when no table qualifies or the admin connection fails.
    """
    admin_conn = create_conn(pg_host, pg_port, 'postgres', timeout=con_timeout)
    if admin_conn is None:
        logging.error("Could not connect to the postgres database to search for large tables.")
        return None, None

    try:
        with admin_conn.cursor() as admin_cur:
            # Skip templates and platform-internal databases.
            admin_cur.execute("SELECT datname FROM pg_database WHERE datistemplate = false and datname not in ('polardb_admin','rdsadmin','postgres');")
            candidates = admin_cur.fetchall()

            for (candidate_db,) in candidates:
                probe_conn = create_conn(pg_host, pg_port, candidate_db, timeout=con_timeout)
                if probe_conn is None:
                    logging.error(f"Could not connect to database {candidate_db}")
                    continue

                try:
                    with probe_conn.cursor() as probe_cur:
                        # Any single ordinary ('r') table over 2 GB will do.
                        probe_cur.execute("""
                            SELECT relname
                            FROM pg_class
                            WHERE relkind = 'r' AND pg_total_relation_size(oid) / 1024 / 1024 / 1024 > 2
                            LIMIT 1
                        """)
                        hit = probe_cur.fetchone()
                        if hit:
                            table_name = hit[0]
                            logging.info(f"Found large table: {table_name} in database: {candidate_db}")
                            return candidate_db, table_name

                except Exception as e:
                    # Best-effort: a broken database must not stop the scan.
                    logging.error(f"Error searching for large table in {candidate_db}: {str(e)}")
                    traceback.print_exc()
                finally:
                    probe_conn.close()

            logging.info("No table larger than 2GB found in any database.")
            return None, None

    except Exception as e:
        logging.error(f"Error in find_large_table: {str(e)}")
        traceback.print_exc()
        return None, None
    finally:
        admin_conn.close()


def get_pg_response_time(pg_host, pg_port, dbname, large_table):
    """Measure the elapsed time of a small SELECT on ``large_table``.

    Args:
        pg_host, pg_port: instance endpoint.
        dbname: database that contains the probe table.
        large_table: table to query — comes from pg_class or a hard-coded
            constant upstream, not from untrusted input (hence the f-string;
            flagged below anyway).

    Returns:
        float | None: elapsed time in **milliseconds**, or None on failure.
    """
    conn = create_conn(pg_host, pg_port, dbname, timeout=con_timeout)
    if conn is None:
        logging.error(f"Could not connect to database {dbname}")
        return None

    try:
        with conn.cursor() as cursor:
            # perf_counter is the correct monotonic clock for elapsed-time
            # measurement; time.time() can jump with wall-clock adjustments.
            # NOTE(review): table name interpolated into SQL — acceptable only
            # because it originates from pg_class, never user input.
            start_time = time.perf_counter()
            cursor.execute(f"SELECT * FROM {large_table} LIMIT 200;")
            cursor.fetchall()  # Fetch to complete execution
            response_time = (time.perf_counter() - start_time) * 1000  # Convert to milliseconds

            # Fixed: value is milliseconds; old message claimed "seconds".
            logging.info(f"Query executed in {response_time} ms on {large_table} in {dbname}")
            return response_time

    except Exception as e:
        logging.error(f"Error in get_pg_response_time for {dbname}: {str(e)}")
        traceback.print_exc()
        return None
    finally:
        conn.close()


def pg_response_monitor(pg_info):
    """Probe one PostgreSQL instance and push its response time to Prometheus.

    Args:
        pg_info: sequence of ``(instance_id, name, {"host": ..., "port": ...})``
            as produced by get_resource_info — assumed shape, confirm upstream.
    """
    instance_id = pg_info[0]  # renamed from `id`: don't shadow the builtin
    name = pg_info[1]
    pg_host = pg_info[2].get("host")
    pg_port = pg_info[2].get("port")

    if name.lower().startswith("wl-datapipeline-polardb2.0_data"):
        # Known instance: skip the scan and use a fixed probe table.
        dbname = "prod_datastore01"
        large_table = "oss_file_request_queue_annotation"
    else:
        # Otherwise discover a large (>2GB) table and its database.
        dbname, large_table = find_large_table(pg_host, pg_port)
    if dbname and large_table:
        response_time = get_pg_response_time(pg_host, pg_port, dbname, large_table)
        if response_time is not None:
            # NOTE(review): stray space before the comma kept byte-for-byte so
            # the exposed metric text is unchanged.
            metric_data = f'{pg_response_time_prometheus_metrics_name}{{instanceId="{instance_id}" ,resourceName="{name}"}} {response_time}'
            prometheus_push(prometheus_url, metric_data)


def pg_response_monitor_all(pg_info_list):
    """Monitor every instance in pg_info_list, isolating per-instance failures.

    Args:
        pg_info_list: iterable of pg_info tuples (see pg_response_monitor).
    """
    for pg_info in pg_info_list:
        try:
            logging.info(f"Start monitoring {pg_info}")
            pg_response_monitor(pg_info)
        except Exception:
            # Fixed: print_exc() alone only reached stderr, so failures never
            # appeared in the configured log file (main() already logs this
            # way). Keep printing too, then continue with the next instance.
            logging.error(traceback.format_exc())
            traceback.print_exc()


def main():
    """One monitoring pass: fetch instances, probe each, then pause.

    NOTE(review): this runs a single pass and sleeps once before exiting —
    presumably the script is relaunched externally (cron/supervisor); confirm.
    """
    try:
        instances = get_resource_info()
        pg_response_monitor_all(instances)
        time.sleep(time_interval)
    except Exception:
        # Top-level boundary: record the full traceback in the log file.
        logging.error(traceback.format_exc())


if __name__ == "__main__":
    main()
