import configparser
import ipaddress
import shutil
import sys
from os import mkdir
from os.path import exists
from os.path import join as path_join
from pathlib import Path
from threading import Lock

from sqlalchemy import Table, create_engine, MetaData, func
from sqlalchemy.exc import (
    IllegalStateChangeError,
    NoInspectionAvailable,
    NoSuchTableError,
)
from sqlalchemy.orm import sessionmaker, scoped_session
from nxc.loaders.protocolloader import ProtocolLoader
from nxc.logger import nxc_logger
from nxc.paths import WORKSPACE_DIR


def create_db_engine(db_path):
    """Build a SQLAlchemy engine for the SQLite database file at *db_path*.

    AUTOCOMMIT keeps each statement self-contained, and future=True selects
    the 2.0-style engine API.
    """
    db_url = f"sqlite:///{db_path}"
    return create_engine(db_url, isolation_level="AUTOCOMMIT", future=True)


def open_config(config_path):
    """Parse the nxc configuration file at *config_path* and return the parser.

    Any parse failure is fatal: an error is printed and the process exits
    with status 1.
    """
    parser = configparser.ConfigParser()
    try:
        parser.read(config_path)
    except Exception as e:
        print(f"[-] Error reading nxc.conf: {e}")
        sys.exit(1)
    return parser


def get_workspace(config):
    """Return the name of the currently active workspace from *config*."""
    workspace_name = config.get("nxc", "workspace")
    return workspace_name


def set_workspace(config_path, workspace_name):
    """Persist *workspace_name* as the active workspace in the config file."""
    parser = open_config(config_path)
    parser.set("nxc", "workspace", workspace_name)
    write_configfile(parser, config_path)
    print(f"[*] Workspace set to {workspace_name}")


def get_db(config):
    """Return the name of the most recently used database from *config*."""
    last_db = config.get("nxc", "last_used_db")
    return last_db


def write_configfile(config, config_path):
    """Serialize the *config* parser back to disk at *config_path*."""
    with open(config_path, "w") as fh:
        config.write(fh)


def init_protocol_dbs(workspace_name, p_loader=None):
    """Check for each protocol if the database exists, if not create it."""
    loader = p_loader if p_loader is not None else ProtocolLoader()
    for protocol, proto_info in loader.get_protocols().items():
        # The protocol module is loaded unconditionally; only the DB file
        # creation is skipped when the database already exists.
        protocol_object = loader.load_protocol(proto_info["dbpath"])
        proto_db_path = path_join(WORKSPACE_DIR, workspace_name, f"{protocol}.db")

        if exists(proto_db_path):
            continue
        print(f"[*] Initializing {protocol.upper()} protocol database")
        db_engine = create_db_engine(proto_db_path)
        protocol_object.database.db_schema(db_engine)
        db_engine.dispose()


def create_workspace(workspace_name, p_loader=None):
    """
    Create a new workspace with the given name.

    Args:
    ----
        workspace_name (str): The name of the workspace.

    Returns:
    -------
        None
    """
    workspace_path = path_join(WORKSPACE_DIR, workspace_name)
    if exists(workspace_path):
        print(f"[-] Workspace {workspace_name} already exists")
    else:
        print(f"[*] Creating {workspace_name} workspace")
        mkdir(workspace_path)

    # Always run the per-protocol check so an existing workspace picks up
    # databases for any newly added protocols.
    init_protocol_dbs(workspace_name, p_loader)


def delete_workspace(workspace_name):
    """Remove the named workspace directory and all databases inside it."""
    workspace_path = path_join(WORKSPACE_DIR, workspace_name)
    shutil.rmtree(workspace_path)
    print(f"[*] Workspace {workspace_name} deleted")


def initialize_db():
    """Ensure the default workspace exists and holds a DB for every protocol."""
    default_path = path_join(WORKSPACE_DIR, "default")
    if not exists(default_path):
        create_workspace("default")

    # Even when the default workspace already exists, a newly added protocol
    # may still be missing its database, so always run the per-protocol check.
    init_protocol_dbs("default")


def format_host_query(q, filter_term, HostsTable):
    """One annoying thing is that if you search for an ip such as '10.10.10.5',
    it will return 10.10.10.5 and 10.10.10.52, so we have to check if its an ip address first
    """
    # the FTP and SSH protocols call the column host instead of IP
    # TODO: normalize these column names
    if hasattr(HostsTable.c, "ip"):
        nxc_logger.debug("Using 'ip' column for filtering")
        ip_column = HostsTable.c.ip
    elif hasattr(HostsTable.c, "host"):
        nxc_logger.debug("Using 'host' column for filtering")
        ip_column = HostsTable.c.host
    else:
        # No usable address column: return the query untouched.
        nxc_logger.debug("Neither 'ip' nor 'host' columns found in the table")
        return q

    # first we check if its an ip address
    try:
        ipaddress.ip_address(filter_term)
    except ValueError:
        # Not an IP: case-insensitive substring match against the address
        # column, plus the hostname column when the table has one.
        nxc_logger.debug(f"filter_term is not an IP address: {filter_term}")
        like_term = func.lower(f"%{filter_term}%")
        condition = ip_column.like(like_term)
        if hasattr(HostsTable.c, "hostname"):
            condition = condition | func.lower(HostsTable.c.hostname).like(like_term)
        return q.filter(condition)

    # Valid IP: exact equality so 10.10.10.5 does not also match 10.10.10.52.
    nxc_logger.debug(f"filter_term is an IP address: {filter_term}")
    return q.filter(ip_column == filter_term)


class BaseDB:
    """Common base for the per-protocol database wrappers.

    Owns the engine, the reflected MetaData, a scoped session, and a lock
    that serializes statement execution across nxc's worker threads.
    """

    def __init__(self, db_engine):
        self.db_engine = db_engine
        # Derive the protocol name from the DB filename, e.g. ".../smb.db" -> "SMB"
        self.db_path = self.db_engine.url.database
        self.protocol = Path(self.db_path).stem.upper()
        self.metadata = MetaData()
        self.reflect_tables()
        session_factory = sessionmaker(bind=self.db_engine, expire_on_commit=True)

        session = scoped_session(session_factory)
        self.sess = session()
        self.lock = Lock()

    def reflect_tables(self):
        """Subclasses must reflect their protocol-specific tables here."""
        raise NotImplementedError("Reflect tables not implemented")

    def reflect_table(self, table):
        """Reflect *table* from the live DB and verify it matches the ORM definition.

        Compares column sets and constraints; any mismatch (typically an old
        DB schema under a newer nxc) is reported and the process exits.
        """
        with self.db_engine.connect():
            try:
                reflected_table = Table(table.__tablename__, self.metadata, autoload_with=self.db_engine)

                # Check for column addition / deletion
                reflected_columns = set(reflected_table.columns.keys())
                orm_columns = {column.name for column in table.__table__.columns}
                if reflected_columns != orm_columns:
                    raise ValueError(f"Schema mismatch detected! ORM columns: {orm_columns}, Reflected columns: {reflected_columns}")

                # Check for constraint changes
                reflected_constraints = [(type(c), c.columns.keys()) for c in reflected_table._sorted_constraints]
                orm_constraints = [(type(c), c.columns.keys()) for c in table.__table__._sorted_constraints]
                if reflected_constraints != orm_constraints:
                    raise ValueError(f"Schema mismatch detected! ORM constraints: {orm_constraints}, Reflected constraints: {reflected_constraints}")

                return reflected_table
            except (NoInspectionAvailable, NoSuchTableError, ValueError) as e:
                nxc_logger.fail(f"Schema mismatch detected for table '{table.__tablename__}' in protocol '{self.protocol}'")
                nxc_logger.debug(e)
                nxc_logger.fail("This is probably because a newer version of nxc is being run on an old DB schema.")
                nxc_logger.fail(f"Optionally save the old DB data (`cp {self.db_path} ~/nxc_{self.protocol.lower()}.bak`)")
                nxc_logger.fail(f"Then remove the {self.protocol} DB (`rm -f {self.db_path}`) and run nxc to initialize the new DB")
                sys.exit()

    def shutdown_db(self):
        """Close the session, tolerating the messy state async teardown can leave."""
        try:
            self.sess.close()
        # due to the async nature of nxc, sometimes session state is a bit messy and this will throw:
        # Method 'close()' can't be called here; method '_connection_for_bind()' is already in progress and
        # this would cause an unexpected state change to <SessionTransactionState.CLOSED: 5>
        except IllegalStateChangeError as e:
            nxc_logger.debug(f"Error while closing session db object: {e}")

    def clear_database(self):
        """Delete every row from every reflected table (schema is kept)."""
        for table in self.metadata.sorted_tables:
            self.db_execute(table.delete())

    def db_execute(self, *args):
        """Execute a statement on the shared session, serialized by the lock.

        Uses the lock as a context manager so it is released even when
        execute() raises; the previous bare acquire()/release() pair leaked
        the lock on error, deadlocking every subsequent query.
        """
        with self.lock:
            return self.sess.execute(*args)
