from __future__ import annotations

from dataclasses import dataclass
from typing import Iterable, Tuple, List, Dict, Any, Optional
def _norm_name(name: Optional[str]) -> Optional[str]:
    if name is None:
        return None
    s = str(name)
    # strip common quotes/backticks
    if s.startswith('`') and s.endswith('`') and len(s) >= 2:
        s = s[1:-1]
    if s.startswith('"') and s.endswith('"') and len(s) >= 2:
        s = s[1:-1]
    return s


import sqlglot
from sqlglot import exp

# ---- Data type mapping: MySQL -> KWDB ----
# Direct one-to-one renames applied by _rewrite_datatypes AFTER the special
# cases (BLOB variants, VARBINARY, ENUM, TINYINT(1), UNSIGNED widening).
# NOTE: lookups use the bare upper-cased type name without parameters, so the
# "TINYINT(1)" key can never match through that path — TINYINT(1) is instead
# detected by _tinyint_one_is_bool.
MYSQL_TO_KWDB_TYPES: dict[str, str] = {
    # direct maps (non-unsigned, general)
    "BOOLEAN": "BOOL",
    "BOOL": "BOOL",
    "TINYINT(1)": "BOOL",
    "TINYINT": "INT2",
    "SMALLINT": "INT2",
    "MEDIUMINT": "INT4",
    "INT": "INT4",
    "INTEGER": "INT4",
    "BIGINT": "INT8",
    "FLOAT": "FLOAT4",
    "DOUBLE": "FLOAT8",
    "DATE": "TIMESTAMP",
    "DATETIME": "TIMESTAMP",
    "BINARY": "BYTEA",
    "VARBINARY": "VARBYTES",
    "LONGVARBINARY": "VARBYTES",
    "LONG VARBINARY": "VARBYTES",
    "BLOB": "BYTES",
    "MEDIUMBLOB": "BYTES",
    "LONGBLOB": "VARBYTES",
    "LONGTEXT": "TEXT",
}


# ---- Function mapping ----
# MySQL function name -> KWDB function name, applied by _rewrite_functions.
# Keys are matched against upper-cased function names.
FUNC_MAP: dict[str, str] = {
    "TRIM": "BTRIM",
    "POSITION": "STRPOS",
    "CURDATE": "CURRENT_DATE",
    "DATEDIFF": "AGE",
    "DAY": "EXTRACT",  # needs special handling (rewritten to EXTRACT('day' FROM x) in _rewrite_functions)
    "RAND": "RANDOM",
    "NOW": "TRANSACTION_TIMESTAMP",
}


@dataclass
class TransformResult:
    """Result of a transformation: the converted SQL plus any warnings."""

    sql: str  # converted SQL text
    warnings: list[str]  # warning messages accumulated during conversion


@dataclass
class TransformSplitResult:
    """Transformation result split into separately-emitted SQL sections."""

    ddl_sql: str  # data-definition statements
    dml_sql: str  # data-manipulation statements
    constraints_sql: str  # constraint-related statements (emitted separately)
    warnings: list[str]  # warning messages accumulated during conversion

def _is_unsigned(node: exp.DataType) -> bool:
    # sqlglot represents unsigned in different ways; try multiple
    if node.args.get("is_unsigned"):
        return True
    # Some versions store 'unsigned' as a boolean arg
    if node.args.get("unsigned"):
        return True
    # Fallback: check text
    return False


def _extract_enum_values(node: exp.DataType) -> list[str] | None:
    """Extract ENUM values from DataType node.
    Returns list of enum values as strings, or None if not an ENUM type.
    """
    # Check if this is an ENUM type
    name = None
    if isinstance(node.this, str):
        name = node.this.upper()
    elif hasattr(node.this, 'name'):
        name = node.this.name.upper()
    else:
        name = str(node.this).upper()
    
    if name != "ENUM":
        return None
    
    # Extract enum values from expressions
    # ENUM('value1', 'value2', ...) - values are in node.expressions
    enum_values = []
    if node.expressions:
        for expr in node.expressions:
            # Each expression should be a Literal string
            if isinstance(expr, exp.Literal) and expr.is_string:
                enum_values.append(expr.this)
            elif hasattr(expr, 'this') and isinstance(expr.this, exp.Literal) and expr.this.is_string:
                enum_values.append(expr.this.this)
            elif isinstance(expr, str):
                enum_values.append(expr)
            # Try to get value from args
            elif hasattr(expr, 'args') and 'this' in expr.args:
                val = expr.args['this']
                if isinstance(val, exp.Literal) and val.is_string:
                    enum_values.append(val.this)
                elif isinstance(val, str):
                    enum_values.append(val)
    
    return enum_values if enum_values else None


def _calculate_varchar_length_for_enum(enum_values: list[str]) -> int:
    """Calculate VARCHAR length for ENUM conversion.
    Formula: max(enum_value_lengths) + 10 (safety buffer)
    """
    if not enum_values:
        return 20  # Default fallback
    
    max_length = max(len(val) for val in enum_values)
    return max_length + 10


def _tinyint_one_is_bool(node: exp.DataType) -> bool:
    """Check if this is TINYINT(1) which should be converted to BOOL.

    Returns True only when the type name contains "TINYINT" and the first
    width parameter evaluates to the number 1.  Any unexpected node shape
    (or a parse error while probing it) falls through to False.
    """
    # Get type name
    if isinstance(node.this, str):
        name = node.this.upper()
    else:
        # Handle Type enum
        name = str(node.this).upper()
    
    if "TINYINT" not in name:
        return False
    
    # Check if there's exactly one expression parameter with value 1
    exprs = node.expressions or []
    if not exprs:
        return False
    
    # SQLGlot wraps literals in DataTypeParam
    param = exprs[0]
    try:
        # Get the literal value from DataTypeParam
        if hasattr(param, 'this'):
            # DataTypeParam.this is the Literal
            if isinstance(param.this, exp.Literal) and param.this.is_number:
                val = int(param.this.this)
                return val == 1
            # Sometimes it's directly the value
            elif param.this == 1:
                return True
        # Try args access
        if hasattr(param, 'args'):
            if 'this' in param.args:
                val_obj = param.args['this']
                if isinstance(val_obj, exp.Literal) and val_obj.is_number:
                    return int(val_obj.this) == 1
                elif val_obj == 1:
                    return True
    except (AttributeError, ValueError, TypeError):
        pass
    return False


def _rewrite_datatypes(expression: exp.Expression) -> None:
    """Convert MySQL data types to their KWDB equivalents, mutating the AST.

    Per DataType node, in order: strip COLLATE, map the BLOB family,
    map the VARBINARY family, rewrite ENUM -> VARCHAR(n) (stashing the
    values in args['__enum_values'] for later CHECK generation), rewrite
    TINYINT(1) -> BOOL, widen UNSIGNED integers, and finally apply the
    direct MYSQL_TO_KWDB_TYPES rename.  The first rule that fires wins.
    """
    for node in expression.find_all(exp.DataType):
        # Remove COLLATE clause from column definitions
        # COLLATE can be stored in different ways in sqlglot AST
        if hasattr(node, 'collate') and node.collate is not None:
            node.set("collate", None)
        if hasattr(node, 'args') and 'collate' in node.args:
            node.args.pop('collate', None)

        # Handle both string and enum types
        if isinstance(node.this, str):
            name = node.this.upper()
        elif hasattr(node.this, 'name'):
            # For enum types (e.g., Type.MEDIUMBLOB), use the name attribute
            name = node.this.name.upper()
        else:
            name = str(node.this).upper()

        # Heuristic: sqlglot may normalize BLOB types to VARBINARY; check original MySQL text
        # to distinguish BLOB/MEDIUMBLOB/LONGBLOB from actual VARBINARY
        try:
            original_dt_sql = node.sql(dialect="mysql").upper()
        except Exception:
            original_dt_sql = ""

        # Handle BLOB types - sqlglot normalizes them to VARBINARY, but we need to distinguish
        # Check the original enum name to identify BLOB types
        if hasattr(node.this, 'name'):
            enum_name = node.this.name.upper()
            if "LONGBLOB" in enum_name:
                node.set("this", "VARBYTES")
                node.set("expressions", [])
                continue
            elif "MEDIUMBLOB" in enum_name:
                node.set("this", "BYTES")
                node.set("expressions", [])
                continue
            elif "BLOB" in enum_name and "MEDIUMBLOB" not in enum_name and "LONGBLOB" not in enum_name:
                node.set("this", "BYTES")
                node.set("expressions", [])
                continue

        # Handle VARBINARY types (only if not already handled as BLOB above)
        if name == "VARBINARY" or "VARBINARY" in original_dt_sql or "LONG VARBINARY" in original_dt_sql or "LONGVARBINARY" in original_dt_sql:
            node.set("this", "VARBYTES")
            node.set("expressions", [])
            continue

        # Handle ENUM -> VARCHAR(n) + CHECK constraint
        # Store enum values in node metadata for later CHECK constraint generation
        enum_values = _extract_enum_values(node)
        if enum_values is not None:
            # Calculate VARCHAR length: max(enum_value_lengths) + 10
            varchar_length = _calculate_varchar_length_for_enum(enum_values)
            # Convert ENUM to VARCHAR(n)
            node.set("this", "VARCHAR")
            node.set("expressions", [exp.Literal.number(varchar_length)])
            # Store enum values in node metadata for CHECK constraint generation
            node.args["__enum_values"] = enum_values
            continue

        # Special-case: TINYINT(1) -> BOOL
        if _tinyint_one_is_bool(node):
            node.set("this", "BOOL")
            node.set("expressions", [])
            continue

        # Unsigned handling
        if _is_unsigned(node):
            if name == "TINYINT":
                node.set("this", "INT2")
                node.set("expressions", [])
                continue
            if name == "SMALLINT":
                node.set("this", "INT4")
                node.set("expressions", [])
                continue
            if name in ("MEDIUMINT",):
                node.set("this", "INT4")
                node.set("expressions", [])
                continue
            if name in ("INT", "INTEGER"):
                node.set("this", "INT8")
                node.set("expressions", [])
                continue
            if name == "BIGINT":
                # Use NUMERIC(20)
                node.set("this", "NUMERIC")
                node.set("expressions", [exp.Literal.number(20)])
                continue

        # Direct simple mapping
        mapped = MYSQL_TO_KWDB_TYPES.get(name)
        if mapped:
            node.set("this", mapped)
            # Clear length parameters for types that don't support them in KWDB
            # But keep length for BINARY -> BYTEA (BINARY(10) -> BYTEA(10))
            if mapped in ("VARBYTES", "BYTES", "TEXT"):
                # VARBYTES, BYTES, TEXT don't support length parameters
                node.set("expressions", [])
            elif mapped == "BYTEA" and name != "BINARY":
                # BYTEA from BLOB/MEDIUMBLOB should not have length, but BINARY(10) -> BYTEA(10) should keep length
                node.set("expressions", [])

def _rewrite_functions(expression: exp.Expression) -> None:
    """Rewrite MySQL function calls in-place to their KWDB equivalents.

    DAY(x) becomes EXTRACT('day' FROM x); everything listed in FUNC_MAP is
    renamed.  Works both for exp.Anonymous calls (name stored in ``this``)
    and for typed Func nodes (e.g. exp.Day, exp.Trim), where sqlglot stores
    the first argument in ``this`` rather than in ``expressions``.
    """
    for func in expression.find_all(exp.Func):
        # BUGFIX: for typed Func nodes, `.name` is the text of the first
        # argument, not the function name — use the canonical sql_name()
        # there so DAY()/TRIM()/etc. are recognized (and a column that merely
        # shares a mapped name is not accidentally rewritten).
        if isinstance(func, exp.Anonymous):
            lookup = func.name.upper()
        else:
            try:
                lookup = func.sql_name().upper()
            except Exception:
                lookup = func.name.upper()

        if lookup == "DAY":
            # DAY(x) -> EXTRACT('day' FROM x).  The argument lives in
            # `expressions` for Anonymous calls but in `this` for exp.Day.
            if func.expressions:
                arg = func.expressions[0]
            elif isinstance(func.this, exp.Expression):
                arg = func.this
            else:
                arg = exp.Null()
            func.replace(
                exp.Extract(this=exp.Var(this="day"), expression=arg)
            )
            continue

        mapped = FUNC_MAP.get(lookup)
        if mapped:
            if isinstance(func, exp.Anonymous):
                # Anonymous stores the callable name in `this`; safe to swap.
                func.set("this", exp.Identifier(this=mapped))
            else:
                # BUGFIX: on typed nodes, overwriting `this` clobbered the
                # first argument.  Rebuild the call as an anonymous function
                # with the positional arguments carried over.
                # NOTE(review): named argument slots beyond this/expressions
                # (e.g. TRIM's position/characters) are not carried over —
                # confirm acceptable for the supported input set.
                args = []
                if isinstance(func.this, exp.Expression):
                    args.append(func.this)
                args.extend(func.expressions or [])
                func.replace(exp.Anonymous(this=mapped, expressions=args))


def _rewrite_create_database(expr: exp.Create) -> None:
    """Rewrite CREATE DATABASE options for KWDB.

    Drops COLLATE properties, converts CHARACTER SET into an ENCODING
    property, and sets the IF NOT EXISTS flag on the statement.

    NOTE(review): the early return below requires expr.this to be a Schema;
    plain CREATE DATABASE statements typically parse with a non-Schema
    target in sqlglot, so they may bypass this rewrite entirely — confirm
    against the sqlglot version in use.
    """
    # Remove COLLATE, convert CHARACTER SET → ENCODING
    if not isinstance(expr.this, exp.Schema):
        return
    # Options live under properties
    new_properties = []
    encoding_seen = False
    props = expr.args.get("properties")
    prop_list = list(props.expressions) if isinstance(props, exp.Properties) else list(props or [])
    for prop in prop_list:
        if isinstance(prop, exp.CollateProperty):
            # drop
            continue
        if isinstance(prop, exp.CharacterSetProperty):
            val = prop.this.name if isinstance(prop.this, exp.Identifier) else prop.this
            new_properties.append(exp.Property(this=exp.Var(this="ENCODING"), value=exp.Literal.string(str(val).upper())))
            encoding_seen = True
            continue
        new_properties.append(prop)
    if new_properties:
        expr.set("properties", exp.Properties(expressions=new_properties))
    else:
        expr.set("properties", None)
    if not encoding_seen:
        # no-op; KWDB default
        pass

    # Ensure IF NOT EXISTS on CREATE DATABASE
    try:
        # sqlglot Create supports an 'exists' flag for IF NOT EXISTS
        expr.set("exists", True)
    except Exception:
        # Best-effort; downstream SQL post-process will also handle if needed
        pass


def _rewrite_create_table(expr: exp.Create) -> None:
    """Rewrite a parsed MySQL CREATE TABLE statement in-place for KWDB.

    Column-level work: strips AUTO_INCREMENT (replaced by DEFAULT
    nextval() on a generated sequence), extracts column COMMENT
    constraints, rewrites DEFAULT '1'/'0' to 'true'/'false' on BOOL
    columns (including those converted from TINYINT(1)), and detects
    ON UPDATE CURRENT_TIMESTAMP columns.  Table-level work: drops
    ENGINE/CHARSET/COLLATE options and captures table COMMENT and
    AUTO_INCREMENT = N.

    Downstream emitters read the side-channel metadata this stashes in
    ``expr.args``:
      __table_comments         {table_name, table_comment, column_comments}
      __enum_check_constraints [(table, column, enum_values), ...]
      __on_update_columns      [(table, column), ...]
      __create_sequence        (table, column, start_value)
    """
    # Basic rules: AUTO_INCREMENT -> SEQUENCE + nextval(); remove ENGINE/CHARSET table options
    # Extract all comments and generate COMMENT ON statements separately
    if not isinstance(expr.this, exp.Schema):
        return
    columns = list(expr.this.expressions or [])
    auto_inc_column_name: str | None = None
    auto_inc_column: exp.ColumnDef | None = None
    column_comments: Dict[str, str] = {}  # {column_name: comment_text}
    enum_check_constraints: List[Tuple[str, str, list[str]]] = []  # [(table_name, col_name, enum_values)]
    on_update_columns: List[Tuple[str, str]] = []  # [(table_name, col_name)] for ON UPDATE CURRENT_TIMESTAMP

    for col in columns:
        if isinstance(col, exp.ColumnDef):
            # AUTO_INCREMENT -> will use SEQUENCE + nextval()
            constraints = list(col.args.get("constraints") or [])
            # Check both direct AutoIncrementColumnConstraint and ColumnConstraint with AutoIncrement kind
            auto = None
            for c in constraints:
                if isinstance(c, exp.AutoIncrementColumnConstraint):
                    auto = c
                    break
                elif isinstance(c, exp.ColumnConstraint) and hasattr(c, 'kind'):
                    if isinstance(c.kind, exp.AutoIncrementColumnConstraint):
                        auto = c
                        break
            if auto:
                # Don't convert to SERIAL; keep INT and we'll add DEFAULT nextval() later
                # Just mark this column for SEQUENCE creation
                constraints.remove(auto)
                # Remember which column had AUTO_INCREMENT
                if isinstance(col.this, exp.Identifier):
                    auto_inc_column_name = col.this.name if hasattr(col.this, "name") else str(col.this.this)
                    auto_inc_column = col

            # Extract column comments and remove from CREATE TABLE
            # Get column name
            col_name = None
            if isinstance(col.this, exp.Identifier):
                col_name = col.this.name if hasattr(col.this, "name") else str(col.this.this)
            elif isinstance(col.this, exp.Literal) and col.this.is_string:
                col_name = str(col.this.this)

            # Get column data type (will be used for multiple checks)
            dtype = col.args.get("kind")  # ColumnDef.kind is the DataType

            # Check if this column has ENUM type (converted to VARCHAR with __enum_values metadata)
            # The ENUM -> VARCHAR conversion happens in _rewrite_datatypes, which stores enum values
            # in the DataType node's __enum_values metadata
            if isinstance(dtype, exp.DataType) and dtype.args.get("__enum_values"):
                enum_values = dtype.args.get("__enum_values")
                if enum_values and col_name:
                    # Get table name for CHECK constraint
                    table_name = expr.this.this if hasattr(expr.this, "this") else None
                    if table_name:
                        table_name_clean = str(table_name).strip('"`')
                        col_name_clean = str(col_name).strip('"`')
                        enum_check_constraints.append((table_name_clean, col_name_clean, enum_values))

            # Check for ON UPDATE CURRENT_TIMESTAMP
            # Try to detect it from the column's original MySQL SQL representation
            has_on_update = False
            try:
                # Get the original MySQL SQL for this column to detect ON UPDATE
                col_sql = col.sql(dialect="mysql").upper()
                if "ON UPDATE" in col_sql and ("CURRENT_TIMESTAMP" in col_sql or "NOW()" in col_sql):
                    has_on_update = True
            except Exception:
                pass

            # Check if this column is BOOL type (converted from TINYINT(1))
            # If so, we need to convert DEFAULT '1' to DEFAULT 'true' and DEFAULT '0' to DEFAULT 'false'
            # Note: _rewrite_datatypes is called before _rewrite_create_table, so the type should already be converted
            is_bool_column = False
            if isinstance(dtype, exp.DataType):
                # Check if the type is BOOL (could be converted from TINYINT(1))
                type_name = None
                if isinstance(dtype.this, str):
                    type_name = dtype.this.upper()
                elif hasattr(dtype.this, 'name'):
                    type_name = dtype.this.name.upper()
                else:
                    type_name = str(dtype.this).upper()

                if type_name == "BOOL":
                    is_bool_column = True
                # Also check if it was originally TINYINT(1) by checking the original SQL
                elif _tinyint_one_is_bool(dtype):
                    is_bool_column = True

            new_constraints = []
            for c in constraints:
                # Extract CommentColumnConstraint
                if hasattr(exp, "CommentColumnConstraint") and isinstance(c, exp.CommentColumnConstraint):
                    if hasattr(c, "this") and isinstance(c.this, exp.Literal) and c.this.is_string:
                        if col_name:
                            column_comments[col_name] = c.this.this
                    continue
                elif isinstance(c, exp.ColumnConstraint) and hasattr(c, "kind"):
                    if hasattr(exp, "CommentColumnConstraint") and isinstance(c.kind, exp.CommentColumnConstraint):
                        if hasattr(c.kind, "this") and isinstance(c.kind.this, exp.Literal) and c.kind.this.is_string:
                            if col_name:
                                column_comments[col_name] = c.kind.this.this
                        continue

                # Convert DEFAULT '1'/'0' to 'true'/'false' for BOOL columns
                # Handle both direct DefaultColumnConstraint and ColumnConstraint wrapping DefaultColumnConstraint
                default_constraint = None
                if isinstance(c, exp.DefaultColumnConstraint):
                    default_constraint = c
                elif isinstance(c, exp.ColumnConstraint) and hasattr(c, "kind") and isinstance(c.kind, exp.DefaultColumnConstraint):
                    default_constraint = c.kind

                if is_bool_column and default_constraint:
                    default_value = default_constraint.this
                    # Check if default value is '1' or '0' (as string or number)
                    should_convert = False
                    new_value = None

                    if isinstance(default_value, exp.Literal):
                        # Handle both string and numeric literals
                        # Note: MySQL may parse '1' as a numeric literal with is_string=True
                        val_to_check = None
                        if default_value.is_string:
                            # String literal: '1' or '0'
                            val_to_check = str(default_value.this).strip("'\"")
                        elif default_value.is_number:
                            # Numeric literal: 1 or 0
                            try:
                                val_to_check = str(int(default_value.this))
                            except (ValueError, TypeError):
                                pass
                        else:
                            # Try to get the value directly
                            try:
                                val_to_check = str(default_value.this).strip("'\"")
                            except:
                                pass

                        if val_to_check == '1':
                            should_convert = True
                            new_value = exp.Literal.string('true')
                        elif val_to_check == '0':
                            should_convert = True
                            new_value = exp.Literal.string('false')
                    elif isinstance(default_value, str):
                        # Direct string value
                        val_str = str(default_value).strip("'\"")
                        if val_str == '1':
                            should_convert = True
                            new_value = exp.Literal.string('true')
                        elif val_str == '0':
                            should_convert = True
                            new_value = exp.Literal.string('false')
                    elif isinstance(default_value, (int, float)):
                        # Direct numeric value
                        if default_value == 1:
                            should_convert = True
                            new_value = exp.Literal.string('true')
                        elif default_value == 0:
                            should_convert = True
                            new_value = exp.Literal.string('false')

                    if should_convert and new_value:
                        # Create a new DefaultColumnConstraint with the converted value
                        new_default_constraint = exp.DefaultColumnConstraint(this=new_value)
                        # If original was wrapped in ColumnConstraint, wrap the new one too
                        if isinstance(c, exp.ColumnConstraint):
                            new_constraint = exp.ColumnConstraint(kind=new_default_constraint)
                            new_constraints.append(new_constraint)
                        else:
                            new_constraints.append(new_default_constraint)
                        continue

                # Keep all other constraints (ON UPDATE will be removed in SQL post-processing)
                new_constraints.append(c)

            # If ON UPDATE CURRENT_TIMESTAMP was detected, mark this column for trigger generation
            if has_on_update and col_name:
                table_name = expr.this.this if hasattr(expr.this, "this") else None
                if table_name:
                    table_name_clean = str(table_name).strip('"`')
                    col_name_clean = str(col_name).strip('"`')
                    on_update_columns.append((table_name_clean, col_name_clean))

            col.set("constraints", new_constraints)

    # Strip table-level options like ENGINE, CHARSET if any
    props_accum = []
    props = expr.args.get("properties")
    prop_list = list(props.expressions) if isinstance(props, exp.Properties) else list(props or [])
    auto_inc_next_value: int | None = None
    table_comment: str | None = None
    for prop in prop_list:
        if isinstance(prop, (exp.EngineProperty, exp.CharacterSetProperty, exp.CollateProperty)):
            continue
        # Handle dialect-specific table comment node
        if hasattr(exp, "SchemaCommentProperty") and isinstance(prop, exp.SchemaCommentProperty):
            val = None
            if isinstance(prop.this, exp.Literal) and prop.this.is_string:
                val = prop.this.this
            elif hasattr(prop, "value") and isinstance(prop.value, exp.Literal) and prop.value.is_string:
                val = prop.value.this
            if val is not None:
                table_comment = val
                # drop from CREATE
                continue
        # Capture table option AUTO_INCREMENT = N
        if hasattr(exp, "AutoIncrementProperty") and isinstance(prop, exp.AutoIncrementProperty):
            try:
                if isinstance(prop.this, exp.Literal) and prop.this.is_number:
                    auto_inc_next_value = int(prop.this.this)
            except Exception:
                pass
            # Do not carry this property forward
            continue
        if isinstance(prop, exp.Property):
            key = prop.this.this.upper() if hasattr(prop.this, "this") else str(prop.this).upper()
            if key == "AUTO_INCREMENT":
                try:
                    val = prop.args.get("value") or prop.args.get("this")
                    if isinstance(val, exp.Literal) and val.is_number:
                        auto_inc_next_value = int(val.this)
                except Exception:
                    pass
                # Do not carry this property forward
                continue
            if key == "COMMENT":
                # Table-level comment, capture and drop from CREATE
                val = prop.args.get("value") or prop.args.get("this")
                if isinstance(val, exp.Literal) and val.is_string:
                    table_comment = val.this
                continue
        props_accum.append(prop)
    # Do NOT append table-level COMMENT to CREATE TABLE - we'll generate COMMENT ON TABLE separately
    if props_accum:
        expr.set("properties", exp.Properties(expressions=props_accum))
    else:
        expr.set("properties", None)

    # Store comments metadata for downstream COMMENT ON statement generation
    if column_comments or table_comment:
        table_name = expr.this.this if hasattr(expr.this, "this") else None
        if table_name:
            table_name_clean = str(table_name).strip('"`')
            expr.args["__table_comments"] = {
                "table_name": table_name_clean,
                "table_comment": table_comment,
                "column_comments": column_comments
            }

    # Store ENUM CHECK constraints metadata for downstream generation
    if enum_check_constraints:
        expr.args["__enum_check_constraints"] = enum_check_constraints

    # Store ON UPDATE CURRENT_TIMESTAMP columns metadata for trigger generation
    if on_update_columns:
        expr.args["__on_update_columns"] = on_update_columns

    # Store sequence metadata on the node for downstream emission
    # Also add DEFAULT nextval() directly to the AST to avoid regex post-processing
    if auto_inc_column_name and isinstance(expr.this, exp.Schema) and auto_inc_column:
        table_name = expr.this.this if hasattr(expr.this, "this") else None
        if table_name:
            # Clean table and column names (remove quotes)
            table_name_clean = str(table_name).strip('"`')
            col_name_clean = str(auto_inc_column_name).strip('"`')

            start_value = int(auto_inc_next_value) if auto_inc_next_value else 1
            expr.args["__create_sequence"] = (table_name_clean, col_name_clean, start_value)

            # Add DEFAULT nextval() directly to the column in AST
            # This avoids complex regex post-processing
            seq_name = f"{table_name_clean}_{col_name_clean}_seq"
            # Check if DEFAULT already exists
            has_default = False
            col_constraints = list(auto_inc_column.args.get("constraints") or [])
            for c in col_constraints:
                if isinstance(c, exp.DefaultColumnConstraint):
                    has_default = True
                    break

            if not has_default:
                # Add DEFAULT nextval('sequence_name') constraint
                # Use sqlglot.parse_one to create the function call expression
                import sqlglot
                nextval_sql = f"nextval('{seq_name}')"
                try:
                    nextval_call = sqlglot.parse_one(nextval_sql, read="postgres")
                except Exception:
                    # Fallback: create Func expression manually
                    nextval_call = exp.Func(
                        this=exp.Identifier(this="nextval"),
                        expressions=[exp.Literal.string(seq_name)]
                    )
                default_constraint = exp.DefaultColumnConstraint(this=nextval_call)
                col_constraints.append(default_constraint)
                auto_inc_column.set("constraints", col_constraints)
    # Table comment will be handled in SQL post-processing to use COMMENT = '...' syntax



def _collect_table_schemas(statements: Iterable[sqlglot.Expression]) -> Dict[str, Dict[str, Any]]:
    """Collect table column order, which columns are BOOL (from TINYINT(1)), and which are JSON.

    Returns: { table_name: { "order": [col1, ...], "bool_cols": set([col,...]), "json_cols": set([col,...]) } }
    Table and column keys are lower-cased with surrounding quotes removed.
    """
    schemas: Dict[str, Dict[str, Any]] = {}
    for st in statements:
        # Only CREATE TABLE statements with a parsed column list are relevant.
        if not (
            isinstance(st, exp.Create)
            and st.args.get("kind")
            and str(st.args["kind"]).upper() == "TABLE"  # str() guards non-string kind values
            and isinstance(st.this, exp.Schema)
        ):
            continue
        table_name_raw = st.this.this if hasattr(st.this, "this") else None
        table_name = _norm_name(table_name_raw)
        if not table_name:
            continue
        order: List[str] = []
        bool_cols: set[str] = set()
        json_cols: set[str] = set()
        for child in list(st.this.expressions or []):
            if not (isinstance(child, exp.ColumnDef) and isinstance(child.this, exp.Identifier)):
                continue
            col_name_raw = child.this.this if hasattr(child.this, "this") else child.this.name
            col_name = _norm_name(col_name_raw)
            if col_name is None:
                continue
            order.append(col_name.lower())
            dtype = child.kind if hasattr(child, "kind") else None
            if isinstance(dtype, exp.DataType):
                # Resolve the type name regardless of representation.
                if isinstance(dtype.this, str):
                    type_name = dtype.this.upper()
                elif hasattr(dtype.this, 'name'):
                    type_name = dtype.this.name.upper()
                else:
                    type_name = str(dtype.this).upper()
                # BUGFIX: also treat columns already rewritten to BOOL as
                # boolean — _rewrite_datatypes may have converted TINYINT(1)
                # before this pass runs, in which case the TINYINT-only check
                # missed them (mirrors the dual check in _rewrite_create_table).
                if type_name in ("BOOL", "BOOLEAN") or _tinyint_one_is_bool(dtype):
                    bool_cols.add(col_name.lower())
                # Check for JSON type
                if type_name == "JSON":
                    json_cols.add(col_name.lower())
        schemas[table_name.lower()] = {"order": order, "bool_cols": bool_cols, "json_cols": json_cols}
    return schemas


def _format_index_column_token(token: str) -> str:
    """Format a column token for index/constraint column lists.
    Keeps ASC/DESC and drops NULLS FIRST/LAST, ensuring only the identifier is quoted once.
    """
    import re
    s = token.strip().rstrip(",")
    # Strip any NULLS FIRST/LAST qualifier
    s = re.sub(r"\bNULLS\s+(FIRST|LAST)\b", "", s, flags=re.IGNORECASE)
    m = re.match(r'^\s*[`\"]?([\w\.]+)[`\"]?\s*(?:(ASC|DESC))?\s*$', s, flags=re.IGNORECASE)
    if not m:
        # Fallback: split by space, quote first token only
        parts = s.split()
        if not parts:
            return s
        ident = parts[0].strip('`"')
        tail = ' ' + ' '.join(parts[1:]) if len(parts) > 1 else ''
        return f'"{ident}"{tail}'
    ident = m.group(1)
    order = m.group(2)
    out = f'"{ident}"'
    if order:
        out += f' {order.upper()}'
    return out



def _generate_kwdb_sql(expression: exp.Expression) -> str:
    """
    Generate KWDB SQL directly from AST, avoiding PostgreSQL dialect output that may override KWDB types.
    This function ensures KWDB-specific type names are preserved.

    Strategy:
    1. We've already modified AST types to KWDB types in _rewrite_datatypes
    2. Use PostgreSQL dialect as base (KWDB is PostgreSQL-compatible)
    3. Only do lightweight type name fixes (PostgreSQL may normalize BOOL->BOOLEAN, INT4->INTEGER)

    Returns the rendered SQL string; never raises (falls back to str(expression)).
    """
    import re
    # Column comments are now extracted in _rewrite_create_table and will be generated
    # as separate COMMENT ON COLUMN statements, so we don't need to handle them here

    # Use PostgreSQL dialect as base, but we'll fix type names afterward
    # The key is that we've already modified the AST types to KWDB types in _rewrite_datatypes
    # So PostgreSQL dialect should output them correctly, but we need to ensure type name consistency
    try:
        sql = expression.sql(dialect="postgres")
    except Exception:
        # Fallback to MySQL dialect if PostgreSQL fails
        try:
            sql = expression.sql(dialect="mysql")
        except Exception:
            # Last resort: node repr — not guaranteed to be valid SQL, but never raises.
            return str(expression)

    # Ensure table names with uppercase letters are quoted to preserve case
    # This is critical for matching INSERT statements that use quoted table names
    if isinstance(expression, exp.Create) and expression.args.get("kind") and expression.args["kind"].upper() == "TABLE":
        table_name = expression.this.this if hasattr(expression.this, "this") else None
        if table_name:
            table_name_str = str(table_name).strip('"`')
            # If table name contains uppercase letters, ensure it's quoted
            if any(c.isupper() for c in table_name_str):
                # Replace unquoted table name with quoted version
                # Pattern: CREATE TABLE table_name or CREATE TABLE "table_name"
                pattern = rf'CREATE\s+TABLE\s+["`]?{re.escape(table_name_str)}["`]?'
                replacement = f'CREATE TABLE "{table_name_str}"'
                sql = re.sub(pattern, replacement, sql, flags=re.IGNORECASE, count=1)

        # Column comments are now generated as separate COMMENT ON COLUMN statements
        # in transform_expression, so we don't add them back to CREATE TABLE here

    # Ensure KWDB type names are used (PostgreSQL dialect may normalize some types)
    # This is a lightweight post-processing that only fixes type names, not complex syntax
    # Order matters: INTEGER must be replaced before INT to avoid partial matches
    # Based on MySQL to KaiwuDB type mapping table
    # NOTE(review): these substitutions are purely textual — a word-boundary match
    # will also fire inside quoted identifiers or string literals (e.g. a column
    # literally named "date" would be rewritten to "TIMESTAMP"). Confirm the
    # source schemas never use these words as identifiers, or move the fix
    # into the AST rewrite instead.
    type_fixes = [
        # Boolean types
        (r"\bBOOLEAN\b", "BOOL"),  # PostgreSQL may output BOOLEAN instead of BOOL

        # Integer types (order matters: INTEGER before INT)
        (r"\bINTEGER\b", "INT4"),  # PostgreSQL may output INTEGER instead of INT4 (must be before INT)
        (r"\bSMALLINT\b", "INT2"),  # PostgreSQL may output SMALLINT instead of INT2
        (r"\bMEDIUMINT\b", "INT4"),  # PostgreSQL may output MEDIUMINT instead of INT4
        (r"\bBIGINT\b", "INT8"),  # PostgreSQL may output BIGINT instead of INT8
        (r"\bINT\b", "INT4"),  # PostgreSQL may output INT instead of INT4 (after INTEGER to avoid partial match)

        # Floating point types
        (r"\bREAL\b", "FLOAT4"),  # PostgreSQL may output REAL instead of FLOAT4
        (r"\bDOUBLE\s+PRECISION\b", "FLOAT8"),  # PostgreSQL may output DOUBLE PRECISION instead of FLOAT8

        # Date/Time types (MySQL DATE/DATETIME -> KWDB TIMESTAMP)
        (r"\bDATE\b", "TIMESTAMP"),  # MySQL DATE -> KWDB TIMESTAMP
        (r"\bDATETIME\b", "TIMESTAMP"),  # MySQL DATETIME -> KWDB TIMESTAMP
        # Note: TIMESTAMP and TIME remain as-is (TIMESTAMP->TIMESTAMP, TIME->TIME)

        # Binary/Blob types (PostgreSQL may output BLOB instead of BYTES)
        (r"\bBLOB\b", "BYTES"),  # PostgreSQL may output BLOB instead of BYTES (for BLOB/MEDIUMBLOB)
    ]
    for pattern, replacement in type_fixes:
        sql = re.sub(pattern, replacement, sql)

    # Handle BYTEA -> BYTES conversion for BLOB types (but keep BYTEA for BINARY with length)
    # Replace BYTEA that is not followed by a length parameter (i.e., not BYTEA(...))
    # This handles BLOB/MEDIUMBLOB which should be BYTES, while BINARY(10) stays as BYTEA(10)
    sql = re.sub(r'\bBYTEA\b(?!\s*\()', 'BYTES', sql)

    # Remove COLLATE clauses from column definitions
    # Pattern: COLLATE collation_name (where collation_name can be quoted or unquoted)
    # This should match: COLLATE utf8mb4_unicode_ci, COLLATE "utf8mb4_unicode_ci", etc.
    # Collation names can contain letters, numbers, underscores, and hyphens
    # Match: COLLATE followed by optional quotes, collation name (with underscores/hyphens), optional quotes, and whitespace
    # Example: "uuid" CHAR(36) COLLATE utf8mb4_unicode_ci NOT NULL -> "uuid" CHAR(36) NOT NULL
    # NOTE(review): like the type fixes above, this regex is textual and could
    # match the word COLLATE inside a string literal — assumed not to occur in
    # practice; verify against real dump data.
    sql = re.sub(r'\s+COLLATE\s+(?:["\']?[\w_-]+["\']?)\s*', ' ', sql, flags=re.IGNORECASE)
    # Clean up any multiple spaces that might result (but preserve single spaces)
    sql = re.sub(r'  +', ' ', sql)

    return sql


def transform_expression(expression: exp.Expression, table_schemas: Optional[Dict[str, Dict[str, Any]]] = None) -> Tuple[str, list[str], list[str]]:
    """Transform one parsed MySQL statement into KWDB SQL.

    Mutates ``expression`` in place (datatype/function rewrites), then renders
    it as KWDB-flavoured SQL text.

    Args:
        expression: sqlglot AST node for a single statement.
        table_schemas: pre-collected schema metadata; accepted for interface
            compatibility (not consulted directly in this function).

    Returns:
        Tuple of (sql, warnings, extra_sql). ``extra_sql`` carries companion
        statements generated for CREATE TABLE: CREATE SEQUENCE for
        AUTO_INCREMENT, COMMENT ON for table/column comments, CHECK
        constraints for ENUM columns, and triggers for
        ON UPDATE CURRENT_TIMESTAMP columns.
    """
    warnings: list[str] = []
    extra_sql: list[str] = []
    _rewrite_datatypes(expression)
    _rewrite_functions(expression)

    # Handle MySQL USE db: emit as-is (KWDB supports USE database)
    try:
        mysql_sql = expression.sql(dialect="mysql").strip()
    except Exception:
        mysql_sql = ""
    if mysql_sql.upper().startswith("USE "):
        import re
        m = re.match(r"USE\s+[`\"]?([A-Za-z0-9_]+)[`\"]?", mysql_sql, flags=re.IGNORECASE)
        db = m.group(1) if m else None
        if db:
            # Emit USE directly
            return f'USE {db}', warnings, extra_sql

    # Normalize DROP DATABASE to KWDB syntax consistently with DROP TABLE handling
    # (quote identifiers, ensure IF EXISTS is preserved/added if present)
    if isinstance(expression, exp.Drop):
        up = (mysql_sql or "").upper()
        if up.startswith("DROP DATABASE"):
            import re
            # Extract optional IF EXISTS and database name
            m = re.match(r"DROP\s+DATABASE\s+(IF\s+EXISTS\s+)?[`\"]?([A-Za-z0-9_]+)[`\"]?", mysql_sql, flags=re.IGNORECASE)
            if m:
                has_if_exists = bool(m.group(1))
                dbname = m.group(2)
                # Always append CASCADE as requested
                kw = f'DROP DATABASE {"IF EXISTS " if has_if_exists else ""}"{dbname}" CASCADE;'
                return kw, warnings, extra_sql
            # Fallback to postgres dialect rendering
            try:
                return expression.sql(dialect="postgres"), warnings, extra_sql
            except Exception:
                pass

    if isinstance(expression, exp.Create):
        kind = expression.args.get("kind")
        if kind and kind.upper() == "DATABASE":
            _rewrite_create_database(expression)
        elif kind and kind.upper() == "TABLE":
            _rewrite_create_table(expression)
            # Create SEQUENCE for AUTO_INCREMENT columns (metadata stashed on
            # the AST node by _rewrite_create_table).
            seq_meta = expression.args.get("__create_sequence")
            if seq_meta:
                table_name, col_name, start_val = seq_meta
                # Names should already be cleaned, but strip quotes just in case
                table_name = str(table_name).strip('"`')
                col_name = str(col_name).strip('"`')
                seq_name = f"{table_name}_{col_name}_seq"
                extra_sql.append(f"CREATE SEQUENCE {seq_name} START WITH {start_val};")

            # Generate COMMENT ON statements for table and columns
            comments_meta = expression.args.get("__table_comments")
            if comments_meta:
                table_name_clean = comments_meta["table_name"]
                table_comment = comments_meta.get("table_comment")
                column_comments = comments_meta.get("column_comments", {})

                # Generate COMMENT ON TABLE
                if table_comment:
                    # Escape single quotes in comment
                    escaped_comment = table_comment.replace("'", "''")
                    extra_sql.append(f"COMMENT ON TABLE {table_name_clean} IS '{escaped_comment}';")

                # Generate COMMENT ON COLUMN for each column
                for col_name, comment_text in column_comments.items():
                    # Escape single quotes in comment
                    escaped_comment = comment_text.replace("'", "''")
                    extra_sql.append(f"COMMENT ON COLUMN {table_name_clean}.{col_name} IS '{escaped_comment}';")

            # Generate CHECK constraints for ENUM columns
            enum_checks = expression.args.get("__enum_check_constraints")
            if enum_checks:
                for table_name_clean, col_name_clean, enum_values in enum_checks:
                    # Build CHECK constraint: column_name IN ('value1', 'value2', ...)
                    # Escape single quotes in enum values
                    escaped_values = []
                    for val in enum_values:
                        escaped_val = val.replace("'", "''")
                        escaped_values.append(f"'{escaped_val}'")
                    values_list = ", ".join(escaped_values)
                    check_sql = f"ALTER TABLE {table_name_clean} ADD CONSTRAINT chk_{table_name_clean}_{col_name_clean}_enum CHECK ({col_name_clean} IN ({values_list}));"
                    extra_sql.append(check_sql)

            # Generate triggers for ON UPDATE CURRENT_TIMESTAMP columns
            # KWDB trigger syntax: CREATE TRIGGER trigger_name trigger_time trigger_event ON tbl_name FOR EACH ROW trigger_body
            # trigger_body supports using NEW.col_name to reference updated row data
            # For BEGIN...END block, use DELIMITER to change statement terminator
            on_update_cols = expression.args.get("__on_update_columns")
            if on_update_cols:
                # Create column-specific trigger for each column
                for table_name_clean, col_name_clean in on_update_cols:
                    # Create trigger using KWDB native syntax with DELIMITER
                    trigger_name = f"update_{table_name_clean}_{col_name_clean}"
                    # KWDB trigger body: use DELIMITER \\, BEGIN/END block, then restore DELIMITER
                    # Use SET NEW.column = value syntax
                    # In Python f-string, \\\\ produces two backslashes in output
                    trigger_sql = f"""DELIMITER \\\\
CREATE TRIGGER {trigger_name}
BEFORE UPDATE ON {table_name_clean} FOR EACH ROW
BEGIN
    SET NEW.{col_name_clean} = CURRENT_TIMESTAMP;
END \\\\
DELIMITER;"""
                    extra_sql.append(trigger_sql)

    if isinstance(expression, exp.Insert):
        # Bypass INSERT transformation: emit original MySQL text
        return expression.sql(dialect="mysql"), warnings, extra_sql

    # Generate KWDB SQL using custom generator (ensures type names are correct)
    sql = _generate_kwdb_sql(expression)
    # Ensure CREATE DATABASE has IF NOT EXISTS
    if isinstance(expression, exp.Create) and expression.args.get("kind") and expression.args["kind"].upper() == "DATABASE":
        import re
        if not re.search(r"CREATE\s+DATABASE\s+IF\s+NOT\s+EXISTS", sql, flags=re.IGNORECASE):
            sql = re.sub(r"CREATE\s+DATABASE\s+", "CREATE DATABASE IF NOT EXISTS ", sql, flags=re.IGNORECASE, count=1)
    # Minimal post-processing for CREATE TABLE (most conversion is done in AST)
    if isinstance(expression, exp.Create) and expression.args.get("kind") and expression.args["kind"].upper() == "TABLE":
        import re
        # Only strip AUTO_INCREMENT=N if PostgreSQL dialect outputs it (shouldn't happen, but defensive)
        sql = re.sub(r"\)\s*AUTO_INCREMENT=\d+", ")", sql)

        # Strip any WITH (COMMENT='...') that PostgreSQL dialect might output
        sql = re.sub(r"\s+WITH\s*\(\s*COMMENT\s*=\s*'[^']+'\s*\)", "", sql, flags=re.IGNORECASE)

        # Remove ON UPDATE CURRENT_TIMESTAMP clauses from column definitions
        # (they are re-expressed as triggers in extra_sql above)
        sql = re.sub(r'\s+ON\s+UPDATE\s+CURRENT_TIMESTAMP', '', sql, flags=re.IGNORECASE)
        # Also handle variations like ON UPDATE NOW()
        sql = re.sub(r'\s+ON\s+UPDATE\s+NOW\s*\(\s*\)', '', sql, flags=re.IGNORECASE)
        # Clean up any double spaces that might result
        sql = re.sub(r'  +', ' ', sql)

    # BUG FIX: this return was previously nested inside the CREATE TABLE
    # branch above, so every other statement kind (SELECT, DELETE, ALTER,
    # CREATE DATABASE, ...) fell off the end and implicitly returned None,
    # forcing callers into a lossy postgres-dialect fallback that dropped
    # warnings, extra_sql, and the KWDB type-name fixes.
    return sql, warnings, extra_sql


def transform_statements(statements: Iterable[sqlglot.Expression], table_schemas: Optional[Dict[str, Any]] = None) -> TransformResult:
    """Transform a sequence of parsed statements into one KWDB SQL script.

    Args:
        statements: parsed sqlglot expressions (any iterable; consumed once).
        table_schemas: optional pre-collected schema metadata; collected here
            when not provided (backward compatibility).

    Returns:
        TransformResult with the joined SQL text (one statement per line,
        semicolon-terminated) and accumulated warnings.
    """
    # BUG FIX: materialize the iterable once. When table_schemas is None the
    # input was previously iterated twice (schema collection + the main loop),
    # so passing a generator produced an empty output script.
    statements = list(statements)
    out_lines: list[str] = []
    warnings: list[str] = []
    # Use provided table_schemas or collect if not provided (for backward compatibility)
    if table_schemas is None:
        table_schemas = _collect_table_schemas(statements)
    for stmt in statements:
        res = transform_expression(stmt, table_schemas)
        if not res:
            # Defensive fallback: render via postgres dialect without extras
            try:
                sql = stmt.sql(dialect="postgres")
            except Exception:
                sql = stmt.sql(dialect="mysql")
            w: list[str] = []
            extra: list[str] = []
        else:
            sql, w, extra = res
        # For CREATE TABLE, sequences must precede the CREATE TABLE statement
        # that references them; COMMENT ON / CHECK constraints / triggers
        # reference the table and therefore follow it.
        if isinstance(stmt, exp.Create) and stmt.args.get("kind") and stmt.args["kind"].upper() == "TABLE":
            sequences = [x for x in extra if x.strip().upper().startswith("CREATE SEQUENCE")]
            comments = [x for x in extra if x.strip().upper().startswith("COMMENT ON")]
            checks = [x for x in extra if x.strip().upper().startswith("ALTER TABLE") and "CHECK" in x.upper()]
            # Covers both plain CREATE TRIGGER and DELIMITER-wrapped bodies.
            triggers = [x for x in extra if "CREATE TRIGGER" in x.upper()]

            for seq in sequences:
                out_lines.append(seq.rstrip("; ") + ";")
            out_lines.append(sql.rstrip("; ") + ";")
            for comment in comments:
                out_lines.append(comment.rstrip("; ") + ";")
            for check in checks:
                out_lines.append(check.rstrip("; ") + ";")
            for trigger in triggers:
                # DELIMITER-wrapped trigger bodies already end with
                # "DELIMITER;" — don't append another semicolon.
                if "DELIMITER" in trigger.upper():
                    out_lines.append(trigger.rstrip())
                else:
                    out_lines.append(trigger.rstrip("; ") + ";")
        else:
            out_lines.append(sql.rstrip("; ") + ";")
            for x in extra:
                out_lines.append(x.rstrip("; ") + ";")
        warnings.extend(w)
    return TransformResult(sql="\n".join(out_lines) + "\n", warnings=warnings)


def _extract_inline_constraints(create_expr: exp.Create) -> Dict[str, Any]:
    """Extract inline constraints/indexes from CREATE TABLE for later emission.
    Returns a dict with keys: primary_keys (List[str]), unique (List[Dict]), indexes (List[Dict]), foreign_keys (List[Dict]).
    Also mutates the AST to remove those constraints from the CREATE TABLE.

    PRIMARY KEY and NOT NULL are deliberately kept inline in the CREATE TABLE;
    UNIQUE, plain INDEX/KEY, FOREIGN KEY, and CHECK are removed from the AST
    and returned so callers can emit them as separate statements.
    """
    result: Dict[str, Any] = {
        "primary_keys": [],  # list[str]
        "unique": [],        # list[{name, columns}]
        "indexes": [],       # list[{name, columns}]
        "foreign_keys": [],  # list[{name, columns, ref_table, ref_columns}]
        "not_null": [],      # list[str]
        "checks": [],        # list[{name, expression_sql}]
    }

    # Only a CREATE TABLE with a column list (exp.Schema) carries inline
    # constraints; anything else (e.g. CREATE TABLE ... AS SELECT) is a no-op.
    if not isinstance(create_expr.this, exp.Schema):
        return result

    table_name = create_expr.this.this if hasattr(create_expr.this, "this") else None

    # Work on a copy of children
    new_children: List[exp.Expression] = []
    for child in list(create_expr.this.expressions or []):
        # Column-level constraints
        if isinstance(child, exp.ColumnDef):
            col_name = None
            if isinstance(child.this, exp.Identifier):
                col_name = child.this.this if hasattr(child.this, "this") else child.this.name

            constraints = list(child.args.get("constraints") or [])
            kept_constraints = []
            for c in constraints:
                # Primary key on column: keep inline in CREATE (do not extract)
                if isinstance(c, (exp.PrimaryKeyColumnConstraint,)):
                    kept_constraints.append(c)
                    continue
                # Unique on column
                if isinstance(c, (exp.UniqueColumnConstraint,)):
                    if col_name:
                        idx_name = f"uq_{table_name}_{col_name}" if table_name and col_name else None
                        result["unique"].append({"name": idx_name, "columns": [str(col_name)]})
                    continue
                # NOT NULL on column: keep inline in CREATE (do not extract)
                if isinstance(c, (exp.NotNullColumnConstraint,)):
                    kept_constraints.append(c)
                    continue
                # CHECK on column
                # hasattr guard: CheckColumnConstraint does not exist in every
                # sqlglot version.
                if hasattr(exp, "CheckColumnConstraint") and isinstance(c, exp.CheckColumnConstraint):
                    # Capture the raw SQL of the check expression
                    try:
                        check_sql = c.this.sql(dialect="postgres") if hasattr(c, "this") else c.sql(dialect="postgres")
                    except Exception:
                        check_sql = c.sql(dialect="mysql") if hasattr(c, "sql") else ""
                    chk_name = f"chk_{table_name}_{col_name}" if table_name and col_name else None
                    result["checks"].append({"name": chk_name, "expression_sql": check_sql})
                    continue
                # Foreign key column-level (rare in MySQL); detect by presence of references
                if hasattr(c, "this") and hasattr(c, "expression"):
                    try:
                        c_sql = c.sql(dialect="mysql")
                    except Exception:
                        c_sql = ""
                    if "REFERENCES" in c_sql.upper() and col_name:
                        # Very lightweight parse
                        # REFERENCES `ref_table`(`ref_col`)
                        import re
                        m = re.search(r"REFERENCES\s+`?([\w\.]+)`?\s*\(([^)]+)\)", c_sql, flags=re.IGNORECASE)
                        ref_table = m.group(1) if m else ""
                        ref_cols = [s.strip(" `") for s in (m.group(2).split(",") if m else [])]
                        fk_name = f"fk_{table_name}_{col_name}_to_{ref_table}" if table_name and col_name and ref_table else None
                        result["foreign_keys"].append({
                            "name": fk_name,
                            "columns": [str(col_name)],
                            "ref_table": ref_table,
                            "ref_columns": ref_cols,
                        })
                    else:
                        kept_constraints.append(c)
                else:
                    # Unrecognized constraint shape: keep it inline untouched.
                    kept_constraints.append(c)
            if kept_constraints:
                child.set("constraints", kept_constraints)
            else:
                child.set("constraints", [])
            new_children.append(child)
            continue

        # Table-level constraints / indexes (child is not ColumnDef).
        # Class-name matching is used because node classes differ across
        # sqlglot versions — hence the duck-typed checks below.
        clsname = child.__class__.__name__.upper()
        handled = False
        # PRIMARY KEY (...): keep inline in CREATE (do not extract)
        # UNIQUE [INDEX|KEY] name (cols) or UNIQUE (cols)
        if not handled and (isinstance(child, getattr(exp, "Unique", tuple())) or "UNIQUE" in clsname or clsname.endswith("UNIQUE")):
            idx_name = None
            if hasattr(child, "this") and isinstance(child.this, exp.Identifier):
                idx_name = child.this.name
            cols = []
            for ce in getattr(child, "expressions", []) or []:
                if isinstance(ce, exp.Identifier):
                    cols.append(ce.name)
                elif hasattr(ce, "this") and isinstance(ce.this, exp.Identifier):
                    cols.append(ce.this.name)
            if cols:
                if not idx_name and table_name:
                    idx_name = f"uq_{table_name}_{'_'.join(cols)}"
                result["unique"].append({"name": idx_name, "columns": [str(c) for c in cols]})
                handled = True
        # KEY/INDEX name (cols) — exclude PRIMARY KEY nodes
        if not handled and (
            isinstance(child, getattr(exp, "Index", tuple()))
            or (("INDEX" in clsname or clsname.endswith("INDEX") or clsname.endswith("KEY")) and "PRIMARY" not in clsname)
        ):
            idx_name = None
            if hasattr(child, "this") and isinstance(child.this, exp.Identifier):
                idx_name = child.this.name
            cols = []
            for ce in getattr(child, "expressions", []) or []:
                if isinstance(ce, exp.Identifier):
                    cols.append(ce.name)
                elif hasattr(ce, "this") and isinstance(ce.this, exp.Identifier):
                    cols.append(ce.this.name)
            if cols:
                if not idx_name and table_name:
                    idx_name = f"idx_{table_name}_{'_'.join(cols)}"
                result["indexes"].append({"name": idx_name, "columns": [str(c) for c in cols]})
                handled = True
        # FOREIGN KEY (... ) REFERENCES ...
        if not handled and (isinstance(child, getattr(exp, "ForeignKey", tuple())) or "FOREIGN" in clsname or "REFERENCE" in clsname):
            cols: List[str] = []
            for ce in getattr(child, "expressions", []) or []:
                if isinstance(ce, exp.Identifier):
                    cols.append(ce.name)
                elif hasattr(ce, "this") and isinstance(ce.this, exp.Identifier):
                    cols.append(ce.this.name)
            ref_table = None
            ref_cols: List[str] = []
            # Try to access references attribute
            # NOTE(review): assumes refs.this is an exp.Schema wrapping the
            # referenced table — confirm against the sqlglot version in use.
            refs = getattr(child, "references", None)
            if refs is not None:
                if isinstance(refs.this, exp.Schema):
                    ref_table = refs.this.name
                if refs.expressions:
                    for rc in refs.expressions:
                        if isinstance(rc, exp.Identifier):
                            ref_cols.append(rc.name)
            if not ref_table:
                # Try parse from SQL if not available
                try:
                    csql = child.sql(dialect="mysql")
                except Exception:
                    csql = ""
                import re
                m = re.search(r"REFERENCES\s+`?([\w\.]+)`?\s*\(([^)]+)\)", csql, flags=re.IGNORECASE)
                if m:
                    ref_table = m.group(1)
                    ref_cols = [s.strip(" `") for s in m.group(2).split(",")]
            fk_name = None
            if hasattr(child, "this") and isinstance(child.this, exp.Identifier):
                fk_name = child.this.name
            if not fk_name and table_name and cols and ref_table:
                fk_name = f"fk_{table_name}_{'_'.join(cols)}_to_{ref_table}"
            # Only record fully-resolved FKs; partial ones fall through and
            # stay inline via the `not handled` branch below.
            if cols and ref_table and ref_cols:
                result["foreign_keys"].append({
                    "name": fk_name,
                    "columns": [str(c) for c in cols],
                    "ref_table": ref_table,
                    "ref_columns": [str(c) for c in ref_cols],
                })
                handled = True
        # CHECK ( ... )
        if not handled and (isinstance(child, getattr(exp, "Check", tuple())) or "CHECK" in clsname):
            # extract expression SQL
            try:
                # child.this may hold the boolean expression
                expr_sql = child.this.sql(dialect="postgres") if hasattr(child, "this") else child.sql(dialect="postgres")
            except Exception:
                expr_sql = child.sql(dialect="mysql") if hasattr(child, "sql") else ""
            chk_name = None
            if hasattr(child, "this") and isinstance(child.this, exp.Identifier):
                chk_name = child.this.name
            if not chk_name and table_name:
                # Fall back to a positional name so the constraint is still addressable.
                chk_name = f"chk_{table_name}_{len(result['checks'])+1}"
            result["checks"].append({"name": chk_name, "expression_sql": expr_sql})
            handled = True

        if not handled:
            # Keep child if we didn't recognize as constraint/index
            new_children.append(child)
        # else: drop it from CREATE TABLE

    # Primary keys are kept inline; no deduplication/extraction needed

    create_expr.this.set("expressions", new_children)
    return result


def _render_transformed(stmt: sqlglot.Expression, table_schemas: Optional[Dict[str, Any]]) -> Tuple[str, List[str], List[str]]:
    """Render *stmt* through transform_expression, with a raw-SQL fallback.

    Returns (sql, warnings, extra_statements). When transform_expression
    yields a falsy result, the statement is rendered directly via sqlglot
    (postgres dialect first, mysql as a last resort) with empty warnings
    and extras.
    """
    res = transform_expression(stmt, table_schemas)
    if res:
        return res
    try:
        sql = stmt.sql(dialect="postgres")
    except Exception:
        sql = stmt.sql(dialect="mysql")
    return sql, [], []


def _append_rendered(bucket: List[str], sql: str, extra: List[str]) -> None:
    """Append *sql* and every *extra* statement to *bucket*, ';'-terminated."""
    bucket.append(sql.rstrip("; ") + ";")
    for x in extra:
        bucket.append(x.rstrip("; ") + ";")


def _strip_check_wrapper(expr_sql: str) -> str:
    """Return the bare boolean expression from a possibly 'CHECK (...)'-wrapped string."""
    import re
    if expr_sql.strip().upper().startswith("CHECK"):
        m = re.search(r"CHECK\s*\((.*)\)\s*$", expr_sql, flags=re.IGNORECASE | re.DOTALL)
        if m:
            return m.group(1)
    return expr_sql


def transform_statements_split(statements: Iterable[sqlglot.Expression], table_schemas: Optional[Dict[str, Any]] = None) -> TransformSplitResult:
    """Transform statements into three separated outputs: DDL (schema without
    indexes/constraints), DML (INSERTs), and deferred constraints/indexes.

    Args:
        statements: parsed sqlglot expressions. Materialized internally, so a
            generator can be passed safely even when table_schemas must be
            collected from it first.
        table_schemas: optional pre-collected schema metadata; collected from
            *statements* when omitted (backward compatibility).

    Returns:
        TransformSplitResult with ddl_sql, dml_sql, constraints_sql (indexes
        and CHECKs first, foreign keys last) and all accumulated warnings.
    """
    import re

    ddl_lines: List[str] = []
    dml_lines: List[str] = []
    cons_lines: List[str] = []
    fk_lines: List[str] = []  # collected separately so FKs always come last
    warnings: List[str] = []

    # Materialize: _collect_table_schemas below would otherwise exhaust a
    # generator before the main loop ever ran.
    statements = list(statements)
    if table_schemas is None:
        table_schemas = _collect_table_schemas(statements)

    for stmt in statements:
        # --- CREATE TABLE: strip inline constraints, emit them deferred ---
        if isinstance(stmt, exp.Create) and stmt.args.get("kind") and stmt.args["kind"].upper() == "TABLE":
            constraints = _extract_inline_constraints(stmt)
            sql, w, extra = _render_transformed(stmt, table_schemas)
            table_name = stmt.this.this if hasattr(stmt.this, "this") else None
            # Regex fallback: extract lingering inline indexes/constraints not
            # represented in the AST.
            try:
                if table_name:
                    # [UNIQUE] KEY|INDEX name (cols) inside CREATE TABLE (...)
                    def _emit_index(m):
                        unique_kw = m.group(1) or ""
                        name = (m.group(2) or "").strip('"')
                        cols_raw = m.group(3)
                        raw_tokens = [c for c in cols_raw.split(',') if c.strip()]
                        cols_fmt = [_format_index_column_token(tok) for tok in raw_tokens]
                        if cols_fmt:
                            tn = f'"{_norm_name(table_name) or table_name}"'
                            # derive bare names for index naming
                            bare = [t.split()[0].strip('"') for t in cols_fmt]
                            if unique_kw.strip().upper().startswith("UNIQUE"):
                                idx_name = name or f"uq_{_norm_name(table_name) or table_name}_{'_'.join(bare)}"
                                cons_lines.append(f'CREATE UNIQUE INDEX {idx_name} ON {tn} (' + ", ".join(cols_fmt) + ");")
                            else:
                                idx_name = name or f"idx_{_norm_name(table_name) or table_name}_{'_'.join(bare)}"
                                cons_lines.append(f'CREATE INDEX {idx_name} ON {tn} (' + ", ".join(cols_fmt) + ");")
                        return ""  # remove from CREATE TABLE

                    # (?:KEY|INDEX) covers both keywords, so a separate
                    # INDEX-only alternative would be dead code.
                    idx_pat = re.compile(r"\s*,\s*(UNIQUE\s+)?(?:KEY|INDEX)\s+\"?([^\"(\s]+)?\"?\s*\(([^)]*)\)", re.IGNORECASE)
                    sql = re.sub(idx_pat, _emit_index, sql)

                    # Plain UNIQUE [name] (cols) — MySQL allows this form too.
                    def _emit_plain_unique(m):
                        name = (m.group(1) or '').strip('"')
                        cols_raw = m.group(2)
                        raw_tokens = [c for c in cols_raw.split(',') if c.strip()]
                        cols_fmt = [_format_index_column_token(tok) for tok in raw_tokens]
                        if cols_fmt:
                            tn = f'"{_norm_name(table_name) or table_name}"'
                            bare = [t.split()[0].strip('"') for t in cols_fmt]
                            idx_name = name or f"uq_{_norm_name(table_name) or table_name}_{'_'.join(bare)}"
                            cons_lines.append(f'CREATE UNIQUE INDEX {idx_name} ON {tn} (' + ", ".join(cols_fmt) + ");")
                        return ""
                    plain_unique_pat = re.compile(r"\s*,\s*UNIQUE\s+\"?([^\"(\s]+)?\"?\s*\(([^)]*)\)", re.IGNORECASE)
                    sql = re.sub(plain_unique_pat, _emit_plain_unique, sql)

                    # Inline FOREIGN KEY constraints → deferred ALTER TABLE.
                    def _emit_fk(m):
                        cname = (m.group(1) or "").strip('"')
                        local_cols = [c.strip().strip('"') for c in m.group(2).split(',')]
                        ref_table = m.group(3).strip('"')
                        ref_cols = [c.strip().strip('"') for c in m.group(4).split(',')]
                        fk_name = cname or f"fk_{table_name}_{'_'.join(local_cols)}_to_{ref_table}"
                        fk_lines.append(
                            f'ALTER TABLE {table_name} ADD CONSTRAINT {fk_name} FOREIGN KEY (' + ", ".join(f'"{c}"' for c in local_cols) + f') REFERENCES "{ref_table}" (' + ", ".join(f'"{c}"' for c in ref_cols) + ");"
                        )
                        return ""

                    fk_pat = re.compile(r"\s*,\s*CONSTRAINT\s+\"?([^\"\s]+)?\"?\s+FOREIGN\s+KEY\s*\(([^)]*)\)\s+REFERENCES\s+\"?([^\"\s]+)\"?\s*\(([^)]*)\)", re.IGNORECASE)
                    sql = re.sub(fk_pat, _emit_fk, sql)

                    # PRIMARY KEY and NOT NULL stay inline in CREATE TABLE.

                    # Inline CHECK constraints → deferred ALTER TABLE.
                    def _emit_check(m):
                        cname = (m.group(1) or "").strip('"')
                        expr = m.group(2)
                        chk_name = cname or f"chk_{table_name}_{len(cons_lines)+1}"
                        cons_lines.append(f'ALTER TABLE {table_name} ADD CONSTRAINT {chk_name} CHECK ({expr});')
                        return ""
                    chk_pat = re.compile(r"\s*,\s*CONSTRAINT\s+\"?([^\"\s]+)?\"?\s+CHECK\s*\((.*?)\)", re.IGNORECASE | re.DOTALL)
                    sql = re.sub(chk_pat, _emit_check, sql)
            except Exception:
                # Best-effort fallback: leave sql as rendered if regex pass fails.
                pass

            # Separate SEQUENCEs, COMMENT ONs, CHECK alters, and triggers out
            # of the extra statements produced by transform_expression.
            sequences = [x for x in extra if x.strip().upper().startswith("CREATE SEQUENCE")]
            comments = [x for x in extra if x.strip().upper().startswith("COMMENT ON")]
            checks = [x for x in extra if x.strip().upper().startswith("ALTER TABLE") and "CHECK" in x.upper()]
            # Triggers may start with DELIMITER or CREATE TRIGGER
            triggers = [x for x in extra if "CREATE TRIGGER" in x.upper() or (x.strip().upper().startswith("DELIMITER") and "CREATE TRIGGER" in x.upper())]

            # DDL order: SEQUENCEs first, then CREATE TABLE, then COMMENT ON.
            for seq in sequences:
                ddl_lines.append(seq.rstrip("; ") + ";")
            ddl_lines.append(sql.rstrip("; ") + ";")
            for comment in comments:
                ddl_lines.append(comment.rstrip("; ") + ";")
            # ENUM CHECK constraints go to the constraints bucket.
            for check in checks:
                cons_lines.append(check.rstrip("; ") + ";")
            for trigger in triggers:
                # DELIMITER-wrapped triggers already terminate themselves;
                # don't append an extra semicolon.
                if "DELIMITER" in trigger.upper():
                    cons_lines.append(trigger.rstrip())
                else:
                    cons_lines.append(trigger.rstrip("; ") + ";")

            # NOT NULL and PRIMARY KEY stay inline; nothing to emit for them.
            if table_name:
                # Unique indexes from AST-extracted constraints.
                for u in constraints.get("unique", []):
                    if u.get("columns"):
                        name = u.get("name") or f"uq_{table_name}_{'_'.join(u['columns'])}"
                        cols = ", ".join(f'"{c}"' for c in u["columns"])
                        cons_lines.append(f'CREATE UNIQUE INDEX {name} ON {table_name} ({cols});')
                # Plain indexes.
                for idx in constraints.get("indexes", []):
                    if idx.get("columns"):
                        name = idx.get("name") or f"idx_{table_name}_{'_'.join(idx['columns'])}"
                        cols = ", ".join(f'"{c}"' for c in idx["columns"])
                        cons_lines.append(f'CREATE INDEX {name} ON {table_name} ({cols});')
                # Emit each CHECK exactly once (the previous revision iterated
                # this list twice, duplicating every constraint in the output).
                for chk in constraints.get("checks", []):
                    if chk.get("expression_sql"):
                        name = chk.get("name") or f"chk_{table_name}_{len(cons_lines)+1}"
                        expr = _strip_check_wrapper(chk["expression_sql"])
                        cons_lines.append(f'ALTER TABLE {table_name} ADD CONSTRAINT {name} CHECK ({expr});')
                # Foreign keys are buffered separately so they land last.
                for fk in constraints.get("foreign_keys", []):
                    if fk.get("columns") and fk.get("ref_table") and fk.get("ref_columns"):
                        name = fk.get("name") or f"fk_{table_name}_{'_'.join(fk['columns'])}_to_{fk['ref_table']}"
                        cols = ", ".join(f'"{c}"' for c in fk["columns"])
                        ref_cols = ", ".join(f'"{c}"' for c in fk["ref_columns"])
                        fk_lines.append(
                            f'ALTER TABLE {table_name} ADD CONSTRAINT {name} FOREIGN KEY ({cols}) REFERENCES "{fk["ref_table"]}" ({ref_cols});'
                        )
            warnings.extend(w)
            continue

        # --- Other CREATE statements ---
        if isinstance(stmt, exp.Create):
            kind = stmt.args.get("kind")
            if kind is None:
                # CREATE SEQUENCE and friends stay with the DDL.
                sql, w, extra = _render_transformed(stmt, table_schemas)
                _append_rendered(ddl_lines, sql, extra)
                warnings.extend(w)
                continue
            if kind.upper() == "INDEX":
                # Explicit CREATE INDEX goes to the constraints bucket.
                sql, w, extra = _render_transformed(stmt, table_schemas)
                _append_rendered(cons_lines, sql, extra)
                warnings.extend(w)
                continue
            # Any other CREATE kind falls through to the generic DDL path.

        if isinstance(stmt, exp.Alter):
            # ALTER ... ADD CONSTRAINT/INDEX goes to the constraints bucket;
            # FK alters are deferred behind everything else.
            sql, w, extra = _render_transformed(stmt, table_schemas)
            target = fk_lines if "FOREIGN KEY" in sql.upper() else cons_lines
            _append_rendered(target, sql, extra)
            warnings.extend(w)
            continue

        # --- DML ---
        if isinstance(stmt, exp.Insert):
            sql, w, extra = _render_transformed(stmt, table_schemas)
            _append_rendered(dml_lines, sql, extra)
            warnings.extend(w)
            continue

        # Everything else (DATABASE, DROP, COMMENT, ...) stays with the DDL.
        sql, w, extra = _render_transformed(stmt, table_schemas)
        _append_rendered(ddl_lines, sql, extra)
        warnings.extend(w)

    # Plain constraints/indexes first, foreign keys last.
    constraints_combined = cons_lines + fk_lines
    return TransformSplitResult(
        ddl_sql="\n".join(ddl_lines) + ("\n" if ddl_lines else ""),
        dml_sql="\n".join(dml_lines) + ("\n" if dml_lines else ""),
        constraints_sql="\n".join(constraints_combined) + ("\n" if constraints_combined else ""),
        warnings=warnings,
    )



