from __future__ import annotations

from dataclasses import dataclass
from pathlib import Path
from typing import Iterable, List, Optional
import re

import sqlglot
from sqlglot.errors import ParseError


@dataclass
class ParsedStatement:
    """One SQL statement from the input: a sqlglot AST, a raw passthrough, or a parse failure."""
    # Raw statement text; non-empty only for passthrough INSERTs and failed parses.
    raw_sql: str
    # Parsed sqlglot AST; None for passthrough INSERTs and parse failures.
    ast: Optional[sqlglot.Expression] = None
    # sqlglot ParseError message when parsing failed; None otherwise.
    error: Optional[str] = None
    original_sql: Optional[str] = None  # Original SQL before preprocessing (for FULLTEXT INDEX extraction)


def iter_parse_sql_text(sql_text: str, *, read_dialect: str = "mysql") -> Iterable[ParsedStatement]:
    """Parse SQL text statement by statement, passing INSERT statements through raw.

    Statements are split on ';' (quote/comment aware).  LOCK TABLES /
    UNLOCK TABLES statements are dropped entirely (MySQL-specific table
    locking, not needed in PostgreSQL).  INSERT statements are yielded
    verbatim with no AST; everything else is parsed with sqlglot after
    FULLTEXT INDEX clauses are stripped (the pre-strip SQL is kept in
    ``original_sql`` so the FULLTEXT definitions can be extracted later).
    """
    for raw in _split_sql_statements(sql_text):
        text = _strip_comments(raw)
        if not text.strip():
            continue
        head = text.lstrip().upper()
        # MySQL-only table locking has no PostgreSQL counterpart; drop it.
        if head.startswith(("LOCK TABLES", "UNLOCK TABLES")):
            continue
        if head.startswith("INSERT"):
            trimmed = raw.strip()
            passthrough = trimmed if trimmed.endswith(';') else trimmed + ';'
            yield ParsedStatement(raw_sql=passthrough, ast=None)
            continue
        # Strip FULLTEXT INDEX before parsing; remember the original SQL
        # when it contained one so it can be extracted later.
        upper_raw = raw.upper()
        has_fulltext = "FULLTEXT" in upper_raw and "CREATE TABLE" in upper_raw
        original_sql = raw if has_fulltext else None
        try:
            ast = sqlglot.parse_one(_preprocess_fulltext_index(raw), read=read_dialect)
        except ParseError as parse_err:
            yield ParsedStatement(raw_sql=raw, ast=None, error=str(parse_err), original_sql=original_sql)
        else:
            yield ParsedStatement(raw_sql="", ast=ast, original_sql=original_sql)


def _strip_comments(s: str) -> str:
    s = re.sub(r"/\*.*?\*/", " ", s, flags=re.DOTALL)
    s = re.sub(r"(?m)^\s*--.*$", " ", s)
    return s


def _preprocess_fulltext_index(sql: str) -> str:
    """Preprocess SQL to remove FULLTEXT INDEX from CREATE TABLE statements.
    This is needed because SQLGlot may not parse FULLTEXT INDEX correctly.
    The FULLTEXT INDEX will be extracted later by regex in transformer/pipeline.
    """
    # Pattern to match FULLTEXT INDEX or FULLTEXT KEY inside CREATE TABLE
    # Match: , FULLTEXT INDEX "name" (columns) or , FULLTEXT KEY "name" (columns)
    # This should be inside a CREATE TABLE statement
    if "CREATE TABLE" in sql.upper() and "FULLTEXT" in sql.upper():
        # Remove FULLTEXT INDEX definitions from CREATE TABLE
        # Pattern: , FULLTEXT (INDEX|KEY) "name" (columns)
        fulltext_pattern = re.compile(
            r",\s*FULLTEXT\s+(?:INDEX|KEY)\s+\"?[^\"(\s]+\"?\s*\([^)]*\)",
            re.IGNORECASE
        )
        sql = fulltext_pattern.sub("", sql)
    return sql


def _split_sql_statements(sql_text: str) -> Iterable[str]:
    buf: List[str] = []
    in_single = in_double = in_backtick = False
    in_ml = in_sl = False
    i = 0
    n = len(sql_text)
    while i < n:
        ch = sql_text[i]
        nxt = sql_text[i+1] if i+1 < n else ''
        if in_sl:
            buf.append(ch)
            if ch == '\n':
                in_sl = False
            i += 1
            continue
        if in_ml:
            buf.append(ch)
            if ch == '*' and nxt == '/':
                buf.append(nxt)
                i += 2
                in_ml = False
                continue
            i += 1
            continue
        if not (in_single or in_double or in_backtick):
            if ch == '-' and nxt == '-':
                in_sl = True
                buf.append(ch)
                buf.append(nxt)
                i += 2
                continue
            if ch == '/' and nxt == '*':
                in_ml = True
                buf.append(ch)
                buf.append(nxt)
                i += 2
                continue
        if ch == "'" and not (in_double or in_backtick):
            in_single = not in_single
            buf.append(ch)
            i += 1
            continue
        if ch == '"' and not (in_single or in_backtick):
            in_double = not in_double
            buf.append(ch)
            i += 1
            continue
        if ch == '`' and not (in_single or in_double):
            in_backtick = not in_backtick
            buf.append(ch)
            i += 1
            continue
        if ch == ';' and not (in_single or in_double or in_backtick or in_ml or in_sl):
            stmt = ''.join(buf).strip()
            if stmt:
                yield stmt
            buf = []
            i += 1
            continue
        buf.append(ch)
        i += 1
    tail = ''.join(buf).strip()
    if tail:
        yield tail


def iter_parse_sql_file(file_path: Path | str, *, read_dialect: str = "mysql", chunk_size: int = 1024 * 1024) -> Iterable[ParsedStatement]:
    """Parse SQL file in chunks, but bypass INSERT statements (passthrough raw).
    
    This is a memory-efficient version that reads the file in chunks instead of
    loading the entire file into memory at once.
    
    Args:
        file_path: Path to SQL file
        read_dialect: SQL dialect for parsing
        chunk_size: Size of each chunk to read from file (default 1MB)
    
    Yields:
        ParsedStatement objects
    """
    file_path = Path(file_path)
    if not file_path.exists():
        raise FileNotFoundError(f"File not found: {file_path}")
    
    # Use the existing _split_sql_statements logic but adapted for file reading
    # We'll read in chunks and use a sliding buffer approach
    # State must persist across chunks
    buf: List[str] = []
    in_single = in_double = in_backtick = False
    in_ml = in_sl = False
    
    with open(file_path, 'r', encoding='utf-8') as f:
        while True:
            chunk = f.read(chunk_size)
            if not chunk:
                break
            
            i = 0
            n = len(chunk)
            while i < n:
                ch = chunk[i]
                nxt = chunk[i+1] if i+1 < n else ''
                
                if in_sl:
                    buf.append(ch)
                    if ch == '\n':
                        in_sl = False
                    i += 1
                    continue
                if in_ml:
                    buf.append(ch)
                    if ch == '*' and nxt == '/':
                        buf.append(nxt)
                        i += 2
                        in_ml = False
                        continue
                    i += 1
                    continue
                if not (in_single or in_double or in_backtick):
                    if ch == '-' and nxt == '-':
                        in_sl = True
                        buf.append(ch)
                        buf.append(nxt)
                        i += 2
                        continue
                    if ch == '/' and nxt == '*':
                        in_ml = True
                        buf.append(ch)
                        buf.append(nxt)
                        i += 2
                        continue
                if ch == "'" and not (in_double or in_backtick):
                    in_single = not in_single
                    buf.append(ch)
                    i += 1
                    continue
                if ch == '"' and not (in_single or in_backtick):
                    in_double = not in_double
                    buf.append(ch)
                    i += 1
                    continue
                if ch == '`' and not (in_single or in_double):
                    in_backtick = not in_backtick
                    buf.append(ch)
                    i += 1
                    continue
                if ch == ';' and not (in_single or in_double or in_backtick or in_ml or in_sl):
                    stmt = ''.join(buf).strip()
                    if stmt:
                        # Process this complete statement
                        cleaned = _strip_comments(stmt)
                        if cleaned.strip():
                            # Filter out LOCK TABLES and UNLOCK TABLES statements (MySQL-specific table locking)
                            cleaned_upper = cleaned.lstrip().upper()
                            if cleaned_upper.startswith("LOCK TABLES") or cleaned_upper.startswith("UNLOCK TABLES"):
                                # Skip these statements - they're not needed in PostgreSQL
                                buf = []
                                i += 1
                                continue
                            if cleaned_upper.startswith("INSERT"):
                                s = stmt if stmt.strip().endswith(';') else stmt.strip() + ';'
                                yield ParsedStatement(raw_sql=s, ast=None)
                            else:
                                # Preprocess to remove FULLTEXT INDEX before parsing
                                preprocessed_stmt = _preprocess_fulltext_index(stmt)
                                # Save original SQL if it contains FULLTEXT INDEX (for later extraction)
                                original_sql = stmt if "FULLTEXT" in stmt.upper() and "CREATE TABLE" in stmt.upper() else None
                                try:
                                    ast = sqlglot.parse_one(preprocessed_stmt, read=read_dialect)
                                    yield ParsedStatement(raw_sql="", ast=ast, original_sql=original_sql)
                                except ParseError as parse_err:
                                    yield ParsedStatement(raw_sql=stmt, ast=None, error=str(parse_err), original_sql=original_sql)
                    buf = []
                    i += 1
                    continue
                buf.append(ch)
                i += 1
    
    # Process remaining statement if any
    tail = ''.join(buf).strip()
    if tail:
        cleaned = _strip_comments(tail)
        if cleaned.strip():
            # Filter out LOCK TABLES and UNLOCK TABLES statements (MySQL-specific table locking)
            cleaned_upper = cleaned.lstrip().upper()
            if cleaned_upper.startswith("LOCK TABLES") or cleaned_upper.startswith("UNLOCK TABLES"):
                # Skip these statements - they're not needed in PostgreSQL
                return
            if cleaned_upper.startswith("INSERT"):
                s = tail if tail.strip().endswith(';') else tail.strip() + ';'
                yield ParsedStatement(raw_sql=s, ast=None)
            else:
                # Preprocess to remove FULLTEXT INDEX before parsing
                preprocessed_tail = _preprocess_fulltext_index(tail)
                # Save original SQL if it contains FULLTEXT INDEX (for later extraction)
                original_sql = tail if "FULLTEXT" in tail.upper() and "CREATE TABLE" in tail.upper() else None
                try:
                    ast = sqlglot.parse_one(preprocessed_tail, read=read_dialect)
                    yield ParsedStatement(raw_sql="", ast=ast, original_sql=original_sql)
                except ParseError as parse_err:
                    yield ParsedStatement(raw_sql=tail, ast=None, error=str(parse_err), original_sql=original_sql)


