"""
DDL并行处理模块：预扫描+分表并行优化
实现：
1. 预扫描：使用状态机流式扫描找到所有CREATE TABLE边界（文件偏移量）
2. 并行处理：将(offset_start, offset_end)列表分给12个worker并行处理DDL转换
3. 状态机流式扫描：行级状态机，遇到CREATE TABLE就触发转换，内存占用常数级
"""

from __future__ import annotations

from dataclasses import dataclass
from pathlib import Path
from typing import List, Tuple, Optional, Dict, Any
import re
import logging
import time
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
import multiprocessing
import os

from sqlglot import exp
from sqlglot.errors import ParseError
import sqlglot

from .transformer import (
    transform_expression, 
    _extract_table_schema_from_create,
    _extract_inline_constraints,
    _format_index_column_token,
    _norm_name
)

logger = logging.getLogger(__name__)


def _scan_chunk_worker(file_path: Path, chunk_start: int, chunk_end: int) -> List[TableBoundary]:
    """
    Worker: scan one chunk of *file_path* for CREATE TABLE statement boundaries.

    Defined at module level so it can be pickled for ProcessPoolExecutor.

    Args:
        file_path: path of the SQL file being scanned.
        chunk_start: absolute byte offset where this chunk begins.
        chunk_end: absolute byte offset where this chunk ends (exclusive).

    Returns:
        TableBoundary objects with offsets converted to absolute file
        positions. Statements that cross the chunk boundary are skipped:
        the boundary finder returns None for statements it cannot fully
        delimit inside this chunk.
    """
    boundaries: List[TableBoundary] = []

    try:
        # Binary mode keeps seek offsets exact; decode only this chunk.
        with open(file_path, 'rb') as f:
            f.seek(chunk_start)
            chunk_bytes = f.read(chunk_end - chunk_start)
        chunk_text = chunk_bytes.decode('utf-8', errors='replace')

        # Candidate CREATE TABLE keywords within the chunk.
        # Uses the module-level `re` import (previously re-imported locally).
        create_table_pattern = re.compile(
            r'(?i)\bCREATE\s+TABLE\s+',
            re.MULTILINE
        )

        scanner = CreateTableScanner()
        for match in create_table_pattern.finditer(chunk_text):
            # Position relative to the start of this chunk.
            relative_pos = match.start()
            # NOTE: only statements fully contained in this chunk are
            # delimited; cross-chunk statements yield None here.
            boundary = scanner._find_table_boundary(chunk_text, relative_pos, len(chunk_bytes))
            if boundary:
                # Convert chunk-relative offsets to absolute file offsets.
                boundary.offset_start += chunk_start
                boundary.offset_end += chunk_start
                boundaries.append(boundary)

    except Exception as e:
        # Best-effort: a failed chunk is logged, not fatal to the whole scan.
        # Use the module-level logger (the old local re-import shadowed it).
        logger.warning(f"Error scanning chunk [{chunk_start}:{chunk_end}]: {e}")

    return boundaries


@dataclass
class TableBoundary:
    """Byte-offset boundary of one CREATE TABLE statement within a file."""
    offset_start: int  # absolute byte offset where the statement begins
    offset_end: int    # byte offset just past the terminating ';' (or end of text)
    table_name: Optional[str] = None  # table name; may be None during pre-scan


class CreateTableScanner:
    """
    Locate the byte boundaries of every CREATE TABLE statement in a SQL file.

    Candidate statements are found with a case-insensitive regex; each
    candidate is then delimited by a character-level state machine that
    tracks quote state, comment state, and parenthesis depth, so multi-line
    statements are handled and memory use stays roughly constant.

    NOTE(review): backslash-escaped quotes (e.g. ``\\'``) are not handled by
    the state machine and would flip the quote state incorrectly; doubled
    quotes ('' / "") toggle twice and net out correctly.
    """

    def __init__(self):
        # Quote / comment flags. NOTE(review): _find_table_boundary keeps its
        # own local copies of these; the instance attributes are only touched
        # by reset() and retained for a future streaming implementation.
        self.in_single_quote = False
        self.in_double_quote = False
        self.in_backtick = False
        self.in_single_line_comment = False
        self.in_multi_line_comment = False

        # CREATE TABLE detection state machine.
        self.state = 'NORMAL'  # NORMAL, MATCHING_CREATE, MATCHING_TABLE, IN_TABLE_BODY, FINDING_SEMICOLON
        self.create_table_start = None
        self.paren_depth = 0

        # Buffer of recent characters intended for cross-chunk keyword
        # matching. NOTE(review): not consumed anywhere yet.
        self.keyword_buffer = []

    def reset(self):
        """Reset per-statement state (keyword_buffer survives across chunks)."""
        self.in_single_quote = False
        self.in_double_quote = False
        self.in_backtick = False
        self.in_single_line_comment = False
        self.in_multi_line_comment = False
        self.state = 'NORMAL'
        self.create_table_start = None
        self.paren_depth = 0
        # keyword_buffer intentionally kept: a CREATE keyword may straddle chunks.

    def scan_file(self, file_path: Path, max_workers: int = 12) -> List[TableBoundary]:
        """
        Scan *file_path* and return the boundaries of all CREATE TABLE statements.

        Files under 256MB are scanned in a single pass; larger files are split
        into 256MB chunks scanned by parallel worker processes, and the results
        are merged, sorted, and de-duplicated here.

        Args:
            file_path: path of the SQL file.
            max_workers: process-pool size for the parallel path.

        Returns:
            Sorted list of TableBoundary with absolute byte offsets.
        """
        file_size = file_path.stat().st_size
        chunk_size = 256 * 1024 * 1024  # 256MB chunks

        # Small files: a single-threaded scan avoids process-pool overhead.
        if file_size < chunk_size:
            return self._scan_file_single_thread(file_path)

        logger.info(f"File size: {file_size / (1024*1024):.2f} MB, using parallel scanning with {max_workers} workers")

        # Compute contiguous, non-overlapping chunk ranges.
        chunks = []
        offset = 0
        while offset < file_size:
            chunk_end = min(offset + chunk_size, file_size)
            chunks.append((offset, chunk_end))
            offset = chunk_end

        logger.info(f"Split file into {len(chunks)} chunks (256MB each) for parallel scanning")

        # Parallel scan; uses the module-level ProcessPoolExecutor import
        # (the previous inner import was redundant).
        with ProcessPoolExecutor(max_workers=max_workers) as executor:
            futures = [
                executor.submit(_scan_chunk_worker, file_path, chunk_start, chunk_end)
                for chunk_start, chunk_end in chunks
            ]

            all_boundaries = []
            for i, future in enumerate(futures):
                chunk_boundaries = future.result()
                logger.debug(f"Chunk {i+1}/{len(chunks)}: found {len(chunk_boundaries)} CREATE TABLE statements")
                all_boundaries.extend(chunk_boundaries)

        all_boundaries.sort(key=lambda b: b.offset_start)

        # De-duplicate overlapping entries. BUGFIX: the previous condition
        # (offset_start > last_end + 100) silently discarded the first
        # statement whenever it started within the first 100 bytes of the
        # file, and any statement beginning less than 100 bytes after the
        # previous one. Since chunks are disjoint, real duplicates can only
        # manifest as overlapping ranges, so overlap is the correct test.
        unique_boundaries = []
        last_end = -1
        for boundary in all_boundaries:
            if boundary.offset_start > last_end:
                unique_boundaries.append(boundary)
                last_end = boundary.offset_end

        logger.info(f"Found {len(unique_boundaries)} valid CREATE TABLE statements (after deduplication)")
        return unique_boundaries

    def _scan_file_single_thread(self, file_path: Path) -> List[TableBoundary]:
        """Scan the whole file in one pass (used for files under 256MB)."""
        logger.info("Reading file for CREATE TABLE scanning (single-threaded)...")
        with open(file_path, 'rb') as f:
            file_bytes = f.read()
            file_text = file_bytes.decode('utf-8', errors='replace')

        logger.info(f"File size: {len(file_text)} characters, searching for CREATE TABLE...")

        # Uses the module-level `re` import (previously re-imported locally).
        create_table_pattern = re.compile(
            r'(?i)\bCREATE\s+TABLE\s+',
            re.MULTILINE
        )

        matches = list(create_table_pattern.finditer(file_text))
        logger.info(f"Found {len(matches)} potential CREATE TABLE statements")

        boundaries = []
        for match in matches:
            start_pos = match.start()
            boundary = self._find_table_boundary(file_text, start_pos, len(file_bytes))
            if boundary:
                boundaries.append(boundary)

        logger.info(f"Found {len(boundaries)} valid CREATE TABLE statements")
        return boundaries

    def _find_table_boundary(self, text: str, start_pos: int, file_size_bytes: int) -> Optional[TableBoundary]:
        """
        From the start of a CREATE TABLE match, find the end of the statement.

        Tracks quote and comment state plus parenthesis depth so that
        parentheses inside strings or comments do not confuse the balance.

        Args:
            text: decoded file/chunk text.
            start_pos: character index where the CREATE TABLE keyword begins.
            file_size_bytes: byte length of the underlying buffer (currently
                unused by this method; kept for interface stability).

        Returns:
            TableBoundary with byte offsets relative to *text*, or None if the
            statement is malformed or incomplete (e.g. truncated at a chunk
            boundary).
        """
        self.reset()

        # Find the opening parenthesis of the column list.
        paren_start = None
        i = start_pos
        n = len(text)

        while i < n:
            ch = text[i]
            if ch == '(':
                paren_start = i
                self.paren_depth = 1
                self.create_table_start = start_pos
                break
            elif ch == ';':
                # A semicolon before any '(': no column list, not a valid CREATE TABLE.
                return None
            i += 1

        if paren_start is None:
            return None

        # Walk forward from the open paren, tracking depth until the body closes.
        i = paren_start + 1
        in_single = in_double = in_backtick = False
        in_sl_comment = False
        in_ml_comment = False

        while i < n:
            ch = text[i]
            nxt = text[i+1] if i+1 < n else ''

            # Inside a single-line comment: opaque until end of line.
            if in_sl_comment:
                if ch == '\n':
                    in_sl_comment = False
                i += 1
                continue

            # Inside a block comment: opaque until the closing */.
            if in_ml_comment:
                if ch == '*' and nxt == '/':
                    in_ml_comment = False
                    i += 2
                    continue
                i += 1
                continue

            # Comment openers are only meaningful outside quotes.
            if not (in_single or in_double or in_backtick):
                if ch == '-' and nxt == '-':
                    in_sl_comment = True
                    i += 2
                    continue
                if ch == '/' and nxt == '*':
                    in_ml_comment = True
                    i += 2
                    continue

            # Quote toggles: each kind only toggles while the other two are closed.
            if ch == "'" and not (in_double or in_backtick):
                in_single = not in_single
                i += 1
                continue

            if ch == '"' and not (in_single or in_backtick):
                in_double = not in_double
                i += 1
                continue

            if ch == '`' and not (in_single or in_double):
                in_backtick = not in_backtick
                i += 1
                continue

            # Any character inside quotes is opaque.
            if in_single or in_double or in_backtick:
                i += 1
                continue

            # Parenthesis balance outside quotes/comments.
            if ch == '(':
                self.paren_depth += 1
            elif ch == ')':
                self.paren_depth -= 1
                if self.paren_depth == 0:
                    # Column list closed; scan past table options (ENGINE=... etc.)
                    # to the terminating semicolon.
                    i += 1
                    while i < n:
                        ch = text[i]
                        if ch == ';':
                            # Terminator found; convert character indices to
                            # byte offsets. NOTE(review): encoding the prefix
                            # is O(start_pos) per statement.
                            end_pos = i + 1
                            start_bytes = len(text[:start_pos].encode('utf-8'))
                            end_bytes = len(text[:end_pos].encode('utf-8'))

                            return TableBoundary(
                                offset_start=start_bytes,
                                offset_end=end_bytes
                            )
                        elif ch.isspace():
                            i += 1
                            continue
                        else:
                            # Table option text: keep scanning.
                            i += 1
                    # EOF reached without ';': treat end-of-text as the end.
                    if i >= n:
                        end_bytes = len(text.encode('utf-8'))
                        start_bytes = len(text[:start_pos].encode('utf-8'))
                        return TableBoundary(
                            offset_start=start_bytes,
                            offset_end=end_bytes
                        )

            i += 1

        # Parentheses never balanced (statement truncated): no boundary.
        return None
    


def process_ddl_chunk_worker(args: Tuple[int, int, Path, str]) -> Dict[str, Any]:
    """
    Worker: convert one CREATE TABLE statement (a byte range of the file).

    Args:
        args: tuple of (offset_start, offset_end, file_path, read_dialect).

    Returns:
        Dict with keys 'ddl_sql', 'constraints_sql', 'table_schema',
        'table_name', 'warnings', 'errors'. On failure the message is
        appended to 'errors' and the other fields keep their defaults.
    """
    offset_start, offset_end, file_path, read_dialect = args

    out: Dict[str, Any] = {
        'ddl_sql': '',
        'constraints_sql': '',
        'table_schema': None,
        'table_name': None,
        'warnings': [],
        'errors': []
    }

    try:
        # Binary read so the byte offsets from the pre-scan are exact.
        with open(file_path, 'rb') as f:
            f.seek(offset_start)
            raw = f.read(offset_end - offset_start)
        statement_sql = raw.decode('utf-8', errors='replace')

        try:
            tree = sqlglot.parse_one(statement_sql, read=read_dialect)

            if (
                isinstance(tree, exp.Create)
                and tree.args.get("kind")
                and tree.args["kind"].upper() == "TABLE"
            ):
                # Normalized table name, used in generated constraint names.
                raw_name = tree.this.this if hasattr(tree.this, "this") else None
                table_name = _norm_name(raw_name)
                out['table_name'] = table_name

                # Capture the column schema before any transformation.
                tbl_schema = _extract_table_schema_from_create(tree)
                if tbl_schema:
                    out['table_schema'] = tbl_schema

                # Capture inline constraints before any transformation.
                inline = _extract_inline_constraints(tree)

                # Convert the DDL; fall back to plain dialect rendering when
                # the transformer declines the expression.
                transformed = transform_expression(tree, table_schemas={})
                if transformed:
                    converted_sql, warns, extra_stmts = transformed
                else:
                    warns, extra_stmts = [], []
                    try:
                        converted_sql = tree.sql(dialect="postgres")
                    except Exception:
                        converted_sql = tree.sql(dialect="mysql")

                out['warnings'].extend(warns)

                index_stmts = []
                fk_stmts = []

                # Unique constraints -> CREATE UNIQUE INDEX statements.
                for u in inline.get("unique", []):
                    if not u.get("columns"):
                        continue
                    name = u.get("name") or f"uq_{table_name}_{'_'.join(u['columns'])}"
                    cols = ", ".join(f'"{c}"' for c in u["columns"])
                    index_stmts.append(f'CREATE UNIQUE INDEX {name} ON "{table_name}" ({cols});')

                # Plain indexes -> CREATE INDEX statements.
                for idx in inline.get("indexes", []):
                    if not idx.get("columns"):
                        continue
                    name = idx.get("name") or f"idx_{table_name}_{'_'.join(idx['columns'])}"
                    cols = ", ".join(f'"{c}"' for c in idx["columns"])
                    index_stmts.append(f'CREATE INDEX {name} ON "{table_name}" ({cols});')

                # Foreign keys -> ALTER TABLE ... ADD CONSTRAINT statements.
                for fk in inline.get("foreign_keys", []):
                    if not (fk.get("columns") and fk.get("ref_table") and fk.get("ref_columns")):
                        continue
                    name = fk.get("name") or f"fk_{table_name}_{'_'.join(fk['columns'])}_to_{fk['ref_table']}"
                    cols = ", ".join(f'"{c}"' for c in fk["columns"])
                    ref_cols = ", ".join(f'"{c}"' for c in fk["ref_columns"])
                    fk_stmts.append(
                        f'ALTER TABLE "{table_name}" ADD CONSTRAINT {name} FOREIGN KEY ({cols}) REFERENCES "{fk["ref_table"]}" ({ref_cols});'
                    )

                # Split auxiliary statements: sequences precede the table,
                # COMMENT ON statements follow it. Other extras are dropped,
                # matching the established behavior.
                seq_stmts = [s for s in extra_stmts if s.strip().upper().startswith("CREATE SEQUENCE")]
                comment_stmts = [s for s in extra_stmts if s.strip().upper().startswith("COMMENT ON")]

                pieces = [s.rstrip("; ") + ";" for s in seq_stmts]
                pieces.append(converted_sql.rstrip("; ") + ";")
                pieces.extend(c.rstrip("; ") + ";" for c in comment_stmts)

                out['ddl_sql'] = '\n'.join(pieces)
                out['constraints_sql'] = '\n'.join(index_stmts + fk_stmts)

        except ParseError as e:
            out['errors'].append(f"Parse error at offset {offset_start}: {str(e)}")
        except Exception as e:
            out['errors'].append(f"Error processing DDL at offset {offset_start}: {str(e)}")

    except Exception as e:
        out['errors'].append(f"Error reading file chunk [{offset_start}:{offset_end}]: {str(e)}")

    return out


def process_ddl_parallel(
    file_path: Path,
    boundaries: List[TableBoundary],
    read_dialect: str = "mysql",
    max_workers: int = 12
) -> Tuple[Dict[str, str], Dict[str, Dict[str, Any]], List[str], List[str]]:
    """
    Convert all CREATE TABLE statements in parallel worker processes.

    Args:
        file_path: path of the SQL file.
        boundaries: CREATE TABLE boundary list from the pre-scan.
        read_dialect: source SQL dialect.
        max_workers: maximum number of worker processes.

    Returns:
        (ddl_results, table_schemas, all_warnings, all_errors) where
        - ddl_results: {table_name: ddl_sql}
        - table_schemas: {table_name_lower: schema_dict}
        - all_warnings: accumulated warnings from all workers
        - all_errors: accumulated errors from all workers
    """
    # Robustness fix: nothing to do — return immediately instead of spawning
    # a process pool (and logging an empty progress line) for an empty batch.
    if not boundaries:
        return {}, {}, [], []

    logger.info(f"Starting parallel DDL processing: {len(boundaries)} CREATE TABLE statements, {max_workers} workers")

    worker_args = [
        (b.offset_start, b.offset_end, file_path, read_dialect)
        for b in boundaries
    ]

    ddl_results: Dict[str, str] = {}
    table_schemas: Dict[str, Dict[str, Any]] = {}
    all_warnings: List[str] = []
    all_errors: List[str] = []

    start_time = time.time()

    # map() preserves boundary order in the results.
    with ProcessPoolExecutor(max_workers=max_workers) as executor:
        results = list(executor.map(process_ddl_chunk_worker, worker_args))

    # Merge worker results into the aggregate structures.
    for result in results:
        all_errors.extend(result['errors'])
        all_warnings.extend(result['warnings'])

        table_name = result['table_name']
        if table_name:
            if result['ddl_sql']:
                ddl_results[table_name] = result['ddl_sql']
            if result['table_schema']:
                # Schemas are keyed case-insensitively (lowercase).
                table_schemas[table_name.lower()] = result['table_schema']

    duration = time.time() - start_time
    logger.info(f"Parallel DDL processing completed in {duration:.2f} seconds")
    logger.info(f"  - Processed {len(ddl_results)} tables")
    # BUGFIX: previously `logger.info(f"..." if boundaries else "")` logged an
    # empty info line when boundaries was empty; the early return above makes
    # the average unconditionally well-defined here.
    logger.info(f"  - Average: {duration / len(boundaries):.3f} seconds per table")

    return ddl_results, table_schemas, all_warnings, all_errors

