# -*- coding: utf-8 -*-
"""
MySQL SQL文件解析器

使用sqlparse库解析MySQL导出的SQL文件，识别并分类不同类型的SQL语句。
"""

import re
import logging
from typing import List, Dict, Any, Optional
try:
    from dataclasses import dataclass
except ImportError:
    # Python < 3.7 fallback
    def dataclass(cls):
        return cls
from enum import Enum

import sqlparse
from sqlparse.sql import Statement, Token
# from sqlparse.tokens import Keyword, Name, Punctuation  # noqa: F401


class StatementType(Enum):
    """Types of SQL statements the parser recognizes and classifies."""
    CREATE_DATABASE = "CREATE_DATABASE"
    DROP_DATABASE = "DROP_DATABASE"
    USE_DATABASE = "USE_DATABASE"
    CREATE_TABLE = "CREATE_TABLE"
    DROP_TABLE = "DROP_TABLE"
    INSERT_INTO = "INSERT_INTO"
    CREATE_INDEX = "CREATE_INDEX"
    CREATE_SEQUENCE = "CREATE_SEQUENCE"  # declared but never produced by _identify_statement_type
    COMMENT = "COMMENT"  # declared but never produced by _identify_statement_type
    UNKNOWN = "UNKNOWN"  # unrecognized statements; dropped by _parse_single_statement


@dataclass
class ParsedStatement:
    """A single SQL statement together with the metadata extracted from it."""
    statement_type: StatementType  # classification (CREATE_TABLE, INSERT_INTO, ...)
    original_sql: str  # raw statement text as read from the dump
    parsed_sql: Statement  # sqlparse token tree for the statement
    table_name: Optional[str] = None  # set for CREATE TABLE / INSERT INTO / CREATE INDEX
    database_name: Optional[str] = None  # set for CREATE/DROP DATABASE statements
    columns: Optional[List[Dict[str, Any]]] = None  # CREATE TABLE column definitions
    indexes: Optional[List[Dict[str, Any]]] = None  # CREATE TABLE key/index definitions
    constraints: Optional[List[Dict[str, Any]]] = None  # CREATE TABLE foreign-key constraints


class MySQLParser:
    """Parser for MySQL dump (.sql) files.

    Splits a dump into individual statements, classifies each one with
    sqlparse, and extracts table / column / index / constraint metadata
    via lightweight regex analysis.  Results accumulate in
    ``self.statements`` and can be queried through the ``get_*`` helpers.
    """

    # Pre-compiled patterns, hoisted so they are compiled once.
    # BUGFIX: the old patterns contained stray spaces ("PRIMARY\s + KEY",
    # "FOREIGN\s + KEY", "\s + REFERENCES") which made them match a literal
    # run of spaces — they never matched the normal single-space forms
    # "PRIMARY KEY" / "FOREIGN KEY ... REFERENCES".
    _PRIMARY_KEY_RE = re.compile(r'PRIMARY\s+KEY\s*\((.*?)\)', re.IGNORECASE)
    # Optional quote characters around the key name so `KEY \`idx\` (...)`
    # also matches (the old `\w+` could never contain backticks).
    _NAMED_KEY_RE = re.compile(
        r'(UNIQUE\s+)?KEY\s+[`"\[]?(\w+)[`"\]]?\s*\((.*?)\)', re.IGNORECASE)
    _FOREIGN_KEY_RE = re.compile(
        r'FOREIGN\s+KEY\s*\((.*?)\)\s+REFERENCES\s+[`"\[]?(\w+)[`"\]]?\s*\((.*?)\)',
        re.IGNORECASE,
    )
    # DEFAULT value: a quoted string (may contain spaces/commas, '' escapes)
    # or a bare token.  The old `[^,\s]+` truncated quoted values at spaces.
    _DEFAULT_RE = re.compile(
        r'DEFAULT\s+(\'(?:[^\']|\'\')*\'|"[^"]*"|[^,\s]+)', re.IGNORECASE)

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.statements: List[ParsedStatement] = []

    def parse_file(self, file_path: str, encoding: str = 'utf-8') -> List[ParsedStatement]:
        """
        Parse a MySQL SQL dump file.

        Files larger than 100 MB are processed line-by-line to bound memory
        usage; smaller files are read whole and handed to parse_content().

        Args:
            file_path: path to the SQL file.
            encoding: text encoding of the file.

        Returns:
            List of parsed statements (also stored on ``self.statements``).

        Raises:
            OSError / UnicodeDecodeError: if the file cannot be read
            (logged, then re-raised).
        """
        self.logger.info(f"开始解析SQL文件: {file_path}")

        import os  # local import kept so the module header stays untouched

        try:
            file_size = os.path.getsize(file_path)
            if file_size > 100 * 1024 * 1024:  # >100MB: stream to limit memory
                return self._parse_file_streaming(file_path, encoding)
            with open(file_path, 'r', encoding=encoding) as f:
                content = f.read()
            return self.parse_content(content)
        except Exception as e:
            self.logger.error(f"读取文件失败: {e}")
            raise

    def _parse_file_streaming(self, file_path: str, encoding: str = 'utf-8') -> List[ParsedStatement]:
        """
        Stream a large file and parse it statement-by-statement.

        Statement boundaries are detected heuristically: a line ending in
        ';' while no parenthesis group is open ends the current statement.
        NOTE: this does not account for ';' or parentheses inside string
        literals, matching the original heuristic.
        """
        self.logger.info(f"使用流式处理解析大文件: {file_path}")

        import gc  # local import kept so the module header stays untouched

        # BUGFIX: reset accumulated results, mirroring parse_content();
        # previously repeated calls kept appending to earlier results.
        self.statements = []

        buffer = ""
        statement_buffer = ""
        paren_count = 0

        with open(file_path, 'r', encoding=encoding, buffering=8192) as f:
            while True:
                chunk = f.read(8192)  # 8KB chunks
                if not chunk:
                    break
                buffer += chunk

                # Consume every complete line currently in the buffer.
                while '\n' in buffer:
                    line, buffer = buffer.split('\n', 1)
                    line = line.strip()
                    if not line or line.startswith('--'):
                        continue  # skip blanks and line comments

                    statement_buffer += line + ' '
                    # Crude statement-boundary detection via paren balance.
                    paren_count += line.count('(') - line.count(')')

                    if line.endswith(';') and paren_count <= 0:
                        stmt_str = statement_buffer.strip()
                        if stmt_str:
                            self._append_statement(stmt_str, "流式解析语句时出错")
                        statement_buffer = ""
                        paren_count = 0
                        # Periodically nudge the collector on huge dumps.
                        if self.statements and len(self.statements) % 1000 == 0:
                            gc.collect()

        # Flush a trailing statement that had no terminating newline.
        tail = statement_buffer.strip()
        if tail:
            self._append_statement(tail, "解析最后语句时出错")

        self.logger.info(f"流式解析完成，共处理{len(self.statements)}个语句")
        return self.statements

    def _append_statement(self, stmt_str: str, error_prefix: str) -> None:
        """Parse one raw statement and append it; log (never raise) failures."""
        try:
            parsed_stmt = self._parse_single_statement(stmt_str)
            if parsed_stmt:
                self.statements.append(parsed_stmt)
        except Exception as e:
            self.logger.warning(f"{error_prefix}: {e}")

    def parse_content(self, content: str) -> List[ParsedStatement]:
        """
        Parse SQL text.

        Args:
            content: full SQL file content.

        Returns:
            List of parsed statements (also stored on ``self.statements``).
        """
        self.statements = []

        # Preprocess: strip comments and MySQL-only housekeeping statements.
        content = self._preprocess_content(content)

        for i, stmt_str in enumerate(sqlparse.split(content)):
            stmt_str = stmt_str.strip()
            if not stmt_str:
                continue
            try:
                parsed_stmt = self._parse_single_statement(stmt_str)
                if parsed_stmt:
                    self.statements.append(parsed_stmt)
            except Exception as e:
                self.logger.warning(f"解析第{i + 1}个语句时出错: {e}")
                self.logger.debug(f"问题语句: {stmt_str[:100]}...")

        self.logger.info(f"解析完成，共处理{len(self.statements)}个语句")
        return self.statements

    def _preprocess_content(self, content: str) -> str:
        """Strip comments and MySQL-specific statements before parsing."""
        # Block comments, including MySQL conditional comments /*! ... */.
        content = re.sub(r'/\*.*?\*/', '', content, flags=re.DOTALL)
        # Line comments up to end of line (also handles the final line,
        # which the old pattern missed because it required a trailing '\n').
        content = re.sub(r'--[^\n]*', '', content)

        # MySQL-only statements.  BUGFIX: anchored to line start so a
        # substring such as "SET 1;" inside OFFSET/INSERT data is no
        # longer removed by accident.
        flags = re.IGNORECASE | re.MULTILINE
        content = re.sub(r'^\s*SET\s+.*?;', '', content, flags=flags)
        content = re.sub(r'^\s*LOCK\s+TABLES\s+.*?;', '', content, flags=flags)
        content = re.sub(r'^\s*UNLOCK\s+TABLES\s*;', '', content, flags=flags)

        return content

    def _parse_single_statement(self, stmt_str: str) -> Optional[ParsedStatement]:
        """Parse one statement string; return None for unusable statements."""
        # BUGFIX: guard against an empty parse result (old code indexed [0]
        # unconditionally and could raise IndexError).
        parsed_list = sqlparse.parse(stmt_str)
        if not parsed_list:
            return None
        parsed = parsed_list[0]

        stmt_type = self._identify_statement_type(parsed)
        if stmt_type == StatementType.UNKNOWN:
            self.logger.debug(f"未识别的语句类型: {stmt_str[:50]}...")
            return None

        parsed_stmt = ParsedStatement(
            statement_type=stmt_type,
            original_sql=stmt_str,
            parsed_sql=parsed
        )

        # Detailed extraction per statement type.
        if stmt_type == StatementType.CREATE_TABLE:
            self._parse_create_table(parsed_stmt)
        elif stmt_type == StatementType.INSERT_INTO:
            self._parse_insert_into(parsed_stmt)
        elif stmt_type in (StatementType.CREATE_DATABASE,
                           StatementType.DROP_DATABASE,
                           StatementType.USE_DATABASE):
            # USE is now dispatched too, so get_database_names() sees it.
            self._parse_database_statement(parsed_stmt)
        elif stmt_type == StatementType.CREATE_INDEX:
            self._parse_create_index(parsed_stmt)

        return parsed_stmt

    def _identify_statement_type(self, parsed: Statement) -> StatementType:
        """Classify a parsed statement by its leading keyword(s)."""
        tokens = [token for token in parsed.flatten() if not token.is_whitespace]
        if not tokens:
            return StatementType.UNKNOWN

        first_value = tokens[0].value.upper()

        if first_value == 'CREATE':
            if len(tokens) > 1:
                second_value = tokens[1].value.upper()
                if second_value == 'DATABASE':
                    return StatementType.CREATE_DATABASE
                elif second_value == 'TABLE':
                    return StatementType.CREATE_TABLE
                elif second_value in ('INDEX', 'UNIQUE'):  # CREATE [UNIQUE] INDEX
                    return StatementType.CREATE_INDEX

        elif first_value == 'DROP':
            if len(tokens) > 1:
                second_value = tokens[1].value.upper()
                if second_value == 'DATABASE':
                    return StatementType.DROP_DATABASE
                elif second_value == 'TABLE':
                    return StatementType.DROP_TABLE

        elif first_value == 'INSERT':
            return StatementType.INSERT_INTO

        elif first_value == 'USE':
            return StatementType.USE_DATABASE

        return StatementType.UNKNOWN

    @staticmethod
    def _first_name_after(tokens, keyword: str, skip=()) -> Optional[str]:
        """Return the first interesting token value after *keyword*.

        Skips whitespace and any token whose upper-cased value is in *skip*
        (used to step over IF NOT EXISTS).  Quote characters are stripped
        from the returned identifier.
        """
        for i, token in enumerate(tokens):
            if token.value.upper() == keyword:
                for nxt in tokens[i + 1:]:
                    if not nxt.is_whitespace and nxt.value.upper() not in skip:
                        return nxt.value.strip('`"[]')
                return None
        return None

    def _parse_create_table(self, stmt: ParsedStatement):
        """Fill table_name, columns, indexes and constraints for CREATE TABLE."""
        tokens = list(stmt.parsed_sql.flatten())
        # BUGFIX: skip IF NOT EXISTS so "CREATE TABLE IF NOT EXISTS t" no
        # longer reports the table name as "IF".
        stmt.table_name = self._first_name_after(
            tokens, 'TABLE', skip=('IF', 'NOT', 'EXISTS'))

        stmt.columns = self._extract_columns(stmt.original_sql)
        stmt.indexes = self._extract_indexes(stmt.original_sql)
        stmt.constraints = self._extract_constraints(stmt.original_sql)

    def _parse_insert_into(self, stmt: ParsedStatement):
        """Fill table_name for INSERT INTO statements."""
        tokens = list(stmt.parsed_sql.flatten())
        stmt.table_name = self._first_name_after(tokens, 'INTO')

    def _parse_database_statement(self, stmt: ParsedStatement):
        """Fill database_name for CREATE/DROP DATABASE|SCHEMA and USE."""
        tokens = list(stmt.parsed_sql.flatten())
        for keyword in ('DATABASE', 'SCHEMA', 'USE'):
            name = self._first_name_after(
                tokens, keyword, skip=('IF', 'NOT', 'EXISTS'))
            if name:
                stmt.database_name = name
                return

    def _parse_create_index(self, stmt: ParsedStatement):
        """Fill table_name for CREATE INDEX ... ON <table>."""
        # Optional opening quote so ON `tbl` / ON "tbl" also match
        # (the old `ON\s+(\w+)` failed on quoted names).
        match = re.search(r'ON\s+[`"\[]?(\w+)', stmt.original_sql, re.IGNORECASE)
        if match:
            stmt.table_name = match.group(1)

    @staticmethod
    def _extract_parenthesized_body(sql: str) -> Optional[str]:
        """Return the text inside the first balanced (...) group, or None."""
        start = sql.find('(')
        if start == -1:
            return None
        depth = 0
        for i in range(start, len(sql)):
            ch = sql[i]
            if ch == '(':
                depth += 1
            elif ch == ')':
                depth -= 1
                if depth == 0:
                    return sql[start + 1:i]
        return None  # unbalanced parentheses

    @staticmethod
    def _split_top_level_commas(text: str) -> List[str]:
        """Split *text* on commas not nested inside parentheses.

        Needed so types like DECIMAL(10,2) are not split in half.
        NOTE: commas inside quoted strings are still split (same limitation
        as before); acceptable for typical dump column definitions.
        """
        parts: List[str] = []
        depth = 0
        current: List[str] = []
        for ch in text:
            if ch == '(':
                depth += 1
            elif ch == ')':
                depth = max(0, depth - 1)
            if ch == ',' and depth == 0:
                parts.append(''.join(current))
                current = []
            else:
                current.append(ch)
        parts.append(''.join(current))
        return parts

    @staticmethod
    def _split_identifier_list(text: str) -> List[str]:
        """Turn '`a`, `b`' into ['a', 'b']."""
        return [col.strip().strip('`"[]') for col in text.split(',')]

    def _extract_columns(self, sql: str) -> List[Dict[str, Any]]:
        """Extract column definitions from a CREATE TABLE statement."""
        columns: List[Dict[str, Any]] = []

        # Balanced-paren extraction instead of greedy \((.*)\), so trailing
        # table options containing ')' cannot corrupt the column list.
        body = self._extract_parenthesized_body(sql)
        if body is None:
            return columns

        for definition in self._split_top_level_commas(body):
            definition = definition.strip()
            if not definition:
                continue
            upper = definition.upper()
            # Skip key/constraint lines.  BUGFIX: CONSTRAINT lines used to be
            # mis-parsed as a column named "CONSTRAINT".
            if upper.startswith(('PRIMARY', 'KEY', 'INDEX', 'UNIQUE',
                                 'FOREIGN', 'CONSTRAINT')):
                continue
            parts = definition.split()
            if len(parts) < 2:
                continue
            columns.append({
                'name': parts[0].strip('`"[]'),
                'type': parts[1].upper(),
                'nullable': 'NOT NULL' not in upper,
                'auto_increment': 'AUTO_INCREMENT' in upper,
                'default': self._extract_default_value(definition),
            })

        return columns

    def _extract_indexes(self, sql: str) -> List[Dict[str, Any]]:
        """Extract PRIMARY KEY and [UNIQUE] KEY definitions."""
        indexes: List[Dict[str, Any]] = []

        primary = self._PRIMARY_KEY_RE.search(sql)
        if primary:
            indexes.append({
                'type': 'PRIMARY',
                'columns': self._split_identifier_list(primary.group(1)),
            })

        for match in self._NAMED_KEY_RE.finditer(sql):
            indexes.append({
                'type': 'UNIQUE' if match.group(1) else 'INDEX',
                'name': match.group(2),
                'columns': self._split_identifier_list(match.group(3)),
            })

        return indexes

    def _extract_constraints(self, sql: str) -> List[Dict[str, Any]]:
        """Extract FOREIGN KEY constraints."""
        constraints: List[Dict[str, Any]] = []
        for match in self._FOREIGN_KEY_RE.finditer(sql):
            constraints.append({
                'type': 'FOREIGN_KEY',
                'columns': self._split_identifier_list(match.group(1)),
                'ref_table': match.group(2),
                'ref_columns': self._split_identifier_list(match.group(3)),
            })
        return constraints

    def _extract_default_value(self, column_def: str) -> Optional[str]:
        """Return a column's DEFAULT value with surrounding quotes removed."""
        match = self._DEFAULT_RE.search(column_def)
        if match:
            return match.group(1).strip("'\"")
        return None

    def get_statements_by_type(self, stmt_type: StatementType) -> List[ParsedStatement]:
        """Return all parsed statements of the given type."""
        return [stmt for stmt in self.statements if stmt.statement_type == stmt_type]

    def get_table_names(self) -> List[str]:
        """Return distinct table names in first-seen order."""
        return list(dict.fromkeys(
            stmt.table_name for stmt in self.statements if stmt.table_name))

    def get_database_names(self) -> List[str]:
        """Return distinct database names in first-seen order."""
        return list(dict.fromkeys(
            stmt.database_name for stmt in self.statements if stmt.database_name))
