import os
import pymysql
import time
import logging
import socket
from contextlib import contextmanager
from typing import Optional, Dict, Any, Tuple
from .logger import FlinkLogger, log_database_operation


def is_database_connection_error(error: Exception) -> Tuple[bool, str]:
    """Classify an exception as a database connection failure.

    Args:
        error: Exception raised while connecting to / talking to the DB.

    Returns:
        (is_connection_error, error_message):
        - is_connection_error: True when the error is a timeout or a
          network/connection-level failure.
        - error_message: a friendly message for connection errors,
          otherwise the original exception text.
    """
    error_msg = str(error)
    lowered = error_msg.lower()
    # pymysql errors carry the MySQL error code as args[0] (2003 = can't connect).
    error_code = error.args[0] if getattr(error, 'args', None) else None

    # Connection/handshake timeout.
    is_timeout = (
        "timed out" in lowered or
        "timeout" in lowered or
        isinstance(error, (socket.timeout, TimeoutError))
    )

    # Network-level connection failure. ConnectionError is a subclass of
    # OSError, so a single OSError check covers both.
    is_connection_error = (
        "can't connect" in lowered or
        "connection refused" in lowered or
        "connection reset" in lowered or
        isinstance(error, OSError) or
        (isinstance(error, pymysql.err.OperationalError) and error_code == 2003)
    )

    # Timeout classification takes priority over generic connection failure.
    if is_timeout:
        return True, "数据库连接超时，请检查数据库服务是否可用"
    if is_connection_error:
        return True, "无法连接到数据库，请检查数据库配置和网络连接"
    return False, error_msg


def get_db_connection():
    """Create a DB connection from environment variables with safe defaults.

    Connection attempts fail fast (~5 seconds) rather than hanging, so
    callers are not blocked while the database is unreachable.

    Environment variables:
      DB_HOST, DB_PORT, DB_USER, DB_PASSWORD, DB_NAME

    Returns:
        An open pymysql connection (DictCursor, autocommit disabled).

    Raises:
        Re-raises any connection failure after logging it; no retries here.
    """
    # Remember the current socket default timeout so it can be restored.
    old_timeout = socket.getdefaulttimeout()

    # Force a 5s default timeout so socket setup (incl. DNS) fails fast.
    # WARNING: socket.setdefaulttimeout is PROCESS-wide, not thread-local —
    # every socket created anywhere in the process while this is in effect
    # inherits the 5s timeout. It is restored in the finally block below.
    socket.setdefaulttimeout(5.0)

    # SECURITY NOTE(review): hard-coded fallback credentials (host, user,
    # password) should not live in source control — prefer failing when the
    # corresponding environment variables are missing.
    config = {
        "host": os.environ.get("DB_HOST", "10.1.1.26"),
        "port": int(os.environ.get("DB_PORT", "3306")),
        "user": os.environ.get("DB_USER", "flink_cluster_monitor"),
        "password": os.environ.get("DB_PASSWORD", "nXhkTJYJ7D5rhGT5"),
        "database": os.environ.get("DB_NAME", "flink_cluster_monitor"),
        "charset": "utf8mb4",
        "cursorclass": pymysql.cursors.DictCursor,
        "autocommit": False,
        "connect_timeout": 5,   # pymysql connect timeout (seconds)
        "read_timeout": 30,     # read timeout (seconds)
        "write_timeout": 30,    # write timeout (seconds)
    }

    start_time = time.time()

    try:
        connection = pymysql.connect(**config)
        duration = time.time() - start_time

        # Log successful connection with timing for observability.
        logging.getLogger('flink.business').info(
            f"Database connection established | "
            f"Host: {config['host']}:{config['port']} | "
            f"Database: {config['database']} | "
            f"Duration: {round(duration * 1000, 2)}ms"
        )

        return connection

    except (pymysql.err.OperationalError, socket.timeout, TimeoutError, OSError) as e:
        # Connection timeouts / network errors: fail fast, do not retry.
        duration = time.time() - start_time
        is_conn_error, friendly_msg = is_database_connection_error(e)

        error_type = '连接错误' if is_conn_error else '操作错误'
        logging.getLogger('flink.error').error(
            f"Database connection failed (快速失败) | "
            f"Host: {config['host']}:{config['port']} | "
            f"Database: {config['database']} | "
            f"Duration: {round(duration * 1000, 2)}ms | "
            f"Error: {str(e)} | "
            f"Type: {error_type} | "
            f"Message: {friendly_msg}"
        )

        # Re-raise so the caller decides how to handle the failure.
        raise

    except Exception as e:
        # Any other exception also fails fast.
        duration = time.time() - start_time
        logging.getLogger('flink.error').error(
            f"Database connection failed (未知错误) | "
            f"Host: {config['host']}:{config['port']} | "
            f"Database: {config['database']} | "
            f"Duration: {round(duration * 1000, 2)}ms | "
            f"Error: {str(e)} | "
            f"ErrorType: {type(e).__name__}"
        )
        raise

    finally:
        # Restore the previous process-wide socket default timeout.
        socket.setdefaulttimeout(old_timeout)


@contextmanager
def get_db_cursor(autocommit: bool = False):
    """Context manager yielding a database cursor on a fresh connection.

    Args:
        autocommit: when True, commit the transaction after the with-body
            completes without raising.

    Yields:
        A cursor from get_db_connection() (DictCursor).

    Raises:
        Re-raises any exception from connection setup or the with-body,
        after attempting a rollback. Cursor and connection are always
        closed on exit.
    """
    connection = None
    cursor = None
    start_time = time.time()
    logger = logging.getLogger('flink.business')

    try:
        connection = get_db_connection()
        cursor = connection.cursor()
        logger.debug("Database cursor acquired")

        yield cursor

        if autocommit:
            connection.commit()
            logger.debug("Database transaction committed")

        duration = time.time() - start_time
        logger.debug(f"Database operation completed in {round(duration * 1000, 2)}ms")

    except Exception as e:
        error_logger = logging.getLogger('flink.error')
        if connection:
            # Guard the rollback: on a dead connection rollback() itself can
            # raise, which would mask the original error being re-raised.
            try:
                connection.rollback()
                error_logger.error("Database transaction rolled back due to error")
            except Exception:
                error_logger.error("Database rollback failed (connection may be lost)")

        duration = time.time() - start_time
        error_logger.error(f"Database cursor operation failed: {str(e)}, duration: {round(duration * 1000, 2)}ms")
        raise

    finally:
        # Close cursor and connection independently so a failing
        # cursor.close() cannot leak the connection.
        if cursor:
            try:
                cursor.close()
            except Exception:
                pass
        if connection:
            try:
                connection.close()
                logger.debug("Database connection closed")
            except Exception:
                pass


class DatabaseLogger:
    """Static helpers that execute SQL with logging and retry handling."""

    @staticmethod
    @log_database_operation("execute_query")
    def execute_query(sql: str, params: Optional[tuple] = None, fetch_one: bool = False,
                     fetch_all: bool = True) -> Optional[Any]:
        """Execute a query with retry handling and return the result.

        Connection-level errors (timeouts, refused/reset connections) are
        never retried and fail immediately. Other operational errors
        (e.g. deadlocks) are retried up to 3 times with increasing waits.

        Args:
            sql: SQL statement to execute.
            params: statement parameters, or None for no parameters.
            fetch_one: return a single row (dict) when True.
            fetch_all: return all rows (list of dicts) when True (default).

        Returns:
            A row dict (fetch_one), a list of row dicts (fetch_all), or
            None when neither fetch flag applies.
        """
        max_retries = 3
        last_error = None

        for retry in range(max_retries):
            try:
                if retry > 0:
                    logger = logging.getLogger('flink.business')
                    logger.info(f"🔄 重试MySQL查询 (尝试{retry+1}/{max_retries})")

                with get_db_cursor() as cursor:
                    cursor.execute(sql, params or ())

                    if fetch_one:
                        result = cursor.fetchone()
                    elif fetch_all:
                        result = cursor.fetchall()
                    else:
                        result = None

                    # Log the executed query (truncated to 100 chars).
                    logger = logging.getLogger('flink.business')
                    logger.debug(
                        f"SQL Query executed | "
                        f"Rows affected: {cursor.rowcount} | "
                        f"SQL: {sql[:100]}{'...' if len(sql) > 100 else ''}"
                    )

                    return result

            except (pymysql.err.OperationalError, socket.timeout, TimeoutError, OSError) as e:
                # Reuse the module-level classifier so connection-error
                # detection stays consistent across this module.
                # NOTE: messages merely containing "timeout" (e.g. a lock
                # wait timeout) are also classified as connection errors
                # and therefore not retried — same as the original policy.
                is_connection_error, _ = is_database_connection_error(e)

                if is_connection_error:
                    # Connection error: raise immediately, no retry.
                    logger = logging.getLogger('flink.error')
                    logger.error(f"❌ MySQL查询失败（连接错误，不重试）: {e}")
                    raise

                # Other operational errors (locks, deadlocks, ...) may retry.
                last_error = e
                logger = logging.getLogger('flink.error')
                logger.warning(f"⚠️ MySQL查询失败 (尝试{retry+1}/{max_retries}): {e}")

                if retry < max_retries - 1:
                    # Backoff: 2s after the first failure, 5s afterwards.
                    wait_time = 2 if retry == 0 else 5
                    logger.info(f"⏰ 等待{wait_time}秒后重试...")
                    time.sleep(wait_time)
                else:
                    logger.error(f"❌ MySQL查询重试{max_retries}次均失败")
                    raise

            except Exception as e:
                last_error = e
                logger = logging.getLogger('flink.error')
                logger.warning(f"⚠️ MySQL查询失败 (尝试{retry+1}/{max_retries}): {e}")

                # Programming/data errors (bad SQL, bad values) will never
                # succeed on retry — fail immediately.
                if isinstance(e, (pymysql.err.ProgrammingError, pymysql.err.DataError)):
                    logger.error(f"❌ MySQL查询语法错误，不重试: {e}")
                    raise

                if retry < max_retries - 1:
                    # Backoff: 2s after the first failure, 5s afterwards.
                    wait_time = 2 if retry == 0 else 5
                    logger.info(f"⏰ 等待{wait_time}秒后重试...")
                    time.sleep(wait_time)
                else:
                    logger.error(f"❌ MySQL查询重试{max_retries}次均失败")
                    raise

        # Defensive fallback: the loop always returns or raises above.
        raise last_error

    @staticmethod
    @log_database_operation("execute_update")
    def execute_update(sql: str, params: Optional[tuple] = None) -> int:
        """Execute a single update/insert/delete and return affected rows.

        The transaction is committed on success and rolled back on error
        (handled by get_db_cursor).
        """
        with get_db_cursor(autocommit=True) as cursor:
            cursor.execute(sql, params or ())
            affected_rows = cursor.rowcount

            logger = logging.getLogger('flink.business')
            logger.info(
                f"SQL Update executed | "
                f"Rows affected: {affected_rows} | "
                f"SQL: {sql[:100]}{'...' if len(sql) > 100 else ''}"
            )

            return affected_rows

    @staticmethod
    @log_database_operation("execute_batch")
    def execute_batch(sql: str, params_list: list) -> int:
        """Execute one statement per parameter tuple in a single transaction.

        Returns:
            Total number of rows affected across all executions.
        """
        total_affected = 0

        with get_db_cursor(autocommit=True) as cursor:
            # Per-statement execute (not executemany) so the total rowcount
            # can be accumulated per parameter set.
            for params in params_list:
                cursor.execute(sql, params)
                total_affected += cursor.rowcount

            logger = logging.getLogger('flink.business')
            logger.info(
                f"SQL Batch executed | "
                f"Batch size: {len(params_list)} | "
                f"Total rows affected: {total_affected} | "
                f"SQL: {sql[:100]}{'...' if len(sql) > 100 else ''}"
            )

            return total_affected


