import os
import pymysql
import time
import logging
from contextlib import contextmanager
from typing import Optional, Dict, Any
from .logger import FlinkLogger, log_database_operation


def get_starrocks_connection():
    """Create a StarRocks connection from env vars with safe defaults.

    Retries up to 3 times with increasing back-off (30s after the first
    failure, 60s after the second) to match the MySQL helper's behaviour.

    Environment variables:
      STARROCKS_HOST, STARROCKS_PORT, STARROCKS_USER, STARROCKS_PASSWORD

    Returns:
        An open pymysql connection (DictCursor, autocommit disabled).

    Raises:
        Exception: the last connection error, if all retries fail.
    """
    # SECURITY NOTE(review): hard-coded fallback credentials should be
    # removed once every deployment sets the STARROCKS_* env vars; kept
    # here only for backward compatibility.
    config = {
        "host": os.environ.get("STARROCKS_HOST", "10.1.1.65"),
        "port": int(os.environ.get("STARROCKS_PORT", "9030")),
        "user": os.environ.get("STARROCKS_USER", "zhangjie"),
        "password": os.environ.get("STARROCKS_PASSWORD", "X9DM4cW5&6@B"),
        "charset": "utf8mb4",
        "cursorclass": pymysql.cursors.DictCursor,
        "autocommit": False,
    }

    # Retry logic (kept consistent with the MySQL connector).
    max_retries = 3
    last_error = None
    # Hoist logger lookups out of the retry loop.
    biz_logger = logging.getLogger('flink.business')
    err_logger = logging.getLogger('flink.error')

    for retry in range(max_retries):
        start_time = time.time()
        try:
            if retry > 0:
                biz_logger.info(f"🔄 重试StarRocks连接 (尝试{retry+1}/{max_retries})")

            connection = pymysql.connect(**config)
            duration = time.time() - start_time

            # Log successful connection with timing for observability.
            biz_logger.info(
                f"StarRocks connection established | "
                f"Host: {config['host']}:{config['port']} | "
                f"User: {config['user']} | "
                f"Duration: {round(duration * 1000, 2)}ms"
            )
            return connection

        except Exception as e:
            duration = time.time() - start_time
            last_error = e

            # Structured error record for monitoring/alerting.
            FlinkLogger.log_error(
                error=e,
                context="StarRocks Connection",
                extra_data={
                    'host': config['host'],
                    'port': config['port'],
                    'user': config['user'],
                    'duration_ms': round(duration * 1000, 2),
                    'retry': retry + 1,
                    'max_retries': max_retries
                }
            )

            err_logger.error(
                f"StarRocks connection failed (尝试{retry+1}/{max_retries}) | "
                f"Host: {config['host']}:{config['port']} | "
                f"User: {config['user']} | "
                f"Duration: {round(duration * 1000, 2)}ms | "
                f"Error: {str(e)}"
            )

            if retry < max_retries - 1:
                # Incremental wait: 30s after the 1st failure, 60s after the 2nd.
                wait_time = 30 if retry == 0 else 60
                err_logger.info(f"⏰ 等待{wait_time}秒后重试...")
                time.sleep(wait_time)
            else:
                # All attempts exhausted.
                err_logger.error(f"❌ StarRocks连接重试{max_retries}次均失败")

    # Final failure — re-raise the last connection error.
    raise last_error


@contextmanager
def get_starrocks_cursor(autocommit: bool = False):
    """Context manager yielding a StarRocks cursor on a fresh connection.

    Args:
        autocommit: if True, commit the transaction when the caller's
            `with` block exits cleanly (the connection itself is opened
            with autocommit disabled).

    Yields:
        A pymysql DictCursor.

    Raises:
        Re-raises any error from connection setup, the cursor, or the
        caller's block — after rolling back and logging it.
    """
    connection = None
    cursor = None
    start_time = time.time()
    biz_logger = logging.getLogger('flink.business')

    try:
        connection = get_starrocks_connection()
        cursor = connection.cursor()
        biz_logger.debug("StarRocks cursor acquired")

        yield cursor

        if autocommit:
            connection.commit()
            biz_logger.debug("StarRocks transaction committed")

        duration = time.time() - start_time
        biz_logger.debug(f"StarRocks operation completed in {round(duration * 1000, 2)}ms")

    except Exception as e:
        if connection:
            # Best-effort rollback: a failing rollback must not mask the
            # original error that is re-raised below.
            err_logger = logging.getLogger('flink.error')
            try:
                connection.rollback()
                err_logger.error("StarRocks transaction rolled back due to error")
            except Exception:
                err_logger.error("StarRocks rollback failed while handling error")

        duration = time.time() - start_time
        FlinkLogger.log_error(
            error=e,
            context="StarRocks Cursor Operation",
            extra_data={'duration_ms': round(duration * 1000, 2)}
        )
        raise

    finally:
        # Close cursor then connection; either may be None if the
        # corresponding acquisition step failed above.
        if cursor:
            cursor.close()
        if connection:
            connection.close()
            biz_logger.debug("StarRocks connection closed")


class StarRocksLogger:
    """Static helpers for logged StarRocks queries, updates and batches."""

    @staticmethod
    @log_database_operation("execute_starrocks_query")
    def execute_query(sql: str, params: Optional[tuple] = None, fetch_one: bool = False,
                     fetch_all: bool = True) -> Optional[Any]:
        """Execute a read query with retry (3 attempts, 30s/60s back-off).

        Args:
            sql: query text; use %s placeholders with `params` so values
                are bound by the driver (avoids SQL injection).
            params: positional parameters for the placeholders.
            fetch_one: return a single row dict (takes precedence over
                `fetch_all`).
            fetch_all: return all rows; if both flags are falsy, None.

        Returns:
            A row dict, a sequence of row dicts, or None.

        Raises:
            The last database error if all retries fail.
        """
        max_retries = 3
        last_error = None

        for retry in range(max_retries):
            try:
                if retry > 0:
                    logging.getLogger('flink.business').info(
                        f"🔄 重试StarRocks查询 (尝试{retry+1}/{max_retries})")

                with get_starrocks_cursor() as cursor:
                    cursor.execute(sql, params or ())

                    if fetch_one:
                        result = cursor.fetchone()
                    elif fetch_all:
                        result = cursor.fetchall()
                    else:
                        result = None

                    # Debug-level trace of the executed statement (truncated).
                    logging.getLogger('flink.business').debug(
                        f"StarRocks Query executed | "
                        f"Rows affected: {cursor.rowcount} | "
                        f"SQL: {sql[:100]}{'...' if len(sql) > 100 else ''}"
                    )
                    return result

            except Exception as e:
                last_error = e
                logger = logging.getLogger('flink.error')
                logger.warning(f"⚠️ StarRocks查询失败 (尝试{retry+1}/{max_retries}): {e}")

                if retry < max_retries - 1:
                    # Incremental wait: 30s after the 1st failure, 60s after the 2nd.
                    wait_time = 30 if retry == 0 else 60
                    logger.info(f"⏰ 等待{wait_time}秒后重试...")
                    time.sleep(wait_time)
                else:
                    logger.error(f"❌ StarRocks查询重试{max_retries}次均失败")

        # All retries exhausted — re-raise the last error.
        raise last_error

    @staticmethod
    @log_database_operation("execute_starrocks_update")
    def execute_update(sql: str, params: Optional[tuple] = None) -> int:
        """Execute a single write statement and commit.

        Args:
            sql: statement text with %s placeholders.
            params: positional parameters for the placeholders.

        Returns:
            Number of rows affected.
        """
        with get_starrocks_cursor(autocommit=True) as cursor:
            cursor.execute(sql, params or ())
            affected_rows = cursor.rowcount

            logging.getLogger('flink.business').info(
                f"StarRocks Update executed | "
                f"Rows affected: {affected_rows} | "
                f"SQL: {sql[:100]}{'...' if len(sql) > 100 else ''}"
            )
            return affected_rows

    @staticmethod
    @log_database_operation("execute_starrocks_batch")
    def execute_batch(sql: str, params_list: list) -> int:
        """Execute one statement once per params tuple, then commit.

        NOTE(review): cursor.executemany() could push this loop into the
        driver; kept as an explicit loop to preserve per-row rowcount
        accumulation exactly as callers observe it today.

        Args:
            sql: statement text with %s placeholders.
            params_list: one params tuple per execution; empty list is a
                no-op returning 0.

        Returns:
            Total rows affected across the whole batch.
        """
        total_affected = 0

        with get_starrocks_cursor(autocommit=True) as cursor:
            for params in params_list:
                cursor.execute(sql, params)
                total_affected += cursor.rowcount

            logging.getLogger('flink.business').info(
                f"StarRocks Batch executed | "
                f"Batch size: {len(params_list)} | "
                f"Total rows affected: {total_affected} | "
                f"SQL: {sql[:100]}{'...' if len(sql) > 100 else ''}"
            )
            return total_affected

