#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
DuckDB数据库引擎模块
处理数据库连接、表管理和查询执行
"""

import hashlib
import logging
import os
import threading
import time
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union

import duckdb


class DBConnectionPool:
    """Thread-aware pool of DuckDB connections.

    Each thread is handed at most one live connection at a time; released
    connections are kept in an idle list for reuse.  When ``max_connections``
    are already checked out, :meth:`get_connection` waits (up to
    ``_WAIT_TIMEOUT_S``) for a release before falling back to creating a new
    connection anyway, so callers can never deadlock on an exhausted pool.
    """

    # How long get_connection() waits for a free slot before creating a
    # connection anyway (preserves the original non-blocking liveness).
    _WAIT_TIMEOUT_S = 5.0

    def __init__(self, db_path: Optional[str] = None, max_connections: int = 5):
        """Initialise the pool.

        Args:
            db_path: Database file path; ``None`` means an in-memory database.
            max_connections: Soft cap on concurrently checked-out connections.
        """
        self.db_path = db_path or ":memory:"
        self.max_connections = max_connections
        self._connections: list = []          # idle, reusable connections
        self._lock = threading.Lock()
        # Condition shares the lock so releases can wake waiting getters.
        self._cond = threading.Condition(self._lock)
        self._used_connections: dict = {}     # thread id -> checked-out connection
        self.logger = logging.getLogger("db_engine")

    def get_connection(self) -> "duckdb.DuckDBPyConnection":
        """Return a connection bound to the calling thread.

        Repeated calls from the same thread return the same connection until
        it is released.

        Returns:
            A DuckDB connection object.
        """
        thread_id = threading.get_ident()

        with self._cond:
            # Same thread re-entering: hand back its existing connection.
            existing = self._used_connections.get(thread_id)
            if existing is not None:
                return existing

            # Enforce max_connections: wait for a release, with a timeout so
            # we never block forever (the original only logged a warning).
            deadline = time.monotonic() + self._WAIT_TIMEOUT_S
            while (not self._connections
                   and len(self._used_connections) >= self.max_connections):
                remaining = deadline - time.monotonic()
                if remaining <= 0:
                    self.logger.warning("已达到最大连接数 %d，等待连接释放", self.max_connections)
                    break
                self._cond.wait(remaining)

            if self._connections:
                conn = self._connections.pop()
            else:
                conn = duckdb.connect(self.db_path)
            self._used_connections[thread_id] = conn
            return conn

    def release_connection(self, conn: Optional["duckdb.DuckDBPyConnection"] = None):
        """Return a connection to the idle pool.

        Args:
            conn: Optional explicit connection to release; by default the
                connection owned by the calling thread is released.
        """
        thread_id = threading.get_ident()

        with self._cond:
            if conn is None:
                conn = self._used_connections.pop(thread_id, None)
            else:
                # Drop whichever thread owns this connection so the
                # bookkeeping cannot leak entries (the original kept the
                # mapping alive when conn was passed explicitly).
                for tid, owned in list(self._used_connections.items()):
                    if owned is conn:
                        del self._used_connections[tid]
                        break

            if conn is not None and conn not in self._connections:
                self._connections.append(conn)
                self._cond.notify()

    def close_all(self):
        """Close every connection, checked-out and idle alike."""
        with self._cond:
            # Close connections still held by threads.
            for conn in self._used_connections.values():
                conn.close()
            self._used_connections.clear()

            # Close idle pooled connections.
            for conn in self._connections:
                conn.close()
            self._connections.clear()
            # Wake any waiters so they proceed to create fresh connections.
            self._cond.notify_all()


class DBEngine:
    """DuckDB database engine: file loading, query execution, table management."""

    # Canonical DDL for the query history table; shared by creation and
    # every migration/recovery path (the original repeated it four times).
    _QUERY_HISTORY_DDL = """
        CREATE TABLE query_history (
            id BIGINT PRIMARY KEY,
            query_text TEXT,
            executed_at TIMESTAMP,
            duration_ms INTEGER,
            row_count INTEGER,
            status VARCHAR
        )
    """

    def __init__(self):
        """Initialise the engine and its backing database.

        The database path comes from the ``DB_PATH`` environment variable,
        falling back to ``~/.filedb.duckdb``.
        """
        db_path = os.environ.get("DB_PATH")
        if not db_path:
            db_path = str(Path.home() / ".filedb.duckdb")

        self.db_path = db_path
        self.connection_pool = DBConnectionPool(db_path)
        self.logger = logging.getLogger("db_engine")

        self._init_database()

    # ------------------------------------------------------------------ #
    # SQL building helpers
    # ------------------------------------------------------------------ #

    @staticmethod
    def _quote_ident(name: str) -> str:
        """Return *name* as a safely double-quoted SQL identifier."""
        return '"' + name.replace('"', '""') + '"'

    @staticmethod
    def _escape_literal(value: str) -> str:
        """Escape single quotes so *value* can sit inside a SQL string literal."""
        return value.replace("'", "''")

    @staticmethod
    def _derive_table_name(path: Path) -> str:
        """Derive a SQL-friendly table name from a file path.

        Spaces and dashes become underscores; a leading digit gets a ``t_``
        prefix so the name parses as an identifier.
        """
        name = path.stem.replace(' ', '_').replace('-', '_')
        if name and name[0].isdigit():
            name = f"t_{name}"
        return name

    # ------------------------------------------------------------------ #
    # Schema setup / migration
    # ------------------------------------------------------------------ #

    def _init_database(self) -> None:
        """Create the metadata table and migrate query_history if needed."""
        conn = self.connection_pool.get_connection()
        try:
            conn.execute("""
                CREATE TABLE IF NOT EXISTS metadata (
                    file_id VARCHAR PRIMARY KEY,
                    file_path VARCHAR,
                    file_type VARCHAR,
                    table_name VARCHAR, 
                    loaded_at TIMESTAMP,
                    last_accessed TIMESTAMP,
                    row_count INTEGER,
                    column_count INTEGER
                )
            """)

            self._migrate_query_history_table(conn)

            self.logger.info("数据库初始化完成")
        except Exception as e:
            self.logger.error("数据库初始化失败: %s", str(e))
        finally:
            self.connection_pool.release_connection(conn)

    def _migrate_query_history_table(self, conn) -> None:
        """Ensure query_history exists with the current schema.

        Detection strategy: probe the ``executed_at`` column (new schema),
        then probe the table itself (old schema).  Old rows are copied
        through a backup table with ``CURRENT_TIMESTAMP`` filled in for the
        missing column.  Every failure path falls back to recreating an
        empty table so the engine always ends up usable.
        """
        try:
            # Probe for the new-schema column; success means nothing to do.
            try:
                conn.execute("SELECT executed_at FROM query_history LIMIT 1").fetchone()
                self.logger.debug("query_history 表结构已是最新版本")
                return
            except Exception:
                pass  # column or table missing -> migrate or create below

            # Distinguish "old table" from "no table".
            try:
                conn.execute("SELECT COUNT(*) FROM query_history").fetchone()
                table_exists = True
                self.logger.info("检测到旧版本的 query_history 表，正在迁移...")
            except Exception:
                table_exists = False
                self.logger.info("创建新的 query_history 表")

            if not table_exists:
                conn.execute(self._QUERY_HISTORY_DDL)
                self.logger.info("创建了新的 query_history 表")
                return

            # Old schema: back up, rebuild, copy data across.
            try:
                conn.execute("""
                    CREATE TABLE query_history_backup AS 
                    SELECT * FROM query_history
                """)
                conn.execute("DROP TABLE query_history")
                conn.execute(self._QUERY_HISTORY_DDL)
                # Old rows lack executed_at; stamp them with "now".
                conn.execute("""
                    INSERT INTO query_history (id, query_text, executed_at, duration_ms, row_count, status)
                    SELECT 
                        id, 
                        query_text, 
                        CURRENT_TIMESTAMP as executed_at,
                        COALESCE(duration_ms, 0) as duration_ms,
                        COALESCE(row_count, 0) as row_count,
                        COALESCE(status, 'UNKNOWN') as status
                    FROM query_history_backup
                """)
                conn.execute("DROP TABLE query_history_backup")
                self.logger.info("query_history 表迁移完成")
            except Exception as migrate_error:
                self.logger.error("迁移过程中出错: %s", str(migrate_error))
                # Migration failed: drop any leftovers and start clean.
                try:
                    conn.execute("DROP TABLE IF EXISTS query_history")
                    conn.execute("DROP TABLE IF EXISTS query_history_backup")
                except Exception:
                    pass
                conn.execute(self._QUERY_HISTORY_DDL)
                self.logger.info("重新创建了 query_history 表")

        except Exception as e:
            self.logger.error("迁移 query_history 表失败: %s", str(e))
            # Last resort: force-recreate an empty table.
            try:
                conn.execute("DROP TABLE IF EXISTS query_history")
                conn.execute(self._QUERY_HISTORY_DDL)
                self.logger.info("强制重新创建了 query_history 表")
            except Exception as create_error:
                self.logger.error("强制创建 query_history 表也失败: %s", str(create_error))

    # ------------------------------------------------------------------ #
    # Public API
    # ------------------------------------------------------------------ #

    def load_file(self, file_path: str, table_name: Optional[str] = None,
                 file_type: Optional[str] = None, **options) -> Tuple[bool, str]:
        """Load a CSV or Parquet file into a database table.

        Args:
            file_path: Path of the file to load.
            table_name: Target table name; derived from the file name if None.
            file_type: 'csv' or 'parquet'; inferred from the extension if None.
            options: Loader options, e.g. ``delimiter`` and ``header`` for CSV.

        Returns:
            Tuple of (success flag, human-readable message).
        """
        path = Path(file_path)

        if not path.exists():
            return False, f"文件不存在: {file_path}"

        # Infer the file type from the extension when not given.
        if not file_type:
            suffix = path.suffix.lower()
            if suffix == '.csv':
                file_type = 'csv'
            elif suffix in ('.parquet', '.pq'):
                file_type = 'parquet'
            else:
                return False, f"不支持的文件类型: {suffix}"

        if not table_name:
            table_name = self._derive_table_name(path)

        conn = self.connection_pool.get_connection()
        try:
            start_time = time.time()
            quoted = self._quote_ident(table_name)
            # Escape quotes so paths like "O'Brien's data.csv" cannot break
            # (or inject into) the generated SQL.
            path_literal = self._escape_literal(file_path)

            if file_type == 'csv':
                delimiter = self._escape_literal(str(options.get('delimiter', ',')))
                header_option = 'true' if options.get('header', True) else 'false'
                conn.execute(f"""
                    CREATE OR REPLACE TABLE {quoted} AS 
                    SELECT * FROM read_csv_auto('{path_literal}', delim='{delimiter}', header={header_option})
                """)
            elif file_type == 'parquet':
                conn.execute(f"""
                    CREATE OR REPLACE TABLE {quoted} AS 
                    SELECT * FROM read_parquet('{path_literal}')
                """)

            result = conn.execute(f'SELECT COUNT(*) as row_count FROM {quoted}').fetchone()
            row_count = result[0] if result else 0

            # NOTE: the original wrapped the name in literal double quotes
            # inside the string literal, so pragma_table_info looked up the
            # wrong name and column_count was silently wrong.
            result = conn.execute(
                f"SELECT COUNT(*) as column_count FROM pragma_table_info('{self._escape_literal(table_name)}')"
            ).fetchone()
            column_count = result[0] if result else 0

            # Stable id: built-in hash() is salted per process, so the same
            # file would register under a new id on every run.
            file_id = hashlib.md5(file_path.encode('utf-8')).hexdigest()
            now = time.strftime('%Y-%m-%d %H:%M:%S')

            conn.execute("""
                INSERT OR REPLACE INTO metadata 
                (file_id, file_path, file_type, table_name, loaded_at, last_accessed, row_count, column_count)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?)
            """, [file_id, file_path, file_type, table_name, now, now, row_count, column_count])

            self.logger.info("文件 %s 加载完成，用时 %.2f 秒", file_path, time.time() - start_time)
            return True, f"成功加载 {row_count} 行数据到表 {table_name}"

        except Exception as e:
            self.logger.error("加载文件失败: %s", str(e))
            return False, f"加载文件失败: {str(e)}"
        finally:
            self.connection_pool.release_connection(conn)

    def _record_query_history(self, conn, query_text: str, duration_ms: int,
                              row_count: int, status: str) -> None:
        """Best-effort insert of one query_history row; failures are logged.

        The id is the current time in milliseconds, so two queries inside
        the same millisecond can collide on the PRIMARY KEY — that loss is
        accepted rather than failing the query itself.
        """
        try:
            conn.execute("""
                INSERT INTO query_history 
                (id, query_text, executed_at, duration_ms, row_count, status)
                VALUES (?, ?, ?, ?, ?, ?)
            """, [int(time.time() * 1000), query_text,
                  time.strftime('%Y-%m-%d %H:%M:%S'),
                  duration_ms, row_count, status])
        except Exception as insert_error:
            self.logger.error("记录查询历史失败: %s", str(insert_error))

    def execute_query(self, query: str) -> Tuple[bool, Union[object, str], float]:
        """Execute a SQL query and return its result as a DataFrame.

        If fetching the full result fails (e.g. memory pressure) and the
        query has no LIMIT clause, it is retried with ``LIMIT 10000``.

        Args:
            query: SQL text to execute.

        Returns:
            Tuple of (success flag, DataFrame or error message, seconds taken).
        """
        conn = self.connection_pool.get_connection()
        try:
            start_time = time.time()
            cursor = conn.execute(query)

            try:
                result = cursor.fetch_df()
                duration = time.time() - start_time
                self._record_query_history(conn, query, int(duration * 1000),
                                           len(result), 'SUCCESS')
                return True, result, duration

            except Exception as fetch_error:
                self.logger.warning("获取查询结果时出现问题: %s", str(fetch_error))
                # Crude but matches original behavior: only retry when the
                # text contains no "LIMIT " anywhere.
                if 'LIMIT ' in query.upper():
                    raise
                limited_query = query.strip()
                if limited_query.endswith(';'):
                    limited_query = limited_query[:-1] + " LIMIT 10000;"
                else:
                    limited_query = limited_query + " LIMIT 10000"

                self.logger.info("由于内存限制，自动添加 LIMIT 10000 到查询")
                result = conn.execute(limited_query).fetch_df()
                duration = time.time() - start_time
                self._record_query_history(conn, limited_query, int(duration * 1000),
                                           len(result), 'SUCCESS')
                return True, result, duration

        except Exception as e:
            self.logger.error("执行查询失败: %s", str(e))
            self._record_query_history(conn, query, 0, 0, 'ERROR')
            return False, f"执行查询失败: {str(e)}", 0.0
        finally:
            self.connection_pool.release_connection(conn)

    def get_table_schema(self, table_name: str) -> List[Dict[str, Any]]:
        """Return the column layout of a table.

        Args:
            table_name: Table to inspect.

        Returns:
            One dict per column with cid/name/type/notnull/dflt_value/pk
            keys; empty list on error.
        """
        conn = self.connection_pool.get_connection()
        try:
            rows = conn.execute(
                f"PRAGMA table_info('{self._escape_literal(table_name)}')"
            ).fetchall()
            keys = ("cid", "name", "type", "notnull", "dflt_value", "pk")
            return [dict(zip(keys, row)) for row in rows]
        except Exception as e:
            self.logger.error("获取表结构失败: %s", str(e))
            return []
        finally:
            self.connection_pool.release_connection(conn)

    def get_loaded_tables(self) -> List[Dict[str, Any]]:
        """Return metadata for every loaded table, most recently used first.

        Returns:
            List of metadata dicts; empty list on error.
        """
        conn = self.connection_pool.get_connection()
        try:
            rows = conn.execute("""
                SELECT * FROM metadata
                ORDER BY last_accessed DESC
            """).fetchall()
            keys = ("file_id", "file_path", "file_type", "table_name",
                    "loaded_at", "last_accessed", "row_count", "column_count")
            return [dict(zip(keys, row)) for row in rows]
        except Exception as e:
            self.logger.error("获取已加载表失败: %s", str(e))
            return []
        finally:
            self.connection_pool.release_connection(conn)

    def drop_table(self, table_name: str) -> Tuple[bool, str]:
        """Drop a table and remove its metadata record.

        Args:
            table_name: Table to drop.

        Returns:
            Tuple of (success flag, human-readable message).
        """
        conn = self.connection_pool.get_connection()
        try:
            # Parameterized existence check (the original interpolated the
            # name into a sqlite_master query — injectable and quote-fragile).
            check_result = conn.execute(
                "SELECT table_name FROM information_schema.tables WHERE table_name = ?",
                [table_name],
            ).fetchone()

            if not check_result:
                return False, f"表 {table_name} 不存在"

            conn.execute(f'DROP TABLE IF EXISTS {self._quote_ident(table_name)}')

            conn.execute("DELETE FROM metadata WHERE table_name = ?", [table_name])

            self.logger.info("表 %s 已成功删除", table_name)
            return True, f"表 {table_name} 已成功删除"

        except Exception as e:
            self.logger.error("删除表失败: %s", str(e))
            return False, f"删除表失败: {str(e)}"
        finally:
            self.connection_pool.release_connection(conn)

    def close(self):
        """Close every pooled database connection."""
        self.connection_pool.close_all()


# Singleton instance and the lock guarding its first construction.
_db_engine_instance = None
_db_engine_lock = threading.Lock()

def get_db_engine() -> DBEngine:
    """Return the process-wide DBEngine singleton, creating it on first use.

    Construction is guarded by double-checked locking so concurrent first
    calls cannot build two engines (the original check-then-set was racy).

    Returns:
        The shared DBEngine instance.
    """
    global _db_engine_instance
    if _db_engine_instance is None:
        with _db_engine_lock:
            if _db_engine_instance is None:
                _db_engine_instance = DBEngine()
    return _db_engine_instance