import json
from datetime import datetime
from fastapi.concurrency import run_in_threadpool
from loguru import logger
from sqlalchemy import create_engine, text, inspect, MetaData, Table, Column as SQLColumn
from sqlalchemy.schema import CreateTable
from sqlalchemy.sql import select
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.types import String, Integer, Float, Boolean, DateTime, Date, Time, Text, BINARY
import asyncio

from app.models.db import DataSource, DataSet, DataField, FieldChangeLog
from app.services.datasource_service import get_table_columns, TYPE_MAPPING
from config import DATABASETB_URL

# Maps our logical (string) type names to SQLAlchemy column types.
SQLALCHEMY_TYPE_MAPPING = {
    'string': String,
    'text': Text,
    'integer': Integer,
    'float': Float,
    'decimal': Float,  # simplification — Numeric/DECIMAL would preserve exactness
    'boolean': Boolean,
    'date': Date,
    'datetime': DateTime,
    'time': Time,
    'binary': BINARY,
    'json': Text,  # simplification — the JSON type would be better where the dialect supports it
}


async def check_structure_changes(dataset, session):
    """Detect structure changes between a dataset's saved fields and the
    live columns of its source table.

    Fetches the current column metadata via ``get_table_columns`` and
    compares it against the ``DataField`` rows stored for the dataset,
    classifying every column as added / removed / modified / unchanged.
    Pending ``FieldChangeLog`` rows are created for each difference.

    Args:
        dataset: DataSet model instance (``data_source``, ``original_name``
            and ``fields`` must be loaded).
        session: database session used to persist change logs.

    Returns:
        dict with ``has_changes`` (bool) and ``changes`` (the four lists).

    Raises:
        Exception: wraps any underlying failure, chained from the cause.
    """
    try:
        data_source = dataset.data_source

        # Live column metadata straight from the source database.
        latest_columns = await get_table_columns(data_source, dataset.original_name)

        # Index persisted fields by name for O(1) lookup.
        existing_fields = {field.name: field for field in dataset.fields}

        changes = {
            "added": [],
            "removed": [],
            "modified": [],
            "unchanged": []
        }

        # Properties whose divergence counts as a modification.
        compared_props = ("data_type", "original_data_type", "is_nullable", "is_primary_key")

        # Classify every live column as added / modified / unchanged.
        for col in latest_columns:
            col_name = col["name"]
            field = existing_fields.get(col_name)

            if field is None:
                # Column exists in the source but not in our metadata yet.
                changes["added"].append({
                    "field_name": col_name,
                    "field_info": col
                })
                continue

            changes_detected = [
                {
                    "property": prop,
                    "old_value": getattr(field, prop),
                    "new_value": col[prop]
                }
                for prop in compared_props
                if getattr(field, prop) != col[prop]
            ]

            if changes_detected:
                changes["modified"].append({
                    "field": field,
                    "field_name": col_name,
                    "changes": changes_detected
                })
            else:
                changes["unchanged"].append({
                    "field": field,
                    "field_name": col_name
                })

        # Fields on record that no longer exist in the source
        # (set lookup instead of the previous O(n^2) scan).
        latest_names = {col["name"] for col in latest_columns}
        for field_name, field in existing_fields.items():
            if field_name not in latest_names:
                changes["removed"].append({
                    "field": field,
                    "field_name": field_name
                })

        # Persist one FieldChangeLog row per detected difference.
        await create_change_logs(dataset, changes, session)

        has_changes = bool(changes["added"] or changes["removed"] or changes["modified"])

        if has_changes:
            dataset.sync_status = "structure_changed"
            # NOTE(review): naive local time, consistent with the rest of
            # this module — confirm whether UTC is expected.
            dataset.last_structure_changed_at = datetime.now()

        return {
            "has_changes": has_changes,
            "changes": changes
        }

    except Exception as e:
        logger.error(f"检测结构变更失败: {str(e)}")
        # Chain the original cause instead of discarding the traceback.
        raise Exception(f"检测结构变更失败: {str(e)}") from e


async def create_change_logs(dataset, changes, session):
    """Create pending ``FieldChangeLog`` rows for a detected change set.

    One log row is added per added/removed/modified field. Rows are only
    added to the session; committing is left to the caller.

    Args:
        dataset: DataSet model instance the changes belong to.
        changes: change dict produced by ``check_structure_changes`` with
            ``added`` / ``removed`` / ``modified`` entry lists.
        session: database session the log rows are added to.

    Raises:
        Exception: wraps any underlying failure, chained from the cause.
    """
    try:
        # Added fields: no DataField row exists yet, so field_id is None
        # and the full column description is snapshotted as the new value.
        for added in changes["added"]:
            change_log = FieldChangeLog(
                dataset_id=dataset.id,
                field_id=None,  # the new field has no persisted id yet
                change_type="added",
                field_name=added["field_name"],
                old_value=None,
                new_value=json.dumps(added["field_info"]),
                status="pending",
                created_at=datetime.now()
            )

            change_log.set_change_details({
                key: added["field_info"][key]
                for key in ("data_type", "original_data_type", "is_nullable",
                            "is_primary_key", "ordinal_position")
            })
            session.add(change_log)

        # Removed fields: snapshot the old field as the old value.
        for removed in changes["removed"]:
            field = removed["field"]
            change_log = FieldChangeLog(
                dataset_id=dataset.id,
                field_id=field.id,
                change_type="removed",
                field_name=field.name,
                old_value=json.dumps(field.to_dict()),
                new_value=None,
                status="pending",
                created_at=datetime.now()
            )

            change_log.set_change_details({
                "data_type": field.data_type,
                "original_data_type": field.original_data_type,
                "is_nullable": field.is_nullable,
                "is_primary_key": field.is_primary_key
            })
            session.add(change_log)

        # Modified fields: old/new property values stored side by side.
        for modified in changes["modified"]:
            field = modified["field"]
            prop_changes = modified["changes"]
            change_log = FieldChangeLog(
                dataset_id=dataset.id,
                field_id=field.id,
                # A data-type change is the most significant kind, so it
                # overrides the generic "property_changed" label.
                change_type=("type_changed"
                             if any(c["property"] == "data_type" for c in prop_changes)
                             else "property_changed"),
                field_name=field.name,
                old_value=json.dumps({c["property"]: c["old_value"] for c in prop_changes}),
                new_value=json.dumps({c["property"]: c["new_value"] for c in prop_changes}),
                status="pending",
                created_at=datetime.now()
            )

            change_log.set_change_details({c["property"]: c["new_value"] for c in prop_changes})
            session.add(change_log)

    except Exception as e:
        logger.error(f"创建变更日志失败: {str(e)}")
        # Chain the original cause instead of discarding the traceback.
        raise Exception(f"创建变更日志失败: {str(e)}") from e


async def apply_structure_changes(dataset, session):
    """Apply all pending structure-change logs to the dataset's fields.

    Creates, deletes, or updates ``DataField`` rows according to the
    pending ``FieldChangeLog`` entries, marks each applied log, and resets
    the dataset's sync status to "pending" (data not yet re-synced).

    Args:
        dataset: DataSet model instance whose pending changes are applied.
        session: async database session.

    Returns:
        dict with ``success``, ``message`` and ``applied_changes`` count.

    Raises:
        Exception: wraps any underlying failure, chained from the cause.
    """
    try:
        # Pending change logs for this dataset.
        pending_logs = await session.execute(
            select(FieldChangeLog).where(
                FieldChangeLog.dataset_id == dataset.id,
                FieldChangeLog.status == "pending"
            )
        )
        pending_logs = pending_logs.scalars().all()

        if not pending_logs:
            return {
                "success": True,
                "message": "没有待处理的结构变更",
                "applied_changes": 0
            }

        # Fresh column metadata, used to build new DataField rows.
        data_source = dataset.data_source
        latest_columns = await get_table_columns(data_source, dataset.original_name)

        for log in pending_logs:
            if log.change_type == "added":
                # Use the live column description rather than the JSON
                # snapshot stored in the log (the snapshot was previously
                # loaded into an unused local — removed).
                col_info = next((col for col in latest_columns if col["name"] == log.field_name), None)

                # If the column has meanwhile disappeared again, the log
                # is intentionally left pending.
                if col_info:
                    new_field = DataField(
                        dataset_id=dataset.id,
                        name=log.field_name,
                        display_name=log.field_name,
                        description=col_info.get("comment", ""),
                        data_type=col_info["data_type"],
                        original_data_type=col_info["original_data_type"],
                        is_primary_key=col_info["is_primary_key"],
                        is_nullable=col_info["is_nullable"],
                        ordinal_position=col_info["ordinal_position"],
                        created_at=datetime.now()
                    )
                    session.add(new_field)

                    log.status = "applied"
                    log.applied_at = datetime.now()
                    session.add(log)

            elif log.change_type == "removed":
                if log.field_id:
                    field = await session.get(DataField, log.field_id)
                    if field:
                        # Hard delete; switch to a soft delete here if an
                        # audit trail of the field row itself is needed.
                        await session.delete(field)

                log.status = "applied"
                log.applied_at = datetime.now()
                session.add(log)

            elif log.change_type in ("property_changed", "type_changed"):
                if log.field_id:
                    field = await session.get(DataField, log.field_id)
                    if field:
                        # Copy each changed property onto the field,
                        # skipping unknown property names defensively.
                        for prop, value in log.get_change_details().items():
                            if hasattr(field, prop):
                                setattr(field, prop, value)

                        field.updated_at = datetime.now()
                        session.add(field)

                log.status = "applied"
                log.applied_at = datetime.now()
                session.add(log)

        # Structure applied; the data itself still needs a sync pass.
        dataset.sync_status = "pending"
        session.add(dataset)

        return {
            "success": True,
            "message": "结构变更已应用",
            "applied_changes": len(pending_logs)
        }

    except Exception as e:
        logger.error(f"应用结构变更失败: {str(e)}")
        # BUG FIX: this is an async session — rollback() must be awaited;
        # the original call only created an un-awaited coroutine and never
        # actually rolled back.
        await session.rollback()
        raise Exception(f"应用结构变更失败: {str(e)}") from e


async def sync_dataset(dataset, session):
    """Synchronize a dataset from its data source.

    Structure changes are checked first and handled according to
    ``dataset.structure_change_strategy``: "notify" aborts the sync so the
    user can review, "auto_apply" applies pending changes, anything else
    ignores them. The data transfer itself is delegated to ``full_sync``
    or ``incremental_sync`` based on ``dataset.sync_type``.

    Args:
        dataset: DataSet model instance to synchronize.
        session: database session used to persist status updates.

    Returns:
        dict describing the outcome; this function does not raise —
        failures are reported via ``success: False`` and ``sync_status``.
    """
    try:
        structure_result = await check_structure_changes(dataset, session)
        strategy = dataset.structure_change_strategy

        if structure_result["has_changes"]:
            if strategy == "notify":
                # Stop here so the user can review the pending changes.
                return {
                    "success": False,
                    "message": "检测到结构变更，需要先处理变更",
                    "has_structure_changes": True,
                    "changes": structure_result["changes"]
                }
            if strategy == "auto_apply":
                await apply_structure_changes(dataset, session)

        # Strategy is "ignore", or nothing changed: proceed with the sync.
        source = dataset.data_source

        # Mark as in-progress and commit so the state is visible to others.
        dataset.sync_status = "syncing"
        session.add(dataset)
        await session.commit()

        if dataset.sync_type == "full":
            outcome = await full_sync(dataset, source)
        else:
            # Any non-"full" value means incremental.
            outcome = await incremental_sync(dataset, source)

        dataset.last_synced_at = datetime.now()
        dataset.sync_status = "completed"

        new_cursor = outcome.get("last_sync_value")
        if new_cursor:
            dataset.last_sync_value = new_cursor

        session.add(dataset)

        return {
            "success": True,
            "message": "数据集同步成功",
            "sync_type": dataset.sync_type,
            "rows_synced": outcome.get("rows_synced", 0)
        }

    except Exception as e:
        logger.error(f"数据集同步失败: {str(e)}")

        # Record the failure on the dataset; the caller owns the commit.
        dataset.sync_status = "failed"
        dataset.error_message = str(e)
        session.add(dataset)

        return {
            "success": False,
            "message": f"数据集同步失败: {str(e)}",
            "has_structure_changes": False
        }


async def full_sync(dataset, data_source):
    """Run a full (complete re-read) sync of a dataset.

    Executes the dataset's custom query, or ``SELECT *`` over its source
    table, and reports how many rows were covered.

    Args:
        dataset: DataSet model instance being synced.
        data_source: DataSource model instance providing the connection.

    Returns:
        dict with ``success`` and ``rows_synced``.

    Raises:
        Exception: wraps any underlying failure, chained from the cause.
    """
    try:
        connection_string = data_source.get_connection_string()

        engine = create_engine(connection_string)
        try:
            # Context manager guarantees the connection is released even
            # if the query raises (the original leaked it on error).
            with engine.connect() as connection:
                if dataset.type == "query" and dataset.custom_query:
                    # Custom query datasets run their stored SQL verbatim.
                    query = dataset.custom_query
                else:
                    # NOTE(review): the table name is interpolated directly;
                    # it comes from our own metadata, but it must be
                    # validated/quoted if it can ever be user-supplied.
                    query = f"SELECT * FROM {dataset.original_name}"

                result = connection.execute(text(query))

                # NOTE(review): rowcount is unreliable for SELECT on many
                # DBAPI drivers (often -1) — confirm before relying on it
                # for reporting.
                rows_synced = result.rowcount

                # TODO: actual persistence of the fetched data goes here
                # (write to a target table, update statistics, ...).
        finally:
            engine.dispose()

        return {
            "success": True,
            "rows_synced": rows_synced
        }

    except Exception as e:
        logger.error(f"全量同步失败: {str(e)}")
        # Chain the original cause instead of discarding the traceback.
        raise Exception(f"全量同步失败: {str(e)}") from e


async def incremental_sync(dataset, data_source):
    """Run an incremental sync based on a monotonically increasing field.

    Rows whose ``incremental_field`` exceeds ``dataset.last_sync_value``
    are fetched in ascending order; the largest value seen becomes the
    new sync cursor. A first sync (no cursor) reads the whole table.

    Args:
        dataset: DataSet model instance (must define ``incremental_field``).
        data_source: DataSource model instance providing the connection.

    Returns:
        dict with ``success``, ``rows_synced`` and ``last_sync_value``.

    Raises:
        Exception: if no incremental field is configured, or chained from
            any underlying failure.
    """
    try:
        if not dataset.incremental_field:
            raise Exception("未配置增量同步字段")

        last_sync_value = dataset.last_sync_value

        engine = create_engine(data_source.get_connection_string())
        try:
            # Context manager guarantees the connection is released even
            # if the query raises (the original leaked it on error).
            with engine.connect() as connection:
                # NOTE(review): table/field names are interpolated directly;
                # they come from our own metadata, but must be validated or
                # quoted if they can ever be user-supplied.
                table_name = dataset.original_name
                incremental_field = dataset.incremental_field

                if last_sync_value:
                    # SECURITY FIX: bind the cursor value as a parameter
                    # instead of splicing it into the SQL string. The old
                    # code was injectable through last_sync_value and broke
                    # on values containing quotes; binding also removes the
                    # fragile "is this a date field?" name sniffing.
                    stmt = text(
                        f"SELECT * FROM {table_name} "
                        f"WHERE {incremental_field} > :last_value "
                        f"ORDER BY {incremental_field} ASC"
                    )
                    result = connection.execute(stmt, {"last_value": last_sync_value})
                else:
                    # First sync: equivalent to a full sync, still ordered
                    # so the cursor advances deterministically.
                    result = connection.execute(
                        text(f"SELECT * FROM {table_name} ORDER BY {incremental_field} ASC")
                    )

                columns = result.keys()

                rows = []
                max_incremental_value = last_sync_value
                for row in result:
                    row_dict = {}
                    for i, column in enumerate(columns):
                        row_dict[column] = row[i]

                        if column == incremental_field:
                            # NOTE(review): the cursor is tracked and
                            # compared as a string, i.e. lexicographically —
                            # wrong for plain integers ("9" > "10"). Kept
                            # for compatibility with already-stored cursors;
                            # consider comparing native values.
                            current_value = str(row[i])
                            if not max_incremental_value or current_value > max_incremental_value:
                                max_incremental_value = current_value

                    rows.append(row_dict)

                rows_synced = len(rows)

                # TODO: actual persistence of the fetched rows goes here.
        finally:
            engine.dispose()

        return {
            "success": True,
            "rows_synced": rows_synced,
            "last_sync_value": max_incremental_value
        }

    except Exception as e:
        logger.error(f"增量同步失败: {str(e)}")
        # Chain the original cause instead of discarding the traceback.
        raise Exception(f"增量同步失败: {str(e)}") from e


async def update_field_metadata(field, field_data, session):
    """Apply a partial metadata update to a DataField.

    Only the keys present in ``field_data`` are applied. Plain attributes
    are assigned directly; list/dict-valued metadata goes through the
    field's setter helpers (which handle serialization). ``updated_at`` is
    always refreshed. The field is re-added to the session but the commit
    is left to the caller.

    Args:
        field: DataField model instance to update.
        field_data: dict of attribute values to apply.
        session: database session the field is added to.

    Returns:
        The updated field instance.

    Raises:
        Exception: wraps any underlying failure.
    """
    try:
        # (key, applier) pairs, in the same order the updates are applied.
        # Lambdas defer attribute lookup until the key is actually present.
        appliers = (
            ("display_name", lambda v: setattr(field, "display_name", v)),
            ("description", lambda v: setattr(field, "description", v)),
            ("synonyms", lambda v: field.set_synonyms_list(v)),
            ("business_terms", lambda v: field.set_business_terms(v)),
            ("semantic_type", lambda v: setattr(field, "semantic_type", v)),
            ("sample_values", lambda v: field.set_sample_values(v)),
            ("visible", lambda v: setattr(field, "visible", v)),
        )
        for key, apply_value in appliers:
            if key in field_data:
                apply_value(field_data[key])

        # Statistics are plain scalar attributes copied verbatim.
        for stat in ("min_value", "max_value", "avg_value",
                     "distinct_count", "null_count", "null_percentage"):
            if stat in field_data:
                setattr(field, stat, field_data[stat])

        field.updated_at = datetime.now()

        session.add(field)
        return field

    except Exception as e:
        logger.error(f"更新字段元数据失败: {str(e)}")
        raise Exception(f"更新字段元数据失败: {str(e)}")


async def get_field_relationship(dataset_id, session):
    """Return field relationships for a dataset.

    The DataField model carries no foreign-key metadata, so there are no
    relationships to derive and this always returns an empty list. The
    previous implementation also queried the dataset's fields but never
    used the result — that dead query has been removed.

    Args:
        dataset_id: dataset ID (kept for interface compatibility).
        session: database session (unused, kept for interface compatibility).

    Returns:
        An empty list.
    """
    return []


async def generate_metadata_suggestions(field, session):
    """Generate metadata suggestions for a field.

    Heuristically derives a display name, a description, synonyms, and a
    semantic type from the field's name, type flags, and column statistics.

    Args:
        field: DataField model instance to generate suggestions for.
        session: database session used to load the owning dataset.

    Returns:
        dict with suggested_display_name / suggested_description /
        suggested_synonyms / suggested_semantic_type / statistics, or a
        dict with an "error" key on failure (this function does not raise).
    """
    try:
        # Load the owning dataset (and through it the data source).
        dataset_result = await session.execute(
            select(DataSet).where(DataSet.id == field.dataset_id)
        )
        dataset = dataset_result.scalar_one()
        
        data_source = dataset.data_source
        
        # Column statistics from the source table.
        # NOTE(review): get_column_statistics is neither defined nor
        # imported in this module's visible scope — confirm its origin.
        column_stats = await get_column_statistics(data_source, dataset.original_name, field.name)
        
        # Build suggestions from the field name and the statistics.
        
        # 1. Suggested display name: snake_case -> space-separated Title Case.
        suggested_display_name = field.name.replace('_', ' ').title()
        
        # 2. Description assembled from name, type, constraints and stats.
        #    (The generated text itself is intentionally Chinese.)
        suggested_description = f"{suggested_display_name}"
        
        # Data type.
        suggested_description += f"，数据类型为{field.data_type}"
        
        # Constraints.
        if field.is_primary_key:
            suggested_description += "，是该表的主键"
        
        if not field.is_nullable:
            suggested_description += "，不允许为空"
        
        # Range/average statistics only make sense for numeric types.
        if field.data_type in ['integer', 'float', 'decimal']:
            suggested_description += f"，取值范围为{column_stats.get('min_value', '未知')}到{column_stats.get('max_value', '未知')}"
            suggested_description += f"，平均值为{column_stats.get('avg_value', '未知')}"
        
        suggested_description += f"，共有{column_stats.get('distinct_count', '未知')}个不同的值"
        suggested_description += f"，空值占比{column_stats.get('null_percentage', '未知')}%"
        
        # 3. Synonym suggestions via simple keyword matching on the name.
        suggested_synonyms = []
        
        field_name_lower = field.name.lower()
        
        # ID-like fields.
        if 'id' in field_name_lower or field_name_lower.endswith('_id'):
            suggested_synonyms.extend(['编号', '标识符', 'identifier'])
        
        # Name-like fields.
        if 'name' in field_name_lower:
            suggested_synonyms.extend(['名称', '名字', 'title'])
        
        # Date-like fields.
        if 'date' in field_name_lower:
            suggested_synonyms.extend(['日期', '时间', 'time'])
        
        # Price / amount fields.
        if any(keyword in field_name_lower for keyword in ['price', 'amount', 'cost']):
            suggested_synonyms.extend(['价格', '金额', '费用', 'fee'])
        
        # 4. Infer a semantic type from the field name. The elif chain is
        #    ordered: earlier matches win.
        suggested_semantic_type = None
        
        # NOTE(review): 'id' as a bare substring matches many non-id names
        # (e.g. "width"), and it shadows the later, more specific types —
        # confirm this precedence is intended.
        if any(id_term in field_name_lower for id_term in ['id', '_id']):
            suggested_semantic_type = 'identifier'
        elif any(name_term in field_name_lower for name_term in ['name', 'title']):
            # Person-like qualifiers refine the generic "name" type.
            if any(user_term in field_name_lower for user_term in ['user', 'person', 'customer', 'employee']):
                suggested_semantic_type = 'person_name'
            else:
                suggested_semantic_type = 'name'
        elif any(address_term in field_name_lower for address_term in ['address', 'location', 'city', 'province', 'country', 'street']):
            suggested_semantic_type = 'address'
        elif any(phone_term in field_name_lower for phone_term in ['phone', 'mobile', 'tel', 'telephone']):
            suggested_semantic_type = 'phone_number'
        elif any(email_term in field_name_lower for email_term in ['email', 'mail']):
            suggested_semantic_type = 'email'
        elif any(date_term in field_name_lower for date_term in ['date', 'time', 'birthday', 'birth_date']):
            # Refine date-like fields by lifecycle keywords.
            if 'birth' in field_name_lower:
                suggested_semantic_type = 'birth_date'
            elif 'create' in field_name_lower or 'created' in field_name_lower:
                suggested_semantic_type = 'create_date'
            elif 'update' in field_name_lower or 'updated' in field_name_lower:
                suggested_semantic_type = 'update_date'
            else:
                suggested_semantic_type = 'date'
        elif any(amount_term in field_name_lower for amount_term in ['price', 'amount', 'cost', 'salary', 'income']):
            suggested_semantic_type = 'monetary_amount'
        elif any(percent_term in field_name_lower for percent_term in ['percent', 'percentage', 'ratio', 'rate']):
            suggested_semantic_type = 'percentage'
        elif any(desc_term in field_name_lower for desc_term in ['desc', 'description', 'detail', 'comment', 'remarks']):
            suggested_semantic_type = 'description'
        elif field.data_type in ['integer', 'float', 'decimal'] and field_name_lower.endswith('_count'):
            suggested_semantic_type = 'count'
        
        # Bundle all suggestions with the raw statistics.
        return {
            "suggested_display_name": suggested_display_name,
            "suggested_description": suggested_description,
            "suggested_synonyms": suggested_synonyms,
            "suggested_semantic_type": suggested_semantic_type,
            "statistics": column_stats
        }
    
    except Exception as e:
        logger.error(f"生成元数据建议失败: {str(e)}")
        return {
            "error": f"生成元数据建议失败: {str(e)}"
        }


class DatasetService:
    """数据集服务类，提供数据集相关操作"""
    
    @staticmethod
    async def get_dataset_data(dataset_id, page=1, page_size=50, search="", session=None):
        """Fetch a page of a dataset's data, with optional search/filtering.

        Args:
            dataset_id: dataset ID.
            page: 1-based page number.
            page_size: rows per page (clamped to [1, 50000]).
            search: plain keyword string, or a JSON string / list of filter
                dicts of the form
                ``[{"field": ..., "operator": ..., "value": ..., "description": ...}]``.
            session: database session.

        Returns:
            dict with ``data``, ``column_display_names``, ``total``,
            ``page``, ``page_size`` and ``columns``; on failure a dict with
            ``success: False`` and an error message (this method does not
            raise).
        """
        try:
            from sqlalchemy import create_engine, text, inspect, select
            from app.models.db import DataSet, DataSource, DataField
            from sqlalchemy.exc import SQLAlchemyError
            import json
            
            # Normalize the search argument: a JSON-encoded list becomes
            # structured filters, anything else stays a keyword string.
            search_filters = []
            if search and isinstance(search, str):
                try:
                    # Try to parse the search parameter as JSON.
                    search_obj = json.loads(search)
                    if isinstance(search_obj, list):
                        search_filters = search_obj
                        # Blank out `search` so the keyword path is skipped.
                        search = ""
                except json.JSONDecodeError:
                    # Not JSON — keep treating it as a plain keyword.
                    pass
                except Exception as e:
                    logger.warning(f"解析搜索参数时出错: {str(e)}")
            elif isinstance(search, list):
                # Already a list of filters — use it directly.
                search_filters = search
                search = ""
                
            # Clamp paging parameters to sane bounds.
            if page < 1:
                page = 1
            if page_size < 1:
                page_size = 50
            if page_size > 50000:  # hard cap on rows per page
                page_size = 50000
                
            # Row offset for the requested page.
            offset = (page - 1) * page_size
            
            # Load the dataset row.
            dataset_query = select(DataSet).where(DataSet.id == dataset_id)
            dataset_result = await session.execute(dataset_query)
            dataset = dataset_result.scalar_one_or_none()
            
            if not dataset:
                raise ValueError(f"数据集不存在: ID={dataset_id}")
            
            # Result holders.
            total_count = 0
            data = []
            columns = []
            
            # Single-element list so the helpers can write the count back
            # by reference.
            total_count_ref = [0]
            
            # NOTE(review): `data` and `columns` rely on the helpers
            # mutating them in place; the visible part of
            # _process_local_data *rebinds* its local `columns` instead of
            # mutating the list, so `columns` here may always stay empty —
            # verify against the full helper implementations.
            # The all-zeros UUID is the sentinel for "local data source".
            if dataset.data_source_id != '00000000-0000-0000-0000-000000000000':
                # External data source: load it and delegate.
                datasource_query = select(DataSource).where(DataSource.id == dataset.data_source_id)
                datasource_result = await session.execute(datasource_query)
                datasource = datasource_result.scalar_one_or_none()
                
                if not datasource:
                    raise ValueError(f"未找到关联的数据源: ID={dataset.data_source_id}")
                
                await DatasetService._process_external_data(
                    dataset, datasource, page, page_size, offset, search, 
                    total_count_ref, data, columns, session, search_filters
                )
                # Read the count back from the reference.
                total_count = total_count_ref[0]
            else:
                # Local data source.
                await DatasetService._process_local_data(
                    dataset, page, page_size, offset, search,
                    total_count_ref, data, columns, search_filters
                )
                # Read the count back from the reference.
                total_count = total_count_ref[0]
            
            # Map column names to their (possibly localized) display names.
            column_display_names = {}
            fields_query = select(DataField).where(DataField.dataset_id == dataset_id)
            fields_result = await session.execute(fields_query)
            fields = fields_result.scalars().all()
            
            column_display_names = {field.name: field.display_name for field in fields}
            
            return {
                "success": True,
                "data": data,
                "column_display_names": column_display_names,
                "total": total_count,
                "page": page,
                "page_size": page_size,
                "columns": columns
            }
        
        except Exception as e:
            logger.error(f"获取数据集数据失败: {str(e)}")
            return {
                "success": False,
                "message": f"获取数据集数据失败: {str(e)}",
                "data": [],
                "total": 0,
                "page": page,
                "page_size": page_size
            }
    
    @staticmethod
    async def _process_local_data(dataset, page, page_size, offset, search, total_count_ref, data, columns, search_filters=None):
        """Process data from the local data source (SQLite via DATABASETB_URL).

        Args:
            dataset: DataSet model instance
            page: page number (1-based)
            page_size: number of records per page
            offset: row offset used for pagination
            search: global keyword searched across every column
            total_count_ref: one-element list used as an out-parameter for the
                total row count (mutated via ``total_count_ref[0] = ...``)
            data: list that fetched rows (as dicts) are appended to (out-parameter)
            columns: column-name list; NOTE(review): this name is rebound locally
                below, so the caller's list object is never actually updated
                through this parameter — confirm callers rely on ``data`` /
                ``total_count_ref`` only
            search_filters: optional list of advanced filters, each a dict with
                'field', 'operator' and 'value' keys
        """
        from sqlalchemy import create_engine, text
        from fastapi.concurrency import run_in_threadpool
        from config import DATABASETB_URL
        import logging
        
        logger = logging.getLogger(__name__)
        
        # Inner coroutine wrapping the blocking SQLAlchemy calls in a threadpool.
        async def process_local_data_async():
            nonlocal total_count_ref, data, columns
            
            # Create engine and connection for the local SQLite database.
            sqlite_engine = create_engine(DATABASETB_URL)
            connection = await run_in_threadpool(lambda: sqlite_engine.connect())
            
            try:
                # Decide how to query based on the dataset type.
                if dataset.type == "query" and dataset.custom_query:
                    # Custom SQL query
                    base_query = dataset.custom_query
                else:
                    # Plain table query
                    table_name = dataset.original_name
                    base_query = f"SELECT * FROM {table_name}"
                
                # Fetch the column names, used later to build search conditions.
                if dataset.type == "query" and dataset.custom_query:
                    # Custom query: wrap it as a subquery and fetch one row to
                    # discover the result columns.
                    columns_query = f"SELECT * FROM ({base_query}) AS sq LIMIT 1"
                    columns_result = await run_in_threadpool(
                        lambda: connection.execute(text(columns_query))
                    )
                    columns = columns_result.keys()
                else:
                    # Table query: read the column names from the schema.
                    if connection.dialect.name == 'sqlite':
                        pragma_query = f"PRAGMA table_info({dataset.original_name})"
                        pragma_result = await run_in_threadpool(
                            lambda: connection.execute(text(pragma_query))
                        )
                        # PRAGMA table_info rows are (cid, name, type, ...);
                        # index 1 is the column name.
                        columns = [row[1] for row in pragma_result.fetchall()]
                    else:
                        # Other databases: select one row to discover the columns.
                        columns_query = f"SELECT * FROM {dataset.original_name} LIMIT 1"
                        columns_result = await run_in_threadpool(
                            lambda: connection.execute(text(columns_query))
                        )
                        columns = columns_result.keys()
                
                # Build the WHERE clause.
                # NOTE(review): field names and values are interpolated directly
                # into the SQL text throughout this block — vulnerable to SQL
                # injection; consider bound parameters.
                where_conditions = []
                
                # Advanced search filters.
                if search_filters and len(search_filters) > 0:
                    for filter_item in search_filters:
                        if isinstance(filter_item, dict) and 'field' in filter_item and 'operator' in filter_item and 'value' in filter_item:
                            field = filter_item['field']
                            operator = filter_item['operator']
                            value = filter_item['value']
                            
                            # Silently skip filters on unknown fields.
                            if field in columns:
                                # Translate the operator into a SQL condition.
                                if operator == 'eq':  # equals
                                    where_conditions.append(f"{field} = '{value}'")
                                elif operator == 'neq':  # not equal
                                    where_conditions.append(f"{field} != '{value}'")
                                elif operator == 'gt':  # greater than
                                    where_conditions.append(f"{field} > '{value}'")
                                elif operator == 'gte':  # greater than or equal
                                    where_conditions.append(f"{field} >= '{value}'")
                                elif operator == 'lt':  # less than
                                    where_conditions.append(f"{field} < '{value}'")
                                elif operator == 'lte':  # less than or equal
                                    where_conditions.append(f"{field} <= '{value}'")
                                elif operator == 'contains':  # substring match
                                    where_conditions.append(f"{field} LIKE '%{value}%'")
                                elif operator == 'starts':  # prefix match
                                    where_conditions.append(f"{field} LIKE '{value}%'")
                                elif operator == 'ends':  # suffix match
                                    where_conditions.append(f"{field} LIKE '%{value}'")
                                elif operator == 'in':  # membership in a list
                                    if isinstance(value, list):
                                        values_str = ", ".join([f"'{v}'" for v in value])
                                        where_conditions.append(f"{field} IN ({values_str})")
                                    else:
                                        where_conditions.append(f"{field} = '{value}'")
                                elif operator == 'between':  # range match (inclusive)
                                    if isinstance(value, list) and len(value) == 2:
                                        where_conditions.append(f"{field} BETWEEN '{value[0]}' AND '{value[1]}'")
                
                # Plain keyword search across every column.
                if search and search.strip():
                    # One LIKE condition per column, OR-joined.
                    search_conditions = []
                    for col in columns:
                        # Use the correct text-cast syntax for each dialect.
                        if connection.dialect.name == 'mysql':
                            search_conditions.append(f"CONVERT({col}, CHAR) LIKE '%{search}%'")
                        elif connection.dialect.name == 'postgresql':
                            search_conditions.append(f"CAST({col} AS TEXT) LIKE '%{search}%'")
                        elif connection.dialect.name == 'oracle':
                            search_conditions.append(f"TO_CHAR({col}) LIKE '%{search}%'")
                        elif connection.dialect.name in ['mssql', 'sqlserver']:
                            search_conditions.append(f"CAST({col} AS NVARCHAR(MAX)) LIKE '%{search}%'")
                        else:
                            # SQLite and any other dialect
                            search_conditions.append(f"CAST({col} AS TEXT) LIKE '%{search}%'")
                    
                    search_clause = " OR ".join(search_conditions)
                    where_conditions.append(f"({search_clause})")
                
                # Assemble the final query.
                # NOTE(review): the " WHERE " check only detects an uppercase
                # keyword surrounded by spaces in the custom query.
                if where_conditions:
                    where_clause = " AND ".join(where_conditions)
                    if " WHERE " in base_query.upper():
                        modified_query = f"{base_query} AND ({where_clause})"
                    else:
                        modified_query = f"{base_query} WHERE {where_clause}"
                else:
                    modified_query = base_query
                
                # Count the total number of matching rows.
                if dataset.type == "query" and dataset.custom_query:
                    count_query = f"SELECT COUNT(*) as total FROM ({modified_query}) AS cnt"
                else:
                    # With no WHERE conditions a plain table count suffices.
                    if where_conditions:
                        count_query = f"SELECT COUNT(*) as total FROM ({modified_query}) AS cnt"
                    else:
                        count_query = f"SELECT COUNT(*) as total FROM {dataset.original_name}"
                
                # Run the count query.
                count_result = await run_in_threadpool(
                    lambda: connection.execute(text(count_query))
                )
                total_count_ref[0] = count_result.scalar()
                
                # Apply pagination.
                paginated_query = f"{modified_query} LIMIT {page_size} OFFSET {offset}"
                
                # Run the paginated query.
                result = await run_in_threadpool(
                    lambda: connection.execute(text(paginated_query))
                )
                
                # Collect column names and rows (appended to the shared list).
                columns = result.keys()
                rows = result.fetchall()
                data.extend([dict(zip(columns, row)) for row in rows])
                
                return columns, data, total_count_ref[0]
            finally:
                # Always release the connection and dispose of the engine.
                await run_in_threadpool(lambda: connection.close())
                await run_in_threadpool(lambda: sqlite_engine.dispose())
        
        # Run the inner coroutine; its return value is intentionally discarded —
        # results are propagated through the mutable arguments above.
        await process_local_data_async()
    
    @staticmethod
    async def _process_external_data(dataset, datasource, page, page_size, offset, search, total_count_ref, data, columns, session, search_filters=None):
        """Process data from an external data source.

        Args:
            dataset: DataSet model instance
            datasource: DataSource model instance
            page: page number (1-based)
            page_size: number of records per page
            offset: row offset used for pagination
            search: global keyword searched across every column
            total_count_ref: one-element list used as an out-parameter for the
                total row count (mutated via ``total_count_ref[0] = ...``)
            data: list that fetched rows (as dicts) are appended to (out-parameter)
            columns: column-name list; NOTE(review): this name is rebound locally
                below, so the caller's list object is never actually updated
                through this parameter
            session: async database session, used to read DataField metadata
            search_filters: optional list of advanced filters, each a dict with
                'field', 'operator' and 'value' keys
        """
        from sqlalchemy import create_engine, text, select
        from fastapi.concurrency import run_in_threadpool
        from app.models.db import DataField
        import logging
        
        logger = logging.getLogger(__name__)
        
        # Inner coroutine that talks to the external database.
        async def process_external_data_async():
            nonlocal total_count_ref, data, columns
            
            try:
                # Database dialect drives the SQL syntax choices below.
                db_type = datasource.type.lower() if datasource.type else "generic"
                
                # Decide how to query based on the dataset type.
                if dataset.type == "query" and dataset.custom_query:
                    # Custom SQL query
                    base_query = dataset.custom_query
                else:
                    # Plain table query
                    table_name = dataset.original_name
                    base_query = f"SELECT * FROM {table_name}"
                
                # Connection string for the external source.
                connection_string = datasource.get_connection_string()
                
                # Small pool with pre-ping to drop dead connections.
                engine = create_engine(connection_string,
                                       pool_size=1, max_overflow=0,
                                       pool_pre_ping=True, pool_recycle=3600)
                connection = await run_in_threadpool(lambda: engine.connect())
                
                try:
                    # Get the column names (needed for the search conditions).
                    # First try the DataField metadata table.
                    field_query = select(DataField).where(DataField.dataset_id == dataset.id)
                    field_result = await session.execute(field_query)
                    fields = field_result.scalars().all()
                    
                    if fields:
                        # DataField metadata exists — use it directly.
                        columns = [field.name for field in fields]
                        logger.info(f"从DataField表获取到{len(columns)}个列名")
                    else:
                        # Otherwise discover the columns with a one-row query.
                        if dataset.type == "query" and dataset.custom_query:
                            # Custom query: wrap it as a subquery.
                            # NOTE(review): Oracle does not accept the "AS" table
                            # alias keyword — this subquery likely fails there;
                            # confirm against a real Oracle source.
                            base_subquery = f"({base_query}) AS sq"
                            if db_type in ["mysql", "postgresql", "sqlite"]:
                                columns_query = f"SELECT * FROM {base_subquery} LIMIT 1"
                            elif db_type in ["sqlserver", "mssql"]:
                                columns_query = f"SELECT TOP 1 * FROM {base_subquery}"
                            elif db_type == "oracle":
                                columns_query = f"SELECT * FROM {base_subquery} WHERE ROWNUM <= 1"
                            else:
                                columns_query = f"SELECT * FROM {base_subquery} LIMIT 1"
                        else:
                            # Table query: select one row from the table.
                            if db_type in ["mysql", "postgresql", "sqlite"]:
                                columns_query = f"SELECT * FROM {table_name} LIMIT 1"
                            elif db_type in ["sqlserver", "mssql"]:
                                columns_query = f"SELECT TOP 1 * FROM {table_name}"
                            elif db_type == "oracle":
                                columns_query = f"SELECT * FROM {table_name} WHERE ROWNUM <= 1"
                            else:
                                columns_query = f"SELECT * FROM {table_name} LIMIT 1"
                            
                        # Run the column-discovery query.
                        logger.info(f"执行获取列名查询: {columns_query} (数据库类型: {db_type})")
                        columns_result = await run_in_threadpool(
                            lambda: connection.execute(text(columns_query))
                        )
                        columns = columns_result.keys()
                    
                    # Build the WHERE clause.
                    # NOTE(review): field names and values are interpolated
                    # directly into SQL — vulnerable to SQL injection; consider
                    # bound parameters.
                    where_conditions = []
                    
                    # Advanced search filters.
                    if search_filters and len(search_filters) > 0:
                        for filter_item in search_filters:
                            if isinstance(filter_item, dict) and 'field' in filter_item and 'operator' in filter_item and 'value' in filter_item:
                                field = filter_item['field']
                                operator = filter_item['operator']
                                value = filter_item['value']
                                
                                # Translate operator (and dialect) into a SQL condition.
                                # NOTE(review): unlike the local-data variant, the
                                # field name is not validated against `columns` here.
                                if operator == 'eq':  # equals
                                    where_conditions.append(f"{field} = '{value}'")
                                elif operator == 'neq':  # not equal
                                    where_conditions.append(f"{field} != '{value}'")
                                elif operator == 'gt':  # greater than
                                    where_conditions.append(f"{field} > '{value}'")
                                elif operator == 'gte':  # greater than or equal
                                    where_conditions.append(f"{field} >= '{value}'")
                                elif operator == 'lt':  # less than
                                    where_conditions.append(f"{field} < '{value}'")
                                elif operator == 'lte':  # less than or equal
                                    where_conditions.append(f"{field} <= '{value}'")
                                elif operator == 'contains':  # substring match
                                    if db_type == "oracle":
                                        where_conditions.append(f"INSTR({field}, '{value}') > 0")
                                    else:
                                        where_conditions.append(f"{field} LIKE '%{value}%'")
                                elif operator == 'starts':  # prefix match
                                    if db_type == "oracle":
                                        where_conditions.append(f"INSTR({field}, '{value}') = 1")
                                    else:
                                        where_conditions.append(f"{field} LIKE '{value}%'")
                                elif operator == 'ends':  # suffix match
                                    if db_type == "oracle":
                                        where_conditions.append(f"{field} LIKE '%{value}'")
                                    else:
                                        where_conditions.append(f"{field} LIKE '%{value}'")
                                elif operator == 'in':  # membership in a list
                                    if isinstance(value, list):
                                        values_str = ", ".join([f"'{v}'" for v in value])
                                        where_conditions.append(f"{field} IN ({values_str})")
                                    else:
                                        where_conditions.append(f"{field} = '{value}'")
                                elif operator == 'between':  # range match (inclusive)
                                    if isinstance(value, list) and len(value) == 2:
                                        where_conditions.append(f"{field} BETWEEN '{value[0]}' AND '{value[1]}'")
                                    
                    # Plain keyword search across every column.
                    if search and search.strip():
                        # One condition per column, OR-joined, with dialect-specific casts.
                        search_conditions = []
                        for col in columns:
                            if db_type == "mysql":
                                search_conditions.append(f"CONVERT({col}, CHAR) LIKE '%{search}%'")
                            elif db_type == "postgresql":
                                search_conditions.append(f"CAST({col} AS TEXT) LIKE '%{search}%'")
                            elif db_type == "oracle":
                                search_conditions.append(f"INSTR(TO_CHAR({col}), '{search}') > 0")
                            elif db_type in ["sqlserver", "mssql"]:
                                search_conditions.append(f"CAST({col} AS NVARCHAR(MAX)) LIKE '%{search}%'")
                            else:
                                search_conditions.append(f"CAST({col} AS TEXT) LIKE '%{search}%'")
                                
                        search_clause = " OR ".join(search_conditions)
                        where_conditions.append(f"({search_clause})")
                    
                    # Assemble the final query.
                    if where_conditions:
                        where_clause = " AND ".join(where_conditions)
                        if " WHERE " in base_query.upper():
                            modified_query = f"{base_query} AND ({where_clause})"
                        else:
                            modified_query = f"{base_query} WHERE {where_clause}"
                    else:
                        modified_query = base_query
                    
                    # Count the total number of matching rows (best effort).
                    try:
                        if dataset.type == "query" and dataset.custom_query:
                            if db_type in ["mysql", "postgresql", "sqlite"]:
                                count_query = f"SELECT COUNT(*) AS total FROM ({modified_query}) AS cnt"
                            elif db_type == "oracle":
                                # Oracle: no "AS" alias on the derived table.
                                count_query = f"SELECT COUNT(*) AS total FROM ({modified_query})"
                            elif db_type in ["sqlserver", "mssql"]:
                                count_query = f"SELECT COUNT(*) AS total FROM ({modified_query}) AS cnt"
                            else:
                                count_query = f"SELECT COUNT(*) AS total FROM ({modified_query}) AS cnt"
                        else:
                            if where_conditions:
                                count_query = f"SELECT COUNT(*) AS total FROM ({modified_query}) AS cnt"
                            else:
                                count_query = f"SELECT COUNT(*) AS total FROM {dataset.original_name}"
                        
                        # Run the count query.
                        count_result = await run_in_threadpool(
                            lambda: connection.execute(text(count_query))
                        )
                        total_count_ref[0] = count_result.scalar()
                    except Exception as e:
                        logger.error(f"计数查询失败: {str(e)}")
                        total_count_ref[0] = 1000  # fallback default when counting fails
                    
                    # Apply pagination using dialect-specific syntax.
                    if db_type in ["mysql", "postgresql", "sqlite"]:
                        paginated_query = f"{modified_query} LIMIT {page_size} OFFSET {offset}"
                    elif db_type == "oracle":
                        # Classic ROWNUM-based pagination for Oracle.
                        inner_query = f"""
                            SELECT a.*, ROWNUM rnum 
                            FROM ({modified_query}) a 
                            WHERE ROWNUM <= {offset + page_size}
                        """
                        paginated_query = f"""
                            SELECT * 
                            FROM ({inner_query}) 
                            WHERE rnum > {offset}
                        """
                    elif db_type in ["sqlserver", "mssql"]:
                        # SQL Server pagination via ROW_NUMBER() in a CTE.
                        paginated_query = f"""
                            WITH PagedResults AS (
                                SELECT ROW_NUMBER() OVER (ORDER BY (SELECT 1)) AS RowNum, t.*
                                FROM ({modified_query}) t
                            )
                            SELECT * FROM PagedResults
                            WHERE RowNum BETWEEN {offset + 1} AND {offset + page_size}
                        """
                    else:
                        paginated_query = f"{modified_query} LIMIT {page_size} OFFSET {offset}"
                    
                    # Run the paginated query.
                    result = await run_in_threadpool(
                        lambda: connection.execute(text(paginated_query))
                    )
                    
                    # Collect column names and rows (appended to the shared list).
                    columns = result.keys()
                    rows = result.fetchall()
                    data.extend([dict(zip(columns, row)) for row in rows])
                    
                    return columns, data, total_count_ref[0]
                finally:
                    # Always release the connection and dispose of the engine.
                    await run_in_threadpool(lambda: connection.close())
                    await run_in_threadpool(lambda: engine.dispose())
            except Exception as e:
                logger.error(f"处理外部数据源数据失败: {str(e)}")
                raise
        
        # Run the inner coroutine; its return value is intentionally discarded —
        # results are propagated through the mutable arguments above.
        await process_external_data_async()

    @staticmethod
    async def read_data_from_source(datasource, table_name=None, query=None, page=1, page_size=100, search=""):
        """Read data from a data source with pagination and optional keyword search.

        Args:
            datasource: DataSource model instance (must provide get_connection_string())
            table_name: table to read from; mutually exclusive with ``query``
            query: custom SELECT statement; mutually exclusive with ``table_name``
            page: 1-based page number (values < 1 are clamped to 1)
            page_size: records per page (clamped to the range 1..1000)
            search: keyword searched across all columns (may be empty)

        Returns:
            On success: dict with ``success=True``, ``data`` (list of row dicts),
            ``columns``, ``total``, ``page`` and ``page_size``.
            On failure: dict with ``success=False`` and a ``message``.
        """
        try:
            from sqlalchemy import create_engine, text
            from fastapi.concurrency import run_in_threadpool
            import logging
            
            logger = logging.getLogger(__name__)
            
            if not table_name and not query:
                raise ValueError("table_name和query不能同时为空")
                
            # Clamp pagination parameters to sane bounds.
            if page < 1:
                page = 1
            if page_size < 1:
                page_size = 100
            if page_size > 1000:
                page_size = 1000
                
            # Row offset for the requested page.
            offset = (page - 1) * page_size
            
            # Database dialect drives the SQL syntax choices below.
            db_type = datasource.type.lower() if datasource.type else "generic"
            
            # Base query: either the caller's SQL or a full-table select.
            base_query = query if query else f"SELECT * FROM {table_name}"
            
            # Connection string for the external source.
            connection_string = datasource.get_connection_string()
            
            # Small pool with pre-ping to drop dead connections.
            engine = create_engine(connection_string,
                                  pool_size=1, max_overflow=0,
                                  pool_pre_ping=True, pool_recycle=3600)
            connection = await run_in_threadpool(lambda: engine.connect())
            
            try:
                # Build the effective query, optionally adding search conditions.
                # modified_query/search_clause are initialized up front so they
                # are always bound (the previous version left modified_query
                # undefined when a custom query was combined with a search).
                search_clause = ""
                modified_query = base_query
                if search and search.strip():
                    # Escape single quotes so the keyword cannot terminate the
                    # SQL string literal (basic injection hardening).
                    safe_search = search.replace("'", "''")
                    
                    # Discover the column names to search over.
                    if query:
                        # Custom query: wrap it as a subquery and fetch one row.
                        if db_type in ["mysql", "postgresql", "sqlite"]:
                            columns_query = f"SELECT * FROM ({base_query}) AS sq LIMIT 1"
                        elif db_type in ["sqlserver", "mssql"]:
                            columns_query = f"SELECT TOP 1 * FROM ({base_query}) AS sq"
                        elif db_type == "oracle":
                            columns_query = f"SELECT * FROM ({base_query}) WHERE ROWNUM <= 1"
                        else:
                            columns_query = f"SELECT * FROM ({base_query}) AS sq LIMIT 1"
                    else:
                        # Table query: fetch one row from the table.
                        if db_type in ["mysql", "postgresql", "sqlite"]:
                            columns_query = f"SELECT * FROM {table_name} LIMIT 1"
                        elif db_type in ["sqlserver", "mssql"]:
                            columns_query = f"SELECT TOP 1 * FROM {table_name}"
                        elif db_type == "oracle":
                            columns_query = f"SELECT * FROM {table_name} WHERE ROWNUM <= 1"
                        else:
                            columns_query = f"SELECT * FROM {table_name} LIMIT 1"
                    
                    # BUG FIX: run the column-discovery query for BOTH cases.
                    # Previously this block was nested inside the table branch,
                    # so a custom query with a search keyword never executed it
                    # and crashed with an unbound modified_query.
                    columns_result = await run_in_threadpool(
                        lambda: connection.execute(text(columns_query))
                    )
                    columns = columns_result.keys()
                    
                    # One LIKE condition per column with dialect-specific casts.
                    search_conditions = []
                    for col in columns:
                        if db_type == "mysql":
                            search_conditions.append(f"CONVERT({col}, CHAR) LIKE '%{safe_search}%'")
                        elif db_type == "postgresql":
                            search_conditions.append(f"CAST({col} AS TEXT) LIKE '%{safe_search}%'")
                        elif db_type == "oracle":
                            search_conditions.append(f"TO_CHAR({col}) LIKE '%{safe_search}%'")
                        elif db_type in ["sqlserver", "mssql"]:
                            search_conditions.append(f"CAST({col} AS NVARCHAR(MAX)) LIKE '%{safe_search}%'")
                        else:
                            search_conditions.append(f"CAST({col} AS TEXT) LIKE '%{safe_search}%'")
                    
                    search_clause = " OR ".join(search_conditions)
                    
                    # Append the search conditions to the base query.
                    if " WHERE " in base_query.upper():
                        modified_query = f"{base_query} AND ({search_clause})"
                    else:
                        modified_query = f"{base_query} WHERE {search_clause}"
                
                # Total row count (best effort; falls back to 0 on failure).
                try:
                    if query:
                        if db_type == "oracle":
                            # Oracle: no "AS" alias on the derived table.
                            count_query = f"SELECT COUNT(*) AS total FROM ({modified_query})"
                        else:
                            count_query = f"SELECT COUNT(*) AS total FROM ({modified_query}) AS cnt"
                    else:
                        if search_clause:
                            count_query = f"SELECT COUNT(*) AS total FROM {table_name} WHERE {search_clause}"
                        else:
                            count_query = f"SELECT COUNT(*) AS total FROM {table_name}"
                    
                    count_result = await run_in_threadpool(
                        lambda: connection.execute(text(count_query))
                    )
                    total_count = count_result.scalar()
                except Exception as e:
                    logger.error(f"计数查询失败: {str(e)}")
                    total_count = 0
                
                # Apply pagination using dialect-specific syntax.
                if db_type in ["mysql", "postgresql", "sqlite"]:
                    paginated_query = f"{modified_query} LIMIT {page_size} OFFSET {offset}"
                elif db_type == "oracle":
                    # Classic ROWNUM-based pagination for Oracle.
                    inner_query = f"""
                        SELECT a.*, ROWNUM rnum 
                        FROM ({modified_query}) a 
                        WHERE ROWNUM <= {offset + page_size}
                    """
                    paginated_query = f"""
                        SELECT * 
                        FROM ({inner_query}) 
                        WHERE rnum > {offset}
                    """
                elif db_type in ["sqlserver", "mssql"]:
                    # SQL Server pagination via ROW_NUMBER() in a CTE.
                    paginated_query = f"""
                        WITH PagedResults AS (
                            SELECT ROW_NUMBER() OVER (ORDER BY (SELECT 1)) AS RowNum, t.*
                            FROM ({modified_query}) t
                        )
                        SELECT * FROM PagedResults
                        WHERE RowNum BETWEEN {offset + 1} AND {offset + page_size}
                    """
                else:
                    paginated_query = f"{modified_query} LIMIT {page_size} OFFSET {offset}"
                
                # Run the paginated query.
                result = await run_in_threadpool(
                    lambda: connection.execute(text(paginated_query))
                )
                
                # Collect the result set.
                columns = result.keys()
                rows = result.fetchall()
                data = [dict(zip(columns, row)) for row in rows]
                
                return {
                    "success": True,
                    "data": data,
                    "columns": columns,
                    "total": total_count,
                    "page": page,
                    "page_size": page_size
                }
            finally:
                # Always release the connection and dispose of the engine.
                await run_in_threadpool(lambda: connection.close())
                await run_in_threadpool(lambda: engine.dispose())
        
        except Exception as e:
            logger.error(f"从数据源读取数据失败: {str(e)}")
            return {
                "success": False,
                "message": f"从数据源读取数据失败: {str(e)}",
                "data": [],
                "total": 0,
                "page": page,
                "page_size": page_size
            }
    @staticmethod
    async def execute_sql_from_source(
        datasourceid, 
        sql_statement, 
        session, 
        max_rows: int = None,
        enable_limit: bool = True
    ):
        """从数据源执行SQL语句
        
        Args:
            datasourceid: 数据源ID
            sql_statement: SQL语句
            session: 数据库会话
            max_rows: 最大返回行数限制
            enable_limit: 是否启用行数限制
        
        Returns:
            执行结果，包含columns、data和截断信息
        """
        from config import MAX_QUERY_ROWS
        
        # 设置默认限制
        if max_rows is None:
            max_rows = MAX_QUERY_ROWS
        
        if datasourceid == "00000000-0000-0000-0000-000000000000":
            # 使用本地SQLite数据库执行SQL
            datasource = DataSource(
                id="00000000-0000-0000-0000-000000000000",
                name="本地数据库"
            )
        else:
            datasource_query = select(DataSource).where(DataSource.id == datasourceid)
            datasource_result = await session.execute(datasource_query)
            datasource = datasource_result.scalar_one_or_none()
        
        if not datasource:
            return {
                "status": False,
                "message": "数据源不存在"
            }
                
        # 创建数据库连接并执行SQL
        connection_string = datasource.get_connection_string()
        engine = create_engine(connection_string)
        conn = await run_in_threadpool(lambda: engine.connect())
        
        try:
            # 🎯 核心改进：使用应用层行数限制，避免数据库特定的LIMIT语法
            is_truncated = False
            total_rows_estimated = None
            
            # First try to estimate the total row count (simple queries only).
            # Wrapping a statement in SELECT COUNT(*) is only reliable for a
            # plain SELECT without UNION/LIMIT, so anything else is skipped.
            if enable_limit and max_rows > 0:
                sql_upper = sql_statement.upper().strip()
                if sql_upper.startswith('SELECT') and 'UNION' not in sql_upper and 'LIMIT' not in sql_upper:
                    try:
                        # Build a COUNT query around the original statement to
                        # estimate how many rows it would return in total.
                        count_sql = f"SELECT COUNT(*) as total FROM ({sql_statement}) as count_query"
                        count_result = await run_in_threadpool(
                            lambda: conn.execute(text(count_sql))
                        )
                        total_rows_estimated = count_result.scalar()
                        
                        if total_rows_estimated > max_rows:
                            is_truncated = True
                            logger.warning(f"数据查询结果将被截断: {total_rows_estimated} -> {max_rows}")
                    except Exception as e:
                        # Estimation failed (e.g. unsupported subquery); fall back
                        # to application-layer limiting below.
                        logger.warning(f"无法估算总行数，将在应用层限制: {str(e)}")
                        is_truncated = None  # Unknown whether truncation will occur
            
            # Execute the original SQL unmodified (no LIMIT is injected).
            result = await run_in_threadpool(lambda: conn.execute(text(sql_statement)))
            
            # Key improvement: enforce the row limit in the application layer
            # using the cursor's fetchmany() instead of rewriting the SQL.
            column_names = result.keys()
            query_result_dict = []
            rows_fetched = 0
            
            if enable_limit and max_rows > 0:
                # Fetch rows in batches until the limit is reached.
                batch_size = min(1000, max_rows)  # At most 1000 rows per fetch
                
                while rows_fetched < max_rows:
                    remaining_rows = max_rows - rows_fetched
                    current_batch_size = min(batch_size, remaining_rows)
                    
                    # NOTE(review): the lambda captures current_batch_size by
                    # reference (late binding); this is safe here only because
                    # each call is awaited before the variable is reassigned.
                    rows_batch = await run_in_threadpool(
                        lambda: result.fetchmany(current_batch_size)
                    )
                    
                    if not rows_batch:
                        # No more rows available.
                        break
                    
                    # Convert each row tuple into a column-name -> value dict.
                    batch_dict = [dict(zip(column_names, row)) for row in rows_batch]
                    query_result_dict.extend(batch_dict)
                    rows_fetched += len(rows_batch)
                    
                    # A short batch means the result set is exhausted.
                    if len(rows_batch) < current_batch_size:
                        break
                
                # If truncation is still unknown and we hit the limit exactly,
                # probe for one extra row to decide whether data was cut off.
                if is_truncated is None and rows_fetched == max_rows:
                    try:
                        # Any extra row means the result was truncated.
                        extra_row = await run_in_threadpool(lambda: result.fetchone())
                        if extra_row:
                            is_truncated = True
                            # NOTE(review): this turns total_rows_estimated into a
                            # display string (e.g. ">500") rather than an int.
                            total_rows_estimated = f">{rows_fetched}"
                        else:
                            is_truncated = False
                    except Exception:
                        is_truncated = True  # Conservative assumption on probe failure
            else:
                # No limit requested: fetch the entire result set at once.
                all_rows = await run_in_threadpool(lambda: result.fetchall())
                query_result_dict = [dict(zip(column_names, row)) for row in all_rows]
                rows_fetched = len(query_result_dict)
                is_truncated = False
            
            actual_rows = len(query_result_dict)
            
            # Normalize the truncation flag (None -> False).
            if is_truncated is None:
                is_truncated = False
            
            logger.info(f"SQL执行完成: 返回 {actual_rows} 行数据" + 
                       (f"，截断自 {total_rows_estimated} 行" if is_truncated and total_rows_estimated else ""))
            
            return {
                "status": True,
                "data": query_result_dict,
                "columns": column_names,
                "truncated": is_truncated,
                "total_rows_returned": actual_rows,
                "total_rows_estimated": total_rows_estimated,
                "max_rows_limit": max_rows if enable_limit else None,
                "sql_executed": sql_statement  # Original SQL returned unmodified
            }
            
        except Exception as e:
            logger.error(f"执行SQL语句失败: {str(e)}")
            return {
                "status": False,
                "message": f"执行SQL语句失败: {str(e)}"
            }
        finally:
            # Always release the connection and dispose the engine, even on error.
            await run_in_threadpool(lambda: conn.close())
            await run_in_threadpool(lambda: engine.dispose())
    