"""
数据同步引擎核心类
实现全量/增量同步逻辑，支持多种目标类型
"""

import asyncio
import csv
import json
import math
import os
import time
from datetime import datetime, timezone, timedelta

# 导入时区工具
from app.utils.timezone_utils import get_shanghai_now
from typing import Dict, Any, List, Optional, AsyncGenerator, Tuple, Set
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import text, select
import logging

from app.models.sync import SyncTask, SyncExecution, SyncLog, ExecutionStatus, LogLevel, TargetType, SyncType
from app.models.datasource import DataSource
from app.services.datasource_service import datasource_service
from app.utils.kafka_client import KafkaClient, kafka_pool
from app.utils.idempotency import IdempotencyManager
from app.utils.performance_monitor import performance_monitor
from app.utils.connection_pool import get_pooled_connection
from app.core.config import settings

logger = logging.getLogger(__name__)


def json_serializer(obj):
    """JSON ``default`` hook for types the stdlib encoder rejects.

    Supported conversions:
      * ``datetime``   -> ``'YYYY-MM-DD HH:MM:SS'``
      * ``date``       -> ``'YYYY-MM-DD'`` (generalization: plain dates
        previously raised TypeError even though datetimes were handled)
      * ``Decimal``    -> ``str`` (string form avoids float precision loss)

    Args:
        obj: The object ``json.dumps`` could not serialize.

    Returns:
        A JSON-compatible representation of ``obj``.

    Raises:
        TypeError: For any unsupported type, per the ``default=`` contract.
    """
    from decimal import Decimal
    from datetime import date

    # datetime must be tested before date: datetime is a subclass of date.
    if isinstance(obj, datetime):
        return obj.strftime('%Y-%m-%d %H:%M:%S')
    if isinstance(obj, date):
        return obj.strftime('%Y-%m-%d')
    if isinstance(obj, Decimal):
        return str(obj)
    raise TypeError(f"Object of type {type(obj)} is not JSON serializable")


def safe_json_dumps(data, **kwargs):
    """Serialize *data* to JSON, tolerating datetime/Decimal values.

    Unknown types are delegated to :func:`json_serializer`, and
    ``ensure_ascii=False`` keeps non-ASCII text readable in the output.
    Extra keyword arguments are forwarded to :func:`json.dumps`.
    """
    serialized = json.dumps(data, default=json_serializer, ensure_ascii=False, **kwargs)
    return serialized


class SyncEngine:
    """数据同步引擎"""
    
    def __init__(self, db: AsyncSession):
        """Create a sync engine bound to one async database session.

        Args:
            db: Async SQLAlchemy session used for all engine persistence.
        """
        self.db = db
        # In-process flags polled by the sync loops between batches:
        # executions listed here are paused / cancelled mid-run.
        self._paused_executions: Set[int] = set()
        self._cancelled_executions: Set[int] = set()
        # Task ids with a run currently in flight (prevents concurrent runs).
        self._running_tasks: Dict[int, bool] = {}
        # Records processed batches so re-runs can skip duplicated work.
        self.idempotency_manager = IdempotencyManager(db)
    
    async def execute_sync_task(
        self,
        task: SyncTask,
        execution: SyncExecution,
        progress_callback: Optional[callable] = None
    ) -> bool:
        """
        Execute a sync task end to end.

        Orchestrates the whole run: guards against concurrent runs of the
        same task, starts performance monitoring, marks the execution as
        RUNNING, dispatches to full or incremental sync, persists the final
        status and statistics, and always clears the running flag and
        idempotency records in ``finally``.

        Args:
            task: Sync task definition to execute.
            execution: Execution record tracking this run's progress.
            progress_callback: Optional async callback invoked with
                (execution_id, progress_pct, processed, total).

        Returns:
            True if the run completed successfully, False otherwise
            (including when the task was already running or raised).
        """
        task_id = task.id

        # In-process guard: skip if this task already has a run in flight.
        if task_id in self._running_tasks and self._running_tasks[task_id]:
            await self._log_message(execution.id, LogLevel.WARNING, "任务已在运行中，跳过执行")
            return False

        try:
            # Mark the task as running.
            self._running_tasks[task_id] = True

            # Start performance monitoring for this execution.
            task_performance = performance_monitor.start_task_monitoring(task.id, execution.id)

            # Transition the execution record to RUNNING and persist it.
            execution.status = ExecutionStatus.RUNNING
            execution.started_at = get_shanghai_now()
            await self.db.commit()

            await self._log_message(execution.id, LogLevel.INFO, f"开始执行同步任务: {task.name}")

            # Resolve the source data source; abort if it was deleted/missing.
            source_datasource = await self._get_datasource(task.source_datasource_id)
            if not source_datasource:
                raise Exception(f"源数据源不存在: {task.source_datasource_id}")

            # Dispatch on sync type: anything not FULL runs incrementally.
            if task.sync_type == SyncType.FULL:
                success = await self._execute_full_sync(task, execution, source_datasource, progress_callback)
            else:
                success = await self._execute_incremental_sync(task, execution, source_datasource, progress_callback)

            # Stop performance monitoring.
            performance_monitor.end_task_monitoring(execution.id)

            # Persist the final execution status.
            logger.info(f"[执行ID:{execution.id}] 准备更新执行状态，success={success}")

            # Fill in final statistics, defaulting missing counters to 0.
            execution.status = ExecutionStatus.SUCCESS if success else ExecutionStatus.FAILED
            execution.completed_at = get_shanghai_now()
            execution.total_records = execution.total_records or 0
            execution.processed_records = execution.processed_records or 0
            execution.success_records = execution.success_records or 0
            execution.failed_records = execution.failed_records or 0

            if execution.started_at:
                execution.execution_time_ms = int(
                    (execution.completed_at - execution.started_at).total_seconds() * 1000
                )

            logger.info(f"[执行ID:{execution.id}] 执行状态已设置为: {execution.status}")
            logger.info(f"[执行ID:{execution.id}] 统计信息: 总记录数={execution.total_records}, 处理记录数={execution.processed_records}, 成功记录数={execution.success_records}, 失败记录数={execution.failed_records}")

            try:
                await self.db.commit()
                logger.info(f"[执行ID:{execution.id}] 数据库事务已提交")
            except Exception as commit_error:
                logger.error(f"[执行ID:{execution.id}] 提交数据库事务失败: {commit_error}")
                # Retry once after detaching everything and refreshing.
                # NOTE(review): refresh() on an expunged (detached) instance,
                # and commit() on a failed session without a prior rollback(),
                # may themselves raise — confirm this recovery path against
                # SQLAlchemy async-session semantics.
                self.db.expunge_all()
                await self.db.refresh(execution)
                await self.db.commit()
                logger.info(f"[执行ID:{execution.id}] 数据库事务重试提交成功")

            status_msg = "执行成功" if success else "执行失败"
            await self._log_message(execution.id, LogLevel.INFO, f"同步任务完成: {status_msg}")

            # Surface any performance-tuning suggestions into the run log.
            suggestions = performance_monitor.get_optimization_suggestions(execution.id)
            if suggestions:
                await self._log_message(
                    execution.id,
                    LogLevel.INFO,
                    f"性能优化建议: {'; '.join(suggestions)}"
                )

            return success

        except Exception as e:
            logger.error(f"[执行ID:{execution.id}] 执行同步任务失败: {e}")

            # Stop performance monitoring even on failure.
            performance_monitor.end_task_monitoring(execution.id)

            # Mark the execution FAILED and record the error text.
            execution.status = ExecutionStatus.FAILED
            execution.completed_at = get_shanghai_now()
            execution.error_message = str(e)

            # Ensure counters are never persisted as NULL.
            execution.total_records = execution.total_records or 0
            execution.processed_records = execution.processed_records or 0
            execution.success_records = execution.success_records or 0
            execution.failed_records = execution.failed_records or 0

            if execution.started_at:
                execution.execution_time_ms = int(
                    (execution.completed_at - execution.started_at).total_seconds() * 1000
                )

            logger.info(f"[执行ID:{execution.id}] 异常处理中设置状态为: {execution.status}")

            try:
                await self.db.commit()
                logger.info(f"[执行ID:{execution.id}] 异常处理中数据库事务已提交")
            except Exception as commit_error:
                # Best-effort: the failure is already logged above.
                logger.error(f"[执行ID:{execution.id}] 异常处理中提交失败状态时出错: {commit_error}")

            await self._log_message(execution.id, LogLevel.ERROR, f"执行失败: {str(e)}")

            return False

        finally:
            # Always release the running flag and per-run idempotency records.
            self._running_tasks.pop(task_id, None)
            await self.idempotency_manager.clear_execution_records(execution.id)
    
    async def _execute_full_sync(
        self,
        task: SyncTask,
        execution: SyncExecution,
        source_datasource: DataSource,
        progress_callback: Optional[callable] = None
    ) -> bool:
        """
        Run a full (whole-table) sync for *task*.

        Counts the source rows, then streams them batch by batch to the
        configured target. Between batches the loop honours pause/cancel
        flags (in-memory first, then the database status) and uses the
        idempotency manager to skip batches already processed by a
        previous attempt.

        Args:
            task: Sync task definition.
            execution: Execution record updated with progress/statistics.
            source_datasource: Resolved source data source.
            progress_callback: Optional async progress callback.

        Returns:
            True when no batch failed, False otherwise.
        """
        # Define batch_size up front so it is available on every code path.
        batch_size = task.batch_size or 5000  # default 5000 for throughput

        try:
            await self._log_message(execution.id, LogLevel.INFO, "开始全量同步")

            # Build the source query SQL.
            query_sql = task.source_query or f"SELECT * FROM {task.source_table_name}"

            # Build the row-count query; custom queries are wrapped in a
            # subquery, plain table syncs count the table directly.
            # NOTE(review): table/query text is interpolated straight into
            # SQL — safe only if task configuration is trusted input.
            if task.source_query:
                count_sql = f"SELECT COUNT(*) as total FROM ({query_sql}) as temp_table"
            else:
                count_sql = f"SELECT COUNT(*) as total FROM {task.source_table_name}"

            await self._log_message(execution.id, LogLevel.INFO, f"开始执行计数查询: {count_sql}")
            total_records = await self._execute_count_query(source_datasource, count_sql)
            await self._log_message(execution.id, LogLevel.INFO, f"计数查询完成，总记录数: {total_records:,}")

            execution.total_records = total_records
            await self.db.commit()

            await self._log_message(
                execution.id,
                LogLevel.INFO,
                f"总记录数: {total_records:,}，批处理大小: {batch_size}，预计批次数: {math.ceil(total_records/batch_size) if total_records > 0 else 0}"
            )
            processed_records = 0
            success_records = 0
            failed_records = 0

            async for batch_data in self._fetch_data_in_batches(
                source_datasource, query_sql, batch_size
            ):
                # Cancellation check (in-memory flag first, then DB status).
                if (self.is_execution_cancelled(execution.id) or
                    await self.is_execution_cancelled_from_db(execution.id)):
                    await self._log_message(
                        execution.id,
                        LogLevel.INFO,
                        "同步任务已被取消，停止处理"
                    )
                    break

                # Pause loop: poll every second until resumed (or cancelled).
                while (self.is_execution_paused(execution.id) or
                       await self.is_execution_paused_from_db(execution.id)):
                    await self._log_message(
                        execution.id,
                        LogLevel.INFO,
                        "同步任务已暂停，等待恢复..."
                    )
                    await asyncio.sleep(1)  # re-check once per second

                    # Also honour cancellation while paused.
                    if (self.is_execution_cancelled(execution.id) or
                        await self.is_execution_cancelled_from_db(execution.id)):
                        await self._log_message(
                            execution.id,
                            LogLevel.INFO,
                            "同步任务在暂停期间被取消"
                        )
                        break

                # If cancelled while paused, leave the outer batch loop too.
                if (self.is_execution_cancelled(execution.id) or
                    await self.is_execution_cancelled_from_db(execution.id)):
                    break

                batch_number = (processed_records // batch_size) + 1
                batch_start_time = time.time()

                # Log only every 10th batch and the final batch to limit volume.
                total_batches = math.ceil(total_records / batch_size) if total_records else 1
                if batch_number % 10 == 0 or batch_number == total_batches:
                    await self._log_message(
                        execution.id,
                        LogLevel.INFO,
                        f"处理第 {batch_number}/{total_batches} 批数据，记录数: {len(batch_data)}",
                        batch_number=batch_number
                    )

                # Idempotency: a falsy result means this batch was already
                # processed in a previous run, so count it and move on.
                if not await self.idempotency_manager.check_and_record(
                    task.id, execution.id, batch_data, batch_number
                ):
                    await self._log_message(
                        execution.id,
                        LogLevel.INFO,
                        f"第 {batch_number} 批数据已处理过，跳过",
                        batch_number=batch_number
                    )
                    processed_records += len(batch_data)
                    success_records += len(batch_data)
                    continue

                # Write the batch to the configured target.
                logger.info(f"[执行ID:{execution.id}] 准备写入目标系统，批次: {batch_number}, 数据条数: {len(batch_data)}")
                batch_success = await self._write_to_target(task, batch_data, execution.id, batch_number)
                logger.info(f"[执行ID:{execution.id}] 写入目标系统结果: {batch_success}")

                batch_end_time = time.time()
                batch_time = batch_end_time - batch_start_time

                if batch_success:
                    success_records += len(batch_data)
                    # Record the batch as processed for idempotency.
                    await self.idempotency_manager.mark_as_processed(
                        task.id, execution.id, batch_data, batch_number,
                        metadata={'batch_time': batch_time, 'success': True}
                    )
                else:
                    failed_records += len(batch_data)

                processed_records += len(batch_data)

                # Feed batch timing/size (and failures) into the monitor.
                performance_monitor.update_batch_metrics(
                    execution.id, batch_time, len(batch_data),
                    len(batch_data) if not batch_success else 0
                )

                # Persist progress on the execution record.
                progress_percentage = (processed_records / total_records) * 100 if total_records > 0 else 100
                execution.progress_percentage = round(progress_percentage, 2)
                execution.processed_records = processed_records
                execution.success_records = success_records
                execution.failed_records = failed_records

                await self.db.commit()

                # Log roughly every 100 batches or each ~10% progress step.
                # NOTE(review): the second clause divides by total_records —
                # raises ZeroDivisionError if the count query returned 0 while
                # the data query still yields rows; confirm count failures
                # cannot reach this point.
                if batch_number % 100 == 0 or (batch_number > 1 and int(progress_percentage / 10) > int((progress_percentage - len(batch_data)/total_records*100) / 10)):
                    await self._log_message(
                        execution.id,
                        LogLevel.INFO,
                        f"同步进度: {progress_percentage:.1f}% ({processed_records:,}/{total_records:,}) - 批次 {batch_number}",
                        batch_number=batch_number
                    )

                # Notify the caller-provided progress callback, if any.
                if progress_callback:
                    await progress_callback(execution.id, progress_percentage, processed_records, total_records)

            # Clear pause/cancel flags for this execution.
            self._paused_executions.discard(execution.id)
            self._cancelled_executions.discard(execution.id)

            return failed_records == 0

        except Exception as e:
            # Clear pause/cancel flags before propagating.
            self._paused_executions.discard(execution.id)
            self._cancelled_executions.discard(execution.id)
            await self._log_message(execution.id, LogLevel.ERROR, f"全量同步失败: {str(e)}")
            raise
    
    async def _execute_incremental_sync(
        self,
        task: SyncTask,
        execution: SyncExecution,
        source_datasource: DataSource,
        progress_callback: Optional[callable] = None
    ) -> bool:
        """
        Run an incremental sync for *task*.

        Fetches only rows whose incremental field exceeds the last synced
        value (or a per-execution override), writes them in batches, and —
        after a fully successful run — stores the highest processed value
        back on the task as the next run's starting point.

        Args:
            task: Sync task definition (must define incremental_field).
            execution: Execution record updated with progress/statistics.
            source_datasource: Resolved source data source.
            progress_callback: Optional async progress callback.

        Returns:
            True when no batch failed (including the no-new-data case),
            False otherwise.
        """
        # Define batch_size up front so it is available on every code path.
        batch_size = task.batch_size or 5000  # default 5000 for throughput

        try:
            await self._log_message(execution.id, LogLevel.INFO, "开始增量同步")

            # A per-execution override for the incremental start value.
            custom_incremental_value = None
            if execution.execution_params:
                custom_incremental_value = execution.execution_params.get('custom_incremental_value')

            # Resolve the starting value for this incremental run.
            last_value = await self._get_last_sync_value(task, custom_incremental_value)

            # Build the incremental query around the base query.
            base_query = task.source_query or f"SELECT * FROM {task.source_table_name}"

            if last_value:
                # NOTE(review): last_value is interpolated directly into SQL
                # (single-quoted for timestamps, bare for ids) — quoting /
                # injection risk if the stored value is not trusted.
                if task.incremental_field_type == "timestamp":
                    incremental_condition = f"{task.incremental_field} > '{last_value}'"
                else:  # numeric id field
                    incremental_condition = f"{task.incremental_field} > {last_value}"

                query_sql = f"{base_query} WHERE {incremental_condition} ORDER BY {task.incremental_field}"
            else:
                query_sql = f"{base_query} ORDER BY {task.incremental_field}"

            await self._log_message(
                execution.id,
                LogLevel.INFO,
                f"增量查询条件: 从 {last_value or '开始'} 开始同步"
            )

            # Count the rows this run will process.
            count_sql = f"SELECT COUNT(*) as total FROM ({query_sql}) as temp_table"
            total_records = await self._execute_count_query(source_datasource, count_sql)

            execution.total_records = total_records
            await self.db.commit()

            if total_records == 0:
                await self._log_message(execution.id, LogLevel.INFO, "没有新的增量数据")
                return True
            processed_records = 0
            success_records = 0
            failed_records = 0
            last_processed_value = last_value

            async for batch_data in self._fetch_data_in_batches(
                source_datasource, query_sql, batch_size
            ):
                # Cancellation check (in-memory flag first, then DB status).
                if (self.is_execution_cancelled(execution.id) or
                    await self.is_execution_cancelled_from_db(execution.id)):
                    await self._log_message(
                        execution.id,
                        LogLevel.INFO,
                        "增量同步任务已被取消，停止处理"
                    )
                    break

                # Pause loop: poll every second until resumed (or cancelled).
                while (self.is_execution_paused(execution.id) or
                       await self.is_execution_paused_from_db(execution.id)):
                    await self._log_message(
                        execution.id,
                        LogLevel.INFO,
                        "增量同步任务已暂停，等待恢复..."
                    )
                    await asyncio.sleep(1)  # re-check once per second

                    # Also honour cancellation while paused.
                    if (self.is_execution_cancelled(execution.id) or
                        await self.is_execution_cancelled_from_db(execution.id)):
                        await self._log_message(
                            execution.id,
                            LogLevel.INFO,
                            "增量同步任务在暂停期间被取消"
                        )
                        break

                # If cancelled while paused, leave the outer batch loop too.
                if (self.is_execution_cancelled(execution.id) or
                    await self.is_execution_cancelled_from_db(execution.id)):
                    break

                batch_number = (processed_records // batch_size) + 1

                # Write the batch to the configured target.
                batch_success = await self._write_to_target(task, batch_data, execution.id, batch_number)

                if batch_success:
                    success_records += len(batch_data)
                    # Track the highest value written so far; rows arrive
                    # ordered by the incremental field (ORDER BY above).
                    if batch_data:
                        last_processed_value = str(batch_data[-1].get(task.incremental_field))
                else:
                    failed_records += len(batch_data)

                processed_records += len(batch_data)

                # Persist progress on the execution record.
                # NOTE(review): divides by total_records — raises
                # ZeroDivisionError if the count query failed (returned 0)
                # while the data query still yields rows.
                progress_percentage = (processed_records / total_records) * 100
                execution.progress_percentage = round(progress_percentage, 2)
                execution.processed_records = processed_records
                execution.success_records = success_records
                execution.failed_records = failed_records
                execution.last_processed_value = last_processed_value

                await self.db.commit()

                # Notify the caller-provided progress callback, if any.
                if progress_callback:
                    await progress_callback(execution.id, progress_percentage, processed_records, total_records)

            # Clear pause/cancel flags for this execution.
            self._paused_executions.discard(execution.id)
            self._cancelled_executions.discard(execution.id)

            # After a clean run with data, persist the new high-water mark.
            if failed_records == 0 and last_processed_value and processed_records > 0:
                from sqlalchemy import update
                await self.db.execute(
                    update(SyncTask)
                    .where(SyncTask.id == task.id)
                    .values(last_incremental_value=last_processed_value)
                )
                await self.db.commit()
                await self._log_message(
                    execution.id,
                    LogLevel.INFO,
                    f"更新任务最后增量值: {last_processed_value}"
                )

            return failed_records == 0

        except Exception as e:
            # Clear pause/cancel flags before propagating.
            self._paused_executions.discard(execution.id)
            self._cancelled_executions.discard(execution.id)
            await self._log_message(execution.id, LogLevel.ERROR, f"增量同步失败: {str(e)}")
            raise
    
    async def _get_datasource(self, datasource_id: int) -> Optional[DataSource]:
        """Look up a non-deleted data source by primary key.

        Returns the matching DataSource, or None when absent or
        soft-deleted; raises if multiple rows match (scalar_one_or_none).
        """
        stmt = select(DataSource).where(
            DataSource.id == datasource_id,
            DataSource.is_deleted == False
        )
        query_result = await self.db.execute(stmt)
        return query_result.scalar_one_or_none()
    
    async def _execute_count_query(self, datasource: DataSource, count_sql: str) -> int:
        """Run a COUNT(*) query against *datasource* and return the total.

        Expects the query to alias the count as ``total``. Returns 0 for
        an empty result AND on any error (errors are logged, not raised),
        so callers cannot distinguish "no rows" from "count failed".
        """
        try:
            logger.info(f"开始执行计数查询，数据源: {datasource.name}, SQL: {count_sql}")

            # Build a one-off connector for the data source's dialect.
            from app.utils.database_connector import DatabaseConnector
            from app.utils.encryption import decrypt_password

            db_connector = DatabaseConnector(
                db_type=datasource.type,
                host=datasource.host,
                port=datasource.port,
                database=datasource.database_name,
                username=datasource.username,
                password=decrypt_password(datasource.password_encrypted),
                connection_params=datasource.connection_params or {},
            )

            logger.info(f"数据库连接器创建成功，开始执行查询")

            rows = await db_connector.execute_query(count_sql)

            logger.info(f"查询执行完成，结果: {rows}")

            if not rows:
                logger.warning("计数查询返回空结果")
                return 0

            total = rows[0].get('total', 0)
            logger.info(f"计数查询成功，总记录数: {total}")
            return total

        except Exception as e:
            logger.error(f"执行计数查询失败: {e}", exc_info=True)
            return 0
    
    async def _fetch_data_in_batches(
        self,
        datasource: DataSource,
        query_sql: str,
        batch_size: int
    ) -> AsyncGenerator[List[Dict[str, Any]], None]:
        """
        Yield rows from *datasource* as lists of up to *batch_size* dicts.

        Pagination is emulated by re-running the query once per page with
        the dialect-appropriate LIMIT/OFFSET (or ROWNUM / OFFSET-FETCH)
        clause appended.

        NOTE(review): OFFSET pagination is only stable when query_sql has a
        deterministic ORDER BY; otherwise pages can skip or repeat rows as
        the table changes — confirm callers always order their queries.
        NOTE(review): the sqlserver branch appends its own ORDER BY, which
        would produce invalid SQL if query_sql already contains one (the
        incremental sync path does).

        Args:
            datasource: Source connection settings.
            query_sql: Base query to paginate (interpolated into SQL as-is).
            batch_size: Maximum rows per yielded batch.

        Yields:
            Lists of row dicts; iteration stops at the first empty or
            short page, or silently on any error (see NOTE in except).
        """
        try:
            from app.utils.database_connector import DatabaseConnector
            from app.utils.encryption import decrypt_password

            connector = DatabaseConnector(
                db_type=datasource.type,
                host=datasource.host,
                port=datasource.port,
                database=datasource.database_name,
                username=datasource.username,
                password=decrypt_password(datasource.password_encrypted),
                connection_params=datasource.connection_params or {}
            )

            # Page through the result set until a short/empty page.
            offset = 0
            while True:
                # Build the dialect-specific paginated SQL.
                if datasource.type.value in ['mysql', 'postgresql', 'doris']:
                    paginated_sql = f"{query_sql} LIMIT {batch_size} OFFSET {offset}"
                elif datasource.type.value == 'oracle':
                    paginated_sql = f"""
                        SELECT * FROM (
                            SELECT ROWNUM rn, t.* FROM ({query_sql}) t
                            WHERE ROWNUM <= {offset + batch_size}
                        ) WHERE rn > {offset}
                    """
                elif datasource.type.value == 'sqlserver':
                    paginated_sql = f"{query_sql} ORDER BY (SELECT NULL) OFFSET {offset} ROWS FETCH NEXT {batch_size} ROWS ONLY"
                else:
                    paginated_sql = f"{query_sql} LIMIT {batch_size} OFFSET {offset}"

                # Run the page query.
                batch_data = await connector.execute_query(paginated_sql)

                if not batch_data:
                    break

                yield batch_data

                # A short page means this was the final batch.
                if len(batch_data) < batch_size:
                    break

                offset += batch_size

        except Exception as e:
            logger.error(f"分批获取数据失败: {e}")
            # Swallow the error and end the stream so the caller survives.
            # NOTE(review): this silently truncates the stream on failure,
            # so a partial fetch can look like a successful shorter sync.
            return
    
    async def _write_to_target(
        self,
        task: SyncTask,
        data: List[Dict[str, Any]],
        execution_id: int,
        batch_number: int
    ) -> bool:
        """Dispatch one batch to the writer matching the task's target type.

        Args:
            task: Sync task carrying target configuration.
            data: Batch of row dicts to write.
            execution_id: Execution id used for logging.
            batch_number: 1-based batch index within the run.

        Returns:
            True on success; False for unsupported targets or any error
            (errors are logged and recorded, never raised).
        """
        try:
            logger.info(f"[执行ID:{execution_id}] 开始写入目标系统，目标类型: {task.target_type}, 数据条数: {len(data)}")

            # Target-type dispatch table: (log label, bound writer coroutine).
            writers = {
                TargetType.CSV: ("CSV", self._write_to_csv),
                TargetType.DORIS: ("Doris", self._write_to_doris),
                TargetType.KAFKA: ("Kafka", self._write_to_kafka),
            }

            entry = writers.get(task.target_type)
            if entry is None:
                logger.error(f"[执行ID:{execution_id}] 不支持的目标类型: {task.target_type}")
                await self._log_message(execution_id, LogLevel.ERROR, f"不支持的目标类型: {task.target_type}")
                return False

            label, writer = entry
            logger.info(f"[执行ID:{execution_id}] 调用{label}写入方法")
            return await writer(task, data, execution_id, batch_number)
        except Exception as e:
            # Stringify the error up front to avoid JSON serialization issues.
            error_msg = str(e)
            logger.error(f"[执行ID:{execution_id}] 写入目标失败: {error_msg}")
            await self._log_message(execution_id, LogLevel.ERROR, f"写入目标失败: {error_msg}", batch_number=batch_number)
            return False
    
    async def _write_to_csv(
        self,
        task: SyncTask,
        data: List[Dict[str, Any]],
        execution_id: int,
        batch_number: int
    ) -> bool:
        """
        Write one batch of rows to the task's target CSV file.

        CSV formatting options are resolved in priority order: the
        execution's ``csv_params`` override, then the task's csv_* fields,
        then built-in defaults. The header is written only for the first
        batch of a file that does not exist yet; later batches append.

        Args:
            task: Sync task carrying CSV configuration and target path.
            data: Batch of row dicts to write.
            execution_id: Execution id used for logging.
            batch_number: 1-based batch index within the run.

        Returns:
            True on success, False on any error (errors are logged).
        """
        try:
            from app.core.config import settings
            from datetime import datetime

            # Load the execution record so per-run CSV overrides can apply.
            execution = await self._get_execution(execution_id)

            # CSV options from execution params take priority over task config.
            csv_params = {}
            if execution and execution.execution_params and 'csv_params' in execution.execution_params:
                csv_params = execution.execution_params.get('csv_params', {})

            # Resolve each option: execution params -> task config -> default.
            delimiter = csv_params.get('delimiter', task.csv_delimiter or ',')
            quote_char = csv_params.get('quote_char', task.csv_quote_char or '"')
            escape_char = csv_params.get('escape_char', task.csv_escape_char or '\\')
            line_terminator = csv_params.get('line_terminator', task.csv_line_terminator or '\n')
            encoding = csv_params.get('encoding', task.csv_encoding or 'utf-8')
            include_header = csv_params.get('include_header', task.csv_include_header if task.csv_include_header is not None else True)
            null_value = csv_params.get('null_value', task.csv_null_value or '')
            date_format = csv_params.get('date_format', task.csv_date_format or '%Y-%m-%d %H:%M:%S')
            escape_special_chars = csv_params.get('escape_special_chars', True)

            # Keep this log line simple; do not pass complex payloads.
            logger.info(f"[执行ID:{execution_id}] 开始写入CSV文件，数据条数: {len(data)}")

            await self._log_message(
                execution_id,
                LogLevel.INFO,
                f"开始写入CSV文件，数据条数: {len(data)}，分隔符: '{delimiter}'",
                batch_number=batch_number
            )

            # Auto-generate a timestamped file name when no path is configured.
            if not task.target_path:
                timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                filename = f"{task.name}_{timestamp}.csv"
                file_path = os.path.join(settings.CSV_EXPORT_PATH, filename)

                # Remember the generated path on the task object.
                # NOTE(review): mutates the task row in memory; persistence
                # relies on a later session commit elsewhere — confirm intended.
                task.target_path = file_path
            else:
                file_path = task.target_path

            await self._log_message(
                execution_id,
                LogLevel.INFO,
                f"CSV文件路径: {file_path}",
                batch_number=batch_number
            )

            # Make sure the target directory exists.
            os.makedirs(os.path.dirname(file_path), exist_ok=True)

            # Header only for the first batch of a not-yet-existing file; a
            # file left over from a previous run is appended to header-less.
            write_header = batch_number == 1 and not os.path.exists(file_path) and include_header

            await self._log_message(
                execution_id,
                LogLevel.INFO,
                f"开始处理数据，应用格式: 日期格式='{date_format}', 空值='{null_value}'",
                batch_number=batch_number
            )

            # Normalise values (dates, NULLs, special chars) before writing.
            # Progress interval: roughly every 10% of the batch, floored at
            # 100 rows and capped at 1000 rows.
            total_records = len(data)
            progress_interval = min(max(total_records // 10, 100), 1000)

            processed_data = []
            for i, row in enumerate(data):
                processed_row = {}
                for key, value in row.items():
                    # Convert each value according to its type.
                    if isinstance(value, datetime):
                        processed_row[key] = value.strftime(date_format)
                    elif value is None:
                        processed_row[key] = null_value
                    else:
                        value_str = str(value)

                        # Special characters inside quoted fields are handled
                        # by the csv module itself; no manual escaping needed.
                        if escape_special_chars:
                            # Newlines/CRs/tabs survive inside quoted fields;
                            # the writer doubles quote characters as required.
                            if '\n' in value_str or '\r' in value_str or '\t' in value_str:
                                # Intentionally a no-op: csv.DictWriter quotes these.
                                pass

                        processed_row[key] = value_str

                processed_data.append(processed_row)

                # Periodic progress log per the computed interval.
                if (i + 1) % progress_interval == 0:
                    await self._log_message(
                        execution_id,
                        LogLevel.INFO,
                        f"已处理 {i + 1} 条记录 (进度: {((i + 1) / total_records * 100):.1f}%)",
                        batch_number=batch_number
                    )

            await self._log_message(
                execution_id,
                LogLevel.INFO,
                f"数据处理完成，开始写入文件",
                batch_number=batch_number
            )

            # Write (or append) the batch; 'w' truncates only when we also
            # write the header, i.e. the first batch of a brand-new file.
            mode = 'w' if write_header else 'a'
            with open(file_path, mode, newline='', encoding=encoding) as csvfile:
                if processed_data:
                    fieldnames = processed_data[0].keys()
                    writer = csv.DictWriter(
                        csvfile,
                        fieldnames=fieldnames,
                        delimiter=delimiter,
                        quotechar=quote_char,
                        escapechar=escape_char,
                        lineterminator=line_terminator,
                        quoting=csv.QUOTE_MINIMAL
                    )

                    if write_header:
                        writer.writeheader()

                    writer.writerows(processed_data)

            await self._log_message(
                execution_id,
                LogLevel.INFO,
                f"成功写入CSV文件: {len(data)} 条记录",
                batch_number=batch_number
            )
            return True

        except Exception as e:
            await self._log_message(
                execution_id,
                LogLevel.ERROR,
                f"写入CSV文件失败: {str(e)}",
                batch_number=batch_number
            )
            return False
    
    async def _write_to_doris(
        self,
        task: SyncTask,
        data: List[Dict[str, Any]],
        execution_id: int,
        batch_number: int
    ) -> bool:
        """Write a batch of records to a Doris table via Stream Load.

        Batches larger than ``MAX_BATCH_SIZE`` are split into sub-batches;
        each slice is delegated to ``_write_doris_batch``, which performs the
        actual HTTP Stream Load request.

        Args:
            task: Sync task holding the target datasource id and table name.
            data: Records to write in this batch.
            execution_id: Id of the current sync execution (for logging).
            batch_number: Sequence number of this batch (for logging).

        Returns:
            True when every (sub-)batch was written successfully,
            False otherwise.
        """
        try:
            target_datasource = await self._get_datasource(task.target_datasource_id)
            if not target_datasource:
                raise Exception("目标Doris数据源不存在")

            # Cap the per-request payload so a single Stream Load request
            # stays at a manageable size.
            MAX_BATCH_SIZE = 10000
            if len(data) > MAX_BATCH_SIZE:
                await self._log_message(
                    execution_id,
                    LogLevel.INFO,
                    f"数据量较大({len(data)}条)，分成{math.ceil(len(data)/MAX_BATCH_SIZE)}批处理",
                    batch_number=batch_number
                )

                success = True
                for i in range(0, len(data), MAX_BATCH_SIZE):
                    batch_data = data[i:i+MAX_BATCH_SIZE]
                    # Sub-batch id like "3.2": parent batch 3, second slice.
                    sub_batch_number = f"{batch_number}.{i//MAX_BATCH_SIZE+1}"

                    await self._log_message(
                        execution_id,
                        LogLevel.INFO,
                        f"处理子批次 {sub_batch_number}，数据条数: {len(batch_data)}",
                        batch_number=batch_number
                    )

                    batch_success = await self._write_doris_batch(
                        task,
                        batch_data,
                        target_datasource,
                        execution_id,
                        sub_batch_number
                    )

                    # Keep going on failure so later slices still get a try;
                    # report overall failure at the end.
                    if not batch_success:
                        success = False

                return success

            await self._log_message(
                execution_id,
                LogLevel.INFO,
                f"开始写入Doris，目标表: {task.target_table_name}，数据条数: {len(data)}",
                batch_number=batch_number
            )

            # Small enough for a single Stream Load request.
            return await self._write_doris_batch(task, data, target_datasource, execution_id, batch_number)

        except Exception as e:
            await self._log_message(
                execution_id,
                LogLevel.ERROR,
                f"写入Doris异常: {str(e)}",
                batch_number=batch_number
            )
            return False

    async def _write_doris_batch(
        self,
        task: SyncTask,
        data: List[Dict[str, Any]],
        target_datasource: Any,
        execution_id: int,
        batch_number: Any
    ) -> bool:
        """Write one batch of records to Doris through the Stream Load HTTP API.

        Records are normalized to JSON-serializable values, serialized as
        JSON Lines (one compact JSON object per line) and PUT to the table's
        ``_stream_load`` endpoint.

        Args:
            task: Sync task providing the target table name.
            data: Records of this (sub-)batch.
            target_datasource: Target datasource object (host, port,
                credentials, connection_params).
            execution_id: Current execution id, used for logging.
            batch_number: Batch identifier — an int for top-level batches or
                a string like "3.2" for sub-batches.

        Returns:
            True only when Doris reports ``Status == "Success"``.
        """
        try:
            import aiohttp
            from decimal import Decimal
            from app.utils.encryption import decrypt_password
            from app.utils.database_connector import get_doris_http_port

            # Stream Load speaks HTTP (FE port 8030/18030), not the MySQL
            # protocol port (9030). Prefer an explicit http_port from
            # connection_params, otherwise map the configured port.
            if target_datasource.connection_params and 'http_port' in target_datasource.connection_params:
                http_port = target_datasource.connection_params['http_port']
            else:
                http_port = get_doris_http_port(target_datasource.port)

            stream_load_url = (
                f"http://{target_datasource.host}:{http_port}"
                f"/api/{target_datasource.database_name}/{task.target_table_name}/_stream_load"
            )

            await self._log_message(
                execution_id,
                LogLevel.INFO,
                f"数据源配置端口: {target_datasource.port}, 使用HTTP端口: {http_port}",
                batch_number=batch_number
            )

            await self._log_message(
                execution_id,
                LogLevel.INFO,
                f"Stream Load URL: {stream_load_url}",
                batch_number=batch_number
            )

            # Basic auth with the datasource credentials.
            auth = aiohttp.BasicAuth(
                target_datasource.username,
                decrypt_password(target_datasource.password_encrypted)
            )

            # Normalize each record into JSON-serializable values and ensure
            # the Doris UNIQUE KEY columns are populated.
            processed_data = []
            for i, record in enumerate(data):
                processed_record = {}
                for key, value in record.items():
                    if isinstance(value, datetime):
                        # Doris DATETIME columns expect this text format.
                        processed_record[key] = value.strftime('%Y-%m-%d %H:%M:%S')
                    elif isinstance(value, Decimal):
                        processed_record[key] = str(value)
                    elif isinstance(value, (dict, list)):
                        # Nested structures are stored as JSON strings.
                        processed_record[key] = safe_json_dumps(value)
                    else:
                        # None and plain scalars pass through unchanged.
                        processed_record[key] = value

                # The target table is UNIQUE KEY(id, edge_id); fall back to
                # edge_id, or a generated id, when the source lacks an 'id'.
                # NOTE(review): this is table-specific — confirm it applies
                # to every task routed through this writer.
                if 'id' not in processed_record and 'edge_id' in processed_record:
                    processed_record['id'] = processed_record['edge_id']
                elif 'id' not in processed_record:
                    processed_record['id'] = f"auto_id_{i}_{int(time.time())}"

                processed_data.append(processed_record)

            # Serialize as JSON Lines: one compact JSON object per line.
            if processed_data:
                fieldnames = list(processed_data[0].keys())
                json_lines = [json.dumps(record, ensure_ascii=False) for record in processed_data]
                payload = '\n'.join(json_lines)

                await self._log_message(
                    execution_id,
                    LogLevel.INFO,
                    f"使用JSON格式，字段数: {len(fieldnames)}，字段: {fieldnames}",
                    batch_number=batch_number
                )

                # Log a small sample and the line count for debugging.
                sample_lines = json_lines[:2]
                await self._log_message(
                    execution_id,
                    LogLevel.INFO,
                    f"JSON数据样本: {sample_lines}",
                    batch_number=batch_number
                )

                await self._log_message(
                    execution_id,
                    LogLevel.INFO,
                    f"JSON Lines总行数: {len(json_lines)}，期望行数: {len(processed_data)}",
                    batch_number=batch_number
                )
            else:
                payload = ""

            await self._log_message(
                execution_id,
                LogLevel.INFO,
                f"JSON数据大小: {len(payload)} 字节，处理后记录数: {len(processed_data)}",
                batch_number=batch_number
            )

            # Sample logging is limited to the first (sub-)batch to keep the
            # log volume down.
            if processed_data:
                sample_record = processed_data[0]
                if batch_number == 1:
                    await self._log_message(
                        execution_id,
                        LogLevel.INFO,
                        f"数据样本字段: {list(sample_record.keys())}",
                        batch_number=batch_number
                    )

                if str(batch_number).endswith('.1') or batch_number == 1:
                    sample_data = processed_data[:2]
                    await self._log_message(
                        execution_id,
                        LogLevel.INFO,
                        f"数据样本内容: {safe_json_dumps(sample_data)}",
                        batch_number=batch_number
                    )

            # Stream Load headers for JSON Lines input. 'Expect: 100-continue'
            # is required by Doris; max_filter_ratio tolerates up to 20% of
            # rows being filtered server-side before the load is failed.
            headers = {
                'Content-Type': 'text/plain',
                'format': 'json',
                'read_json_by_line': 'true',
                'max_filter_ratio': '0.2',
                'Expect': '100-continue',
                'strip_outer_array': 'false',
                'fuzzy_parse': 'false'
            }

            await self._log_message(
                execution_id,
                LogLevel.INFO,
                f"请求头部: {headers}",
                batch_number=batch_number
            )

            # Conservative connector/timeout settings to avoid broken-pipe
            # errors on large uploads. The session owns the connector, so
            # both are closed when the context exits (the previous
            # connector_owner=False left the per-call connector unclosed).
            connector = aiohttp.TCPConnector(
                limit=10,
                limit_per_host=5,
                keepalive_timeout=600,
                enable_cleanup_closed=True
            )

            timeout = aiohttp.ClientTimeout(
                total=600,   # overall request budget: 10 minutes
                connect=60,  # connect: 1 minute
                sock_read=300  # socket read: 5 minutes
            )

            async with aiohttp.ClientSession(
                connector=connector,
                timeout=timeout
            ) as session:
                async with session.put(
                    stream_load_url,
                    data=payload.encode('utf-8'),
                    headers=headers,
                    auth=auth
                ) as response:
                    await self._log_message(
                        execution_id,
                        LogLevel.INFO,
                        f"HTTP响应状态: {response.status}",
                        batch_number=batch_number
                    )

                    result_text = await response.text()

                    await self._log_message(
                        execution_id,
                        LogLevel.INFO,
                        f"Doris响应: {result_text[:500]}...",
                        batch_number=batch_number
                    )

                    if response.status != 200:
                        await self._log_message(
                            execution_id,
                            LogLevel.ERROR,
                            f"HTTP请求失败: {response.status}, 响应: {result_text}",
                            batch_number=batch_number
                        )
                        return False

                    try:
                        result_json = json.loads(result_text)
                    except json.JSONDecodeError as e:
                        await self._log_message(
                            execution_id,
                            LogLevel.ERROR,
                            f"解析Doris响应JSON失败: {e}, 原始响应: {result_text[:1000]}",
                            batch_number=batch_number
                        )
                        return False

                    await self._log_message(
                        execution_id,
                        LogLevel.INFO,
                        f"Doris完整响应: {safe_json_dumps(result_json)}",
                        batch_number=batch_number
                    )

                    if result_json.get('Status') == 'Success':
                        loaded_rows = result_json.get('NumberLoadedRows', 0)
                        filtered_rows = result_json.get('NumberFilteredRows', 0)

                        await self._log_message(
                            execution_id,
                            LogLevel.INFO,
                            f"成功写入Doris: 发送 {len(processed_data)} 条记录，加载 {loaded_rows} 条，过滤 {filtered_rows} 条",
                            batch_number=batch_number,
                            details=result_json
                        )
                        return True

                    error_msg = result_json.get('Message', '未知错误')
                    error_url = result_json.get('ErrorURL', '')

                    await self._log_message(
                        execution_id,
                        LogLevel.ERROR,
                        f"写入Doris失败: {error_msg}",
                        batch_number=batch_number,
                        details={
                            'error_message': error_msg,
                            'error_url': error_url,
                            'full_response': result_json
                        }
                    )

                    # Doris publishes per-row error details at ErrorURL;
                    # fetch them for the log when available (best effort).
                    if error_url:
                        try:
                            async with session.get(error_url) as error_response:
                                error_detail = await error_response.text()
                                await self._log_message(
                                    execution_id,
                                    LogLevel.ERROR,
                                    f"Doris详细错误信息: {error_detail[:1000]}",
                                    batch_number=batch_number
                                )
                        except Exception as e:
                            await self._log_message(
                                execution_id,
                                LogLevel.WARNING,
                                f"获取Doris详细错误信息失败: {e}",
                                batch_number=batch_number
                            )

                    return False

        except Exception as e:
            await self._log_message(
                execution_id,
                LogLevel.ERROR,
                f"写入Doris异常: {str(e)}",
                batch_number=batch_number
            )
            return False
    
    async def _write_to_kafka(
        self, 
        task: SyncTask, 
        data: List[Dict[str, Any]], 
        execution_id: int,
        batch_number: int
    ) -> bool:
        """Write a batch of records to a Kafka topic.

        Borrows a client from the shared ``kafka_pool``, sends the records as
        a batch of ``{'value': record}`` messages to ``task.target_path``
        (the topic), and always returns the client to the pool — even when
        sending raises.

        Args:
            task: Sync task holding the target datasource id and topic name.
            data: Records to publish.
            execution_id: Id of the current sync execution (for logging).
            batch_number: Sequence number of this batch (for logging).

        Returns:
            True when the whole batch was sent successfully, False otherwise.
        """
        try:
            target_datasource = await self._get_datasource(task.target_datasource_id)
            if not target_datasource:
                raise Exception("目标Kafka数据源不存在")

            bootstrap = target_datasource.host + ':' + str(target_datasource.port)
            conn_params = target_datasource.connection_params or {}

            # Borrow a client from the pool.
            kafka_client = await kafka_pool.get_client(bootstrap, **conn_params)
            try:
                messages = [{'value': record} for record in data]
                result = await kafka_client.send_batch_messages(task.target_path, messages)
            finally:
                # Return the client even if sending raised, so the pooled
                # connection is not leaked.
                await kafka_pool.return_client(kafka_client, bootstrap, **conn_params)

            if result['success']:
                await self._log_message(
                    execution_id, 
                    LogLevel.INFO, 
                    f"成功写入Kafka: {result['success_count']} 条记录",
                    batch_number=batch_number
                )
                return True
            else:
                await self._log_message(
                    execution_id, 
                    LogLevel.ERROR, 
                    f"写入Kafka部分失败: 成功 {result['success_count']}, 失败 {result['failed_count']}",
                    batch_number=batch_number
                )
                return False
            
        except Exception as e:
            await self._log_message(
                execution_id, 
                LogLevel.ERROR, 
                f"写入Kafka失败: {str(e)}",
                batch_number=batch_number
            )
            return False
    
    async def _get_last_sync_value(self, task: SyncTask, custom_value: Optional[str] = None) -> Optional[str]:
        """Resolve the starting value for an incremental sync.

        Resolution order (first non-None wins):
        1. Caller-supplied ``custom_value``.
        2. ``task.incremental_start_value`` configured on the task.
        3. ``task.last_incremental_value`` recorded on the task.
        4. The ``last_processed_value`` of the most recent successful
           execution of this task.
        """
        # Guard clauses: each tier short-circuits the ones below it.
        if custom_value is not None:
            return custom_value
        if task.incremental_start_value is not None:
            return task.incremental_start_value
        if task.last_incremental_value is not None:
            return task.last_incremental_value

        # Fall back to the newest successful execution that recorded a value.
        query = (
            select(SyncExecution.last_processed_value)
            .where(
                SyncExecution.task_id == task.id,
                SyncExecution.status == ExecutionStatus.SUCCESS,
                SyncExecution.last_processed_value.isnot(None)
            )
            .order_by(SyncExecution.completed_at.desc())
            .limit(1)
        )
        result = await self.db.execute(query)
        return result.scalar_one_or_none()

    async def _get_execution(self, execution_id: int) -> Optional[SyncExecution]:
        """Fetch a SyncExecution by primary key; None when missing or on error."""
        try:
            stmt = select(SyncExecution).where(SyncExecution.id == execution_id)
            result = await self.db.execute(stmt)
            return result.scalar_one_or_none()
        except Exception as e:
            # Lookup failures are logged and reported as "not found".
            logger.error(f"获取执行记录失败: {str(e)}")
            return None

    async def _log_message(
        self, 
        execution_id: int, 
        level: LogLevel, 
        message: str,
        batch_number: Optional[Any] = None,  # int for top-level batches, str like "3.2" for Doris sub-batches
        details: Optional[Dict[str, Any]] = None
    ):
        """Persist a sync log entry and mirror it to the process logger.

        Args:
            execution_id: Id of the execution the entry belongs to.
            level: Log severity; its lowercased value must match a logging
                method name (e.g. INFO -> logger.info).
            message: Human-readable log text.
            batch_number: Optional batch identifier for batch-scoped messages.
            details: Optional structured payload stored with the entry.

        Note: commits the session on every call, so each log line is its
        own transaction.
        """
        log_entry = SyncLog(
            execution_id=execution_id,
            log_level=level,
            log_message=message,
            log_details=details,
            batch_number=batch_number
        )
        
        self.db.add(log_entry)
        await self.db.commit()
        
        # Mirror to the application logger by resolving the method whose
        # name matches the LogLevel value.
        log_func = getattr(logger, level.value.lower())
        log_func(f"[执行ID:{execution_id}] {message}")
    
    def is_task_running(self, task_id: int) -> bool:
        """Return True when the given task is currently flagged as running."""
        return task_id in self._running_tasks and self._running_tasks[task_id]
    
    async def pause_execution(self, execution_id: int) -> bool:
        """Flag an execution as paused (in memory) and record it in the sync log."""
        self._paused_executions.add(execution_id)
        await self._log_message(execution_id, LogLevel.INFO, "同步任务已暂停")
        return True

    async def cancel_execution(self, execution_id: int) -> bool:
        """Flag an execution as cancelled (in memory) and record it in the sync log."""
        self._cancelled_executions.add(execution_id)
        await self._log_message(execution_id, LogLevel.INFO, "同步任务已取消")
        return True

    def is_execution_paused(self, execution_id: int) -> bool:
        """Report whether a pause has been requested for this execution."""
        flagged = execution_id in self._paused_executions
        return flagged

    def is_execution_cancelled(self, execution_id: int) -> bool:
        """Report whether a cancellation has been requested for this execution."""
        flagged = execution_id in self._cancelled_executions
        return flagged

    async def is_execution_cancelled_from_db(self, execution_id: int) -> bool:
        """Check the execution's persisted status for cancellation.

        Complements the in-memory flag — presumably so cancellations issued
        by other workers/processes are also observed (verify against
        callers). Returns False when the execution does not exist or the
        lookup fails.
        """
        try:
            # `select` is already imported at module level; the previous
            # redundant local import was removed.
            result = await self.db.execute(
                select(SyncExecution.status).where(SyncExecution.id == execution_id)
            )
            status = result.scalar_one_or_none()
            return status == ExecutionStatus.CANCELLED
        except Exception as e:
            logger.error(f"检查执行状态失败: {e}")
            return False

    async def is_execution_paused_from_db(self, execution_id: int) -> bool:
        """Check the execution's persisted status for a pause.

        Complements the in-memory flag — presumably so pauses issued by
        other workers/processes are also observed (verify against callers).
        Returns False when the execution does not exist or the lookup fails.
        """
        try:
            # `select` is already imported at module level; the previous
            # redundant local import was removed.
            result = await self.db.execute(
                select(SyncExecution.status).where(SyncExecution.id == execution_id)
            )
            status = result.scalar_one_or_none()
            return status == ExecutionStatus.PAUSED
        except Exception as e:
            logger.error(f"检查执行状态失败: {e}")
            return False

    async def pause_task(self, task_id: int) -> bool:
        """Pause a task by id (compatibility shim).

        Only flips the in-memory running flag to False; it does not
        interrupt work already in flight. Returns False when the task is
        not tracked as running.
        """
        if task_id not in self._running_tasks:
            return False
        self._running_tasks[task_id] = False
        return True

    async def cancel_task(self, task_id: int) -> bool:
        """Cancel a task by id (compatibility shim) by dropping its running flag.

        Returns False when the task is not tracked as running.
        """
        if task_id not in self._running_tasks:
            return False
        self._running_tasks.pop(task_id, None)
        return True
