# filepath: d:\CompanyProject\Gitee\DBCompareX\backend\app\models\task_data_sync\task_execution.py
"""
任务执行模块，负责数据同步任务的执行和数据同步
"""
import logging
import re
from datetime import datetime
from typing import Any, Dict, List, Optional

from bson import ObjectId

from ...database.mongodb import (
    task_data_sync_execution_log_collection,
    task_target_list_collection,
    task_source_list_collection
)

from ...models.db_info_setting import DBInfoSettingModel
from ...models.task_data_sync.setting import TaskDataSyncSettingModel
from .db_connectors import DBConnector
from .data_utils import DataUtils
from .data_comparison import DataComparison

# Module-level logging setup.
# NOTE(review): basicConfig at import time configures the root logger as a
# side effect; consider moving this to the application entry point.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class TaskExecutor:
    """Executor for data-sync tasks.

    Pipeline per run: create an execution log, pull the target-table rows,
    resolve the mapped source-side values, then hand both data sets to
    ``DataComparison`` and record the outcome on the execution log.
    """

    # MongoDB collections used by this executor (module-level async collections).
    execution_log_collection = task_data_sync_execution_log_collection
    target_list_collection = task_target_list_collection
    source_list_collection = task_source_list_collection

    @classmethod
    async def execute_task(cls, task_id: str) -> Dict[str, Any]:
        """Run one data-sync task end to end.

        Args:
            task_id: ID of the data-sync task configuration.

        Returns:
            Dict with the execution log plus row and difference counters.

        Raises:
            ValueError: if no configuration exists for ``task_id``.
            Exception: any failure during sync/compare is re-raised after
                the execution log has been marked as failed.
        """
        # Load the task configuration.
        task_setting = await TaskDataSyncSettingModel.find_by_id(task_id)
        if not task_setting:
            raise ValueError(f"未找到ID为 {task_id} 的数据同步任务配置")

        # Build the batch number: TDYYYYMMDD_<sequence within the day>.
        # NOTE(review): count_documents + insert is not atomic, so two
        # concurrent runs could mint the same batch number — confirm whether
        # runs can overlap.
        date_str = datetime.now().strftime("%Y%m%d")
        count = await cls.execution_log_collection.count_documents({
            "batch_no": {"$regex": f"^TD{date_str}_"}
        })
        batch_no = f"TD{date_str}_{count + 1}"

        execution_log_id: Optional[str] = None

        try:
            # Create the execution log (status 1 = running).
            execution_log = {
                "task_data_sync_setting_id": task_id,
                "batch_no": batch_no,
                "status": 1,  # running
                "message": "任务开始执行",
                "created_at": datetime.now()
            }
            execution_log_result = await cls.execution_log_collection.insert_one(execution_log)
            execution_log_id = str(execution_log_result.inserted_id)

            logger.info(f"任务 {task_id} 开始执行，批次号: {batch_no}")

            # 1. Pull the target data.
            target_data = await cls.sync_target_data(task_setting, execution_log_id)

            if not target_data:
                # No target rows: mark the run failed and return zero counters.
                await cls.update_execution_log(execution_log_id, -1, "同步目标数据失败，未找到数据")
                return {
                    "execution_log": await cls.get_execution_log(execution_log_id),
                    "target_data_count": 0,
                    "source_data_count": 0,
                    "contrast_result_count": 0,
                    "different_count": 0
                }

            # 2. Resolve the source data for every target row.
            source_data = await cls.sync_source_data(task_setting, execution_log_id, target_data)

            # 3. Compare both data sets and persist the comparison results.
            contrast_results, different_count = await DataComparison.compare_data(
                task_setting, execution_log_id, target_data, source_data
            )

            # Status 3 = differences found, status 2 = fully consistent.
            if different_count > 0:
                status = 3
                message = f"数据对比完成，发现 {different_count} 处不同"
            else:
                status = 2
                message = "数据对比完成，数据完全一致"

            await cls.update_execution_log(execution_log_id, status, message)

            return {
                "execution_log": await cls.get_execution_log(execution_log_id),
                "target_data_count": len(target_data),
                "source_data_count": len(source_data),
                "contrast_result_count": len(contrast_results),
                "different_count": different_count
            }
        except Exception as e:
            logger.error(f"执行任务 {task_id} 失败: {str(e)}", exc_info=True)

            # Only touch the log if it was created before the failure.
            if execution_log_id:
                await cls.update_execution_log(execution_log_id, -1, f"执行失败: {str(e)}")

            # Re-raise so the caller can handle/report the failure.
            raise

    @classmethod
    async def sync_target_data(cls, task_setting: Dict[str, Any], execution_log_id: str) -> List[Dict[str, Any]]:
        """Query the target table and persist each row into the target collection.

        Args:
            task_setting: Data-sync task configuration.
            execution_log_id: ID of the execution log this run belongs to.

        Returns:
            List of inserted target documents (``_id`` stringified). Empty
            list when the query returns no rows.

        Raises:
            ValueError: if the database configuration is missing or no
                columns are configured.
        """
        # Resolve and decrypt the target database connection settings.
        db_info_setting = await DBInfoSettingModel.find_by_id(task_setting["db_info_setting_id"])
        if not db_info_setting:
            raise ValueError(f"未找到ID为 {task_setting['db_info_setting_id']} 的数据库配置")

        db_info_setting = DBInfoSettingModel.decrypt_db_info(db_info_setting)

        # Columns to select, taken from the per-column detail entries.
        columns = [detail["column_name"] for detail in task_setting["details"]]
        if not columns:
            raise ValueError("未配置任何列名")

        # Build the SELECT statement.
        # NOTE(review): table/column names and the condition come from stored
        # configuration and are interpolated directly into SQL — they must be
        # trusted/validated upstream.
        columns_str = ", ".join(columns)
        table_name = task_setting["table_name"]
        # .get() tolerates configurations that omit "query_condition" entirely
        # (the original subscript raised KeyError in that case).
        query_condition = task_setting.get("query_condition") or ""

        sql = f"SELECT {columns_str} FROM {table_name}"
        if query_condition:
            sql += f" WHERE {query_condition}"

        # Apply the optional row limit (LIMIT for MySQL, TOP for SQL Server).
        data_limit = task_setting.get("data_limit")
        if data_limit is not None and data_limit > 0:
            db_type = db_info_setting["db_type"].lower()
            if db_type == "mysql":
                sql += f" LIMIT {data_limit}"
                logger.info(f"添加MySQL数据量限制: LIMIT {data_limit}")
            elif db_type in ("sqlserver", "fabric_sqlserver"):
                # SQL Server places TOP immediately after SELECT.
                sql = sql.replace(f"SELECT {columns_str}", f"SELECT TOP {data_limit} {columns_str}")
                logger.info(f"添加SQL Server数据量限制: TOP {data_limit}")
            else:
                logger.warning(f"不支持的数据库类型 {db_type} 的数据量限制")

        logger.info(f"执行目标数据查询SQL: {sql}")

        # Run the query (with retry) against the target database.
        db_data = await DBConnector.execute_query_with_retry(db_info_setting, sql)

        if not db_data:
            logger.warning("目标数据查询无结果")
            return []

        logger.info(f"查询到 {len(db_data)} 条目标数据")

        # Persist every returned row into the target collection.
        target_list = []
        for row in db_data:
            # Rows are positional; map them back onto the configured columns.
            data = {column: row[i] for i, column in enumerate(columns)}

            # Normalize values MongoDB cannot store natively (e.g. Decimal).
            processed_data = DataUtils.process_data_for_mongodb({"data": data})

            target_item = {
                "task_data_sync_execution_log_id": execution_log_id,
                "data": processed_data["data"]
            }

            result = await cls.target_list_collection.insert_one(target_item)

            # Expose the Mongo id as a string for downstream linking.
            target_item["_id"] = str(result.inserted_id)
            target_list.append(target_item)

        return target_list

    @classmethod
    async def sync_source_data(cls, task_setting: Dict[str, Any], execution_log_id: str, target_data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Resolve the source-side value for every target row and column.

        One source document is created per target row. Column values are
        filled either from a literal (``<value>`` condition) or via batched
        IN-queries against the configured source databases.

        Args:
            task_setting: Task configuration (``details`` + ``ts_mapping``).
            execution_log_id: ID of the execution log this run belongs to.
            target_data: Documents produced by :meth:`sync_target_data`.

        Returns:
            List of inserted source documents (``_id`` stringified).
        """
        source_list = []
        # O(1) lookup of source documents by their linked target id — replaces
        # the original per-item linear scan over source_list (O(n^2) overall).
        source_by_target_id: Dict[str, Dict[str, Any]] = {}
        # Columns for which at least one source value was resolved.
        source_data_columns = set()

        # Pending DB lookups grouped by database id, then by
        # (table, source column, query column).
        db_source_mapping: Dict[str, Dict[Any, List[Dict[str, Any]]]] = {}

        logger.info(f"开始同步来源数据, 目标数据数量: {len(target_data)}")

        # Pass 1: create one source document per target row and queue lookups.
        for target_item in target_data:
            target_item_id = target_item["_id"]

            # Source document linked back to its target row.
            source_item = {
                "task_data_sync_execution_log_id": execution_log_id,
                "task_target_list_id": target_item_id,
                "data": {}
            }

            target_item_data = target_item["data"]
            logger.debug(f"处理目标数据: {target_item_data}")

            # Resolve every configured column via its target/source mappings.
            for detail in task_setting["details"]:
                column_name = detail["column_name"]

                # Default to None until some mapping resolves a value.
                source_item["data"][column_name] = None

                mappings = [
                    m for m in task_setting["ts_mapping"]
                    if m["target_column_name"] == column_name
                ]

                if not mappings:
                    logger.debug(f"列 {column_name} 没有找到映射配置")
                    continue

                # Try mappings in priority order; the first match wins.
                sorted_mappings = sorted(mappings, key=lambda x: x["priority"])

                mapping_matched = False
                for mapping in sorted_mappings:
                    condition = mapping.get("condition")
                    source_value = None

                    if condition:
                        # Condition type 1: [field] = 'v1,v2,...' — the target
                        # field's value must be one of the listed values.
                        field_value_match = re.match(r'\[(.*?)\]\s*=\s*\'(.*)\'', condition)
                        if field_value_match:
                            field_name = field_value_match.group(1)
                            expected_values = field_value_match.group(2).split(',')

                            if field_name in target_item_data and str(target_item_data[field_name]) in expected_values:
                                logger.debug(f"条件类型1满足: [{field_name}] = '{expected_values}'")
                            else:
                                logger.debug(f"条件类型1不满足: [{field_name}] = '{expected_values}'")
                                continue

                        # Condition type 2: [field1] = [field2] — both target
                        # fields must exist and compare equal.
                        field_field_match = re.match(r'\[(.*?)\]\s*=\s*\[(.*?)\]', condition)
                        if field_field_match:
                            field1 = field_field_match.group(1)
                            field2 = field_field_match.group(2)

                            if (field1 in target_item_data and field2 in target_item_data and
                                target_item_data[field1] == target_item_data[field2]):
                                logger.debug(f"条件类型2满足: [{field1}] = [{field2}]")
                            else:
                                logger.debug(f"条件类型2不满足: [{field1}] = [{field2}]")
                                continue

                        # Condition type 3: <value> — use the literal value
                        # directly, no database lookup needed.
                        direct_value_match = re.match(r'<(.*)>', condition)
                        if direct_value_match:
                            source_value = direct_value_match.group(1)
                            source_item["data"][column_name] = source_value
                            source_data_columns.add(column_name)
                            logger.debug(f"使用固定值: {source_value} 作为 {column_name} 的值")
                            mapping_matched = True
                            break

                    # No condition, or the condition was satisfied: queue a
                    # database lookup for this column.
                    db_info_setting_id = mapping["db_info_setting_id"]
                    source_table_name = mapping["source_table_name"]
                    source_column_name = mapping["source_column_name"]
                    target_query_column = mapping.get("target_query_column")
                    source_query_column = mapping.get("source_query_column")

                    if target_query_column and source_query_column and target_query_column in target_item_data:
                        query_value = target_item_data[target_query_column]

                        db_mapping = db_source_mapping.setdefault(db_info_setting_id, {})

                        # Tuple key — unlike the original "a:b:c" string it
                        # cannot be mis-parsed when a name contains ':'.
                        table_key = (source_table_name, source_column_name, source_query_column)
                        db_mapping.setdefault(table_key, []).append({
                            "target_item_id": target_item_id,
                            "column_name": column_name,
                            "query_value": query_value
                        })

                        mapping_matched = True
                        break

                if not mapping_matched:
                    logger.debug(f"列 {column_name} 没有找到匹配的映射条件")

            # Normalize special types and persist the source document.
            processed_source_item = DataUtils.process_data_for_mongodb(source_item)
            result = await cls.source_list_collection.insert_one(processed_source_item)
            processed_source_item["_id"] = str(result.inserted_id)
            source_list.append(processed_source_item)
            source_by_target_id[target_item_id] = processed_source_item

            logger.debug(f"已创建源数据项: {processed_source_item['_id']} 关联目标数据ID: {target_item_id}")

        # Pass 2: execute the queued lookups, one batch per database.
        for db_info_setting_id, db_mapping in db_source_mapping.items():
            db_info_setting = await DBInfoSettingModel.find_by_id(db_info_setting_id)
            if not db_info_setting:
                logger.warning(f"未找到数据库配置 ID: {db_info_setting_id}")
                continue

            db_info_setting = DBInfoSettingModel.decrypt_db_info(db_info_setting)

            logger.info(f"处理数据库 {db_info_setting['connection_name']} 的源数据查询")

            for table_key, query_items in db_mapping.items():
                try:
                    source_table_name, source_column_name, source_query_column = table_key

                    # Deduplicate the lookup values for a single IN query.
                    query_values = list(set(item["query_value"] for item in query_items))

                    if not query_values:
                        logger.warning(f"表 {source_table_name} 没有查询值")
                        continue

                    logger.info(f"查询表 {source_table_name}, 列 {source_column_name}, 查询条件 {source_query_column} IN ({query_values})")

                    # Parameterized IN clause; placeholder style depends on
                    # the driver (%s for MySQL, ? for SQL Server/ODBC).
                    db_type = db_info_setting["db_type"].lower()

                    if db_type == "mysql":
                        placeholders = ",".join(["%s"] * len(query_values))
                        sql = f"SELECT {source_query_column}, {source_column_name} FROM {source_table_name} WHERE {source_query_column} IN ({placeholders})"
                    elif db_type in ("sqlserver", "fabric_sqlserver"):
                        placeholders = ",".join(["?"] * len(query_values))
                        sql = f"SELECT {source_query_column}, {source_column_name} FROM {source_table_name} WHERE {source_query_column} IN ({placeholders})"
                    else:
                        logger.warning(f"不支持的数据库类型: {db_type}")
                        continue

                    logger.info(f"执行源数据查询 SQL: {sql}")
                    logger.info(f"查询参数: {query_values}")

                    results = await DBConnector.execute_query_with_retry(db_info_setting, sql, query_values)

                    if not results:
                        logger.warning(f"表 {source_table_name} 查询结果为空")
                        continue

                    logger.info(f"查询返回 {len(results)} 条结果")

                    # Map query-column value -> source-column value. Keys are
                    # stringified so DB values match Python values; None is
                    # normalized to the literal "None".
                    result_map = {}
                    for row in results:
                        key = str(row[0]) if row[0] is not None else "None"
                        result_map[key] = DataUtils.handle_special_types(row[1])

                    logger.debug(f"查询结果映射: {result_map}")

                    # Write the resolved values back (in memory and in MongoDB).
                    for item in query_items:
                        target_item_id = item["target_item_id"]
                        column_name = item["column_name"]
                        query_value = item["query_value"]
                        query_key = str(query_value) if query_value is not None else "None"

                        # O(1) index lookup instead of scanning source_list.
                        source_item = source_by_target_id.get(target_item_id)

                        if source_item:
                            source_item_id = source_item["_id"]

                            if query_key in result_map:
                                source_value = result_map[query_key]

                                # Update the in-memory document...
                                source_item["data"][column_name] = source_value
                                source_data_columns.add(column_name)

                                logger.debug(f"更新源数据 ID: {source_item_id}, 列: {column_name}, 值: {source_value}")

                                # ...and the persisted one.
                                await cls.source_list_collection.update_one(
                                    {"_id": ObjectId(source_item_id)},
                                    {"$set": {f"data.{column_name}": source_value}}
                                )
                            else:
                                logger.debug(f"查询值 {query_key} 在结果中未找到")
                        else:
                            logger.warning(f"目标数据 ID: {target_item_id} 未找到对应的源数据")
                except Exception as e:
                    # Best-effort: one failing table must not abort the others.
                    logger.error(f"处理表 {table_key} 时出错: {str(e)}", exc_info=True)

        logger.info(f"同步来源数据完成, 成功设置了 {len(source_data_columns)} 列: {source_data_columns}")
        return source_list

    @classmethod
    async def get_execution_log(cls, execution_log_id: str) -> Optional[Dict[str, Any]]:
        """Fetch an execution log by id.

        Args:
            execution_log_id: Execution-log document id (hex string).

        Returns:
            The log document with ``_id`` stringified, or None when absent.
        """
        result = await cls.execution_log_collection.find_one({"_id": ObjectId(execution_log_id)})
        if result:
            result["_id"] = str(result["_id"])
        return result

    @classmethod
    async def update_execution_log(cls, execution_log_id: str, status: int, message: str) -> None:
        """Update an execution log's status and message.

        Args:
            execution_log_id: Execution-log document id (hex string).
            status: New status (1 running, 2 consistent, 3 differences
                found, -1 failed — as used by :meth:`execute_task`).
            message: Human-readable status message.
        """
        await cls.execution_log_collection.update_one(
            {"_id": ObjectId(execution_log_id)},
            {"$set": {"status": status, "message": message}}
        )