"""
简化的调度服务模块

统一的业务逻辑层，协调数据解析、验证、求解和结果处理的完整流程。
移除复杂的工序分类处理，实现统一的工序处理逻辑。
"""

# NOTE(review): several imports below look unused here (datetime, DataParsingError,
# Activity/OptimizationConfig/Precedence/ProjectData, create_default_config) —
# confirm no external consumers rely on re-exports before pruning.
from collections import defaultdict, deque
from datetime import datetime
from typing import Any, Dict, List, Optional

from .error_handlers import ErrorContext, handle_sync_scheduler_errors
from .exceptions import (
    DataParsingError,
    OptimizationError,
    SchedulerError,
    ValidationError,
)
from .logging_config import PerformanceLogger, get_logger
from .simplified_cpsat_solver import SimplifiedCPSATSolver, SimplifiedSolverResult
from .simplified_data_models import (
    Activity,
    OptimizationConfig,
    Precedence,
    ProjectData,
    create_default_config,
)
from .simplified_data_parser import SimplifiedDataParser

# Module-level logger for this service.
logger = get_logger("scheduler_service")


class SimplifiedSchedulerService:
    """简化的调度服务类

    提供统一的工序处理流程，简化的分区逻辑处理，
    工期对比计算功能，以及循环依赖检测逻辑。
    """

    def __init__(self):
        """Initialize the service with its parser and solver collaborators."""
        # Turns raw CSV text into the project-data dict used by the pipeline.
        self.parser = SimplifiedDataParser()
        # CP-SAT based solver that performs the actual optimization.
        self.solver = SimplifiedCPSATSolver()

    @handle_sync_scheduler_errors
    def optimize_schedule(
        self, csv_content: str, config: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Run the end-to-end schedule-optimization pipeline.

        Steps: normalize config -> parse CSV -> apply config to project data
        -> validate data and worker capacities -> reject circular dependencies
        -> solve with CP-SAT -> assemble the result payload.

        Args:
            csv_content: Raw CSV file content.
            config: Optimization parameters; None selects the defaults.

        Returns:
            Result dict with success flag, makespan, schedule, duration
            comparison, solve time and optional worker/resource/subdivision
            details.

        Raises:
            ValidationError: On invalid project data or circular dependencies.
            OptimizationError: When the solver ends in a non-feasible status.
        """
        with PerformanceLogger("调度优化") as perf_logger:
            with ErrorContext("调度优化") as error_ctx:
                # 1. Validate and normalize the configuration parameters.
                logger.info("开始处理配置参数")
                optimization_config = self._process_config(config)
                error_ctx.add_context("配置参数", optimization_config)

                # 2. Parse the CSV into the project-data dict.
                logger.info("开始解析CSV数据")
                project_data = self.parser.parse_csv_data(csv_content)
                error_ctx.add_context("工序数量", len(project_data["activities"]))
                error_ctx.add_context("前置关系数量", len(project_data["precedences"]))

                # 3. Merge the configuration into the parsed project data.
                logger.info("应用配置参数到项目数据")
                self._apply_config_to_project_data(project_data, optimization_config)
                
                # Log the effective worker-capacity configuration.
                worker_capacities = project_data.get("worker_capacities", {})
                if worker_capacities:
                    total_capacity = sum(worker_capacities.values())
                    logger.info(f"工种容量配置已应用: {len(worker_capacities)} 种工人，总容量: {total_capacity}")
                    for worker_type, capacity in worker_capacities.items():
                        logger.debug(f"  {worker_type}: {capacity} 人")
                else:
                    logger.warning("未找到工种容量配置")

                # 4. Structural validation of the project data.
                logger.info("验证项目数据")
                self._validate_project_data(project_data)
                
                # 5. Check worker capacities against per-activity requirements.
                logger.info("验证工种容量与需求匹配")
                self._validate_worker_capacity_requirements(project_data)

                # 6. Reject cyclic precedence graphs before solving.
                logger.info("检查循环依赖")
                if self._has_circular_dependencies(project_data["precedences"]):
                    cycles = self._find_circular_dependencies(
                        project_data["precedences"]
                    )
                    cycle_descriptions = []
                    for cycle in cycles[:3]:  # only report the first 3 cycles
                        cycle_str = " -> ".join(map(str, cycle))
                        cycle_descriptions.append(cycle_str)

                    error_msg = "检测到循环依赖关系，无法生成有效的调度计划。"
                    if cycle_descriptions:
                        error_msg += f" 循环路径: {'; '.join(cycle_descriptions)}"

                    raise ValidationError(error_msg)

                # 7. Run the CP-SAT solver.
                logger.info("开始执行优化求解")
                
                # Make the worker-capacity hand-off to the solver visible in logs.
                if "worker_capacities" in project_data:
                    logger.info(f"传递工种容量配置给求解器: {project_data['worker_capacities']}")
                
                solver_result = self.solver.solve(
                    project_data, optimization_config
                )
                perf_logger.add_context("求解状态", solver_result.status)

                # 8. Assemble the result payload, or fail with the solver status.
                if solver_result.status in ["OPTIMAL", "FEASIBLE"]:
                    logger.info(
                        f"求解成功，状态: {solver_result.status}, 工期: {solver_result.makespan}"
                    )

                    # Baseline-vs-optimized duration comparison.
                    duration_comparison = self._calculate_duration_comparison(
                        project_data, solver_result
                    )

                    # Core result payload.
                    result = {
                        "success": True,
                        "makespan": solver_result.makespan,
                        "schedule": solver_result.schedule,
                        "duration_comparison": duration_comparison,
                        "solve_time": perf_logger.context_data.get("duration", 0),
                        "status": solver_result.status,
                    }
                    
                    # Attach worker-allocation details when the solver provides them.
                    if solver_result.worker_allocation:
                        result["worker_allocation"] = solver_result.worker_allocation
                        # Mirror per-activity assignments into the schedule entries;
                        # the allocation map is keyed by int, hence the cast.
                        if result["schedule"]:
                            for activity_id, activity_data in result["schedule"].items():
                                activity_id_int = int(activity_id)
                                if activity_id_int in solver_result.worker_allocation:
                                    activity_data["worker_assignments"] = solver_result.worker_allocation[activity_id_int]
                    
                    # Attach per-resource usage summaries when data is available.
                    if project_data.get("resources") and project_data.get("resource_demands"):
                        result["resource_usage"] = self._calculate_resource_usage(
                            project_data, solver_result
                        )
                    
                    # Attach subdivision details when subdivision was applied.
                    if solver_result.subdivision_info:
                        result["subdivision_summary"] = solver_result.subdivision_info
                    
                    return result
                else:
                    error_message = (
                        solver_result.error_message
                        or f"求解失败: {solver_result.status}"
                    )
                    logger.error(f"求解失败: {error_message}")
                    raise OptimizationError(error_message, solver_result.status)

    def optimize_schedule_from_file(
        self, csv_path: str, config: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        从CSV文件执行调度优化

        Args:
            csv_path: CSV文件路径
            config: 优化配置参数

        Returns:
            优化结果字典
        """
        try:
            # 读取CSV文件内容
            csv_content = self._read_csv_file(csv_path)
            return self.optimize_schedule(csv_content, config)
        except FileNotFoundError:
            return {
                "success": False,
                "error_message": f"CSV文件不存在: {csv_path}",
                "solve_time": 0,
            }
        except Exception as e:
            return {
                "success": False,
                "error_message": f"读取CSV文件失败: {str(e)}",
                "solve_time": 0,
            }

    def _process_config(self, config: Optional[Dict[str, Any]]) -> Dict[str, Any]:
        """
        处理和验证配置参数

        Args:
            config: 原始配置字典

        Returns:
            处理后的配置字典
        """
        if config is None:
            return {
                "solve_time_limit": 300,
                "optimization_objective": "minimize_makespan",
                "subdivision_config": {
                    "enable_subdivision": False,
                    "subdivision_count": 2,
                    "resource_efficiency": 0.75,
                }
            }

        # 直接返回配置字典，不进行复杂的对象转换
        processed_config = config.copy()
        
        # 设置默认值
        if "solve_time_limit" not in processed_config:
            processed_config["solve_time_limit"] = 300
        if "optimization_objective" not in processed_config:
            processed_config["optimization_objective"] = "minimize_makespan"
        
        return processed_config

    def _apply_config_to_project_data(
        self, project_data: Dict[str, Any], config: Dict[str, Any]
    ) -> None:
        """
        Merge user configuration into the parsed project data in place.

        Resource capacities overwrite parsed values, worker capacities are
        validated (falling back to the built-in defaults when absent) and the
        optional subdivision logic is applied to subdividable activities.

        Args:
            project_data: Parsed project-data dict (mutated in place).
            config: Normalized configuration dict.
        """
        # Override the capacity of every configured resource type.
        for entry in config.get("resource_capacities", []):
            project_data["resources"][entry["resource_type"]] = entry["capacity"]

        raw_worker_capacities = config.get("worker_capacities", {})
        if raw_worker_capacities:
            # Validate the supplied capacities before handing them to the solver.
            validated_capacities = self._validate_worker_capacities(raw_worker_capacities)
            project_data.setdefault("worker_capacities", {}).update(validated_capacities)
            logger.info(f"调度服务应用工种容量配置: {validated_capacities}")
        else:
            # No worker configuration supplied - fall back to the defaults.
            default_worker_capacities = self._get_default_worker_capacities()
            project_data["worker_capacities"] = default_worker_capacities
            logger.info(f"调度服务使用默认工种容量配置: {default_worker_capacities}")

        # Shorten subdividable activities when subdivision is enabled.
        subdivision_config = config.get("subdivision_config", {})
        if subdivision_config.get("enable_subdivision", False):
            self._apply_subdivision_logic(project_data, subdivision_config)

    def _apply_subdivision_logic(
        self, project_data: Dict[str, Any], subdivision_config: Dict[str, Any]
    ) -> None:
        """
        应用简化的分区逻辑到可分区的工序

        Args:
            project_data: 项目数据字典
            subdivision_config: 分区配置对象
        """
        subdivision_count = subdivision_config.get("subdivision_count", 2)
        resource_efficiency = subdivision_config.get("resource_efficiency", 0.75)

        # 找到所有可分区的工序
        subdividable_activities = [
            act for act in project_data["activities"] if act.get("can_subdivide", False)
        ]

        # 对可分区工序进行简单的分区处理
        for activity in subdividable_activities:
            original_duration = activity["duration"]
            # 简化分区逻辑：工期按分区数量和效率调整
            new_duration = max(
                1, int(original_duration / subdivision_count * resource_efficiency)
            )

            if new_duration != original_duration:
                activity["duration"] = new_duration
                activity["subdivided"] = True
                activity["original_duration"] = original_duration

    def _validate_project_data(self, project_data: Dict[str, Any]) -> None:
        """
        Validate structural integrity of the parsed project data.

        Checks required top-level keys, activity field completeness and ID
        uniqueness, positive durations, precedence references and dependency
        types, and resource demands against declared resource capacities.
        Errors are accumulated and reported together in one exception.

        Args:
            project_data: Parsed project-data dict.

        Raises:
            ValidationError: If any structural or referential check fails.
        """
        errors = []

        # Top-level structure: all required keys must be present.
        required_fields = ["activities", "precedences", "resources", "resource_demands"]
        for field in required_fields:
            if field not in project_data:
                errors.append(f"缺少必需字段: {field}")

        if errors:
            # Missing keys make the remaining checks impossible; fail early.
            raise ValidationError("项目数据结构不完整", "; ".join(errors))

        # Activities: non-empty, complete, unique IDs, positive durations.
        activities = project_data["activities"]
        if not activities:
            raise ValidationError("工序列表不能为空")

        activity_ids = set()
        for activity in activities:
            # Required per-activity fields.
            if (
                "id" not in activity
                or "name" not in activity
                or "duration" not in activity
            ):
                errors.append(f"工序数据不完整: {activity}")
                continue

            # ID uniqueness.
            activity_id = activity["id"]
            if activity_id in activity_ids:
                errors.append(f"工序ID重复: {activity_id}")
            activity_ids.add(activity_id)

            # Durations must be strictly positive.
            if activity["duration"] <= 0:
                errors.append(
                    f"工序 {activity_id} 的工期必须大于0，当前值: {activity['duration']}"
                )

        # Precedences: both endpoints must exist and the type must be known.
        precedences = project_data["precedences"]
        for precedence in precedences:
            from_id = precedence.get("from")
            to_id = precedence.get("to")

            if from_id not in activity_ids:
                errors.append(f"前置关系中引用了不存在的工序ID: {from_id}")

            if to_id not in activity_ids:
                errors.append(f"前置关系中引用了不存在的工序ID: {to_id}")

            # Only the four standard dependency types are supported.
            dep_type = precedence.get("type")
            if dep_type not in ["FS", "SS", "FF", "SF"]:
                errors.append(f"无效的依赖类型: {dep_type}")

        # Resource demands: valid activity refs, known types, sane amounts.
        resource_demands = project_data["resource_demands"]
        resources = project_data["resources"]

        for activity_id_str, demands in resource_demands.items():
            try:
                # Demand keys are string activity IDs; map back to ints.
                activity_id = int(activity_id_str)
                if activity_id not in activity_ids:
                    errors.append(f"资源需求中引用了不存在的工序ID: {activity_id}")
            except ValueError:
                errors.append(f"资源需求中包含无效的工序ID: {activity_id_str}")

            # Each demanded resource type must be declared.
            for resource_type, demand in demands.items():
                if resource_type not in resources:
                    errors.append(
                        f"工序 {activity_id_str} 引用了不存在的资源类型: {resource_type}"
                    )

                # Demands must be positive and within the declared capacity.
                if demand <= 0:
                    errors.append(
                        f"工序 {activity_id_str} 对资源 {resource_type} 的需求量必须大于0"
                    )
                elif resource_type in resources and demand > resources[resource_type]:
                    errors.append(
                        f"工序 {activity_id_str} 对资源 {resource_type} 的需求量 ({demand}) "
                        f"超过了可用容量 ({resources[resource_type]})"
                    )

        if errors:
            raise ValidationError("项目数据验证失败", "; ".join(errors))

    def _has_circular_dependencies(self, precedences: List[Dict]) -> bool:
        """
        使用拓扑排序检测循环依赖

        Args:
            precedences: 前后置关系列表

        Returns:
            是否存在循环依赖
        """
        if not precedences:
            return False

        # 构建邻接表和入度表
        graph: Dict[int, List[int]] = defaultdict(list)
        in_degree: Dict[int, int] = defaultdict(int)
        all_nodes: set[int] = set()

        # 构建图结构
        for precedence in precedences:
            from_id = precedence["from"]
            to_id = precedence["to"]

            graph[from_id].append(to_id)
            in_degree[to_id] += 1
            all_nodes.add(from_id)
            all_nodes.add(to_id)

        # 初始化所有节点的入度（确保没有依赖关系的节点入度为0）
        for node in all_nodes:
            if node not in in_degree:
                in_degree[node] = 0

        # 拓扑排序 - Kahn算法
        queue = deque([node for node in all_nodes if in_degree[node] == 0])
        processed_count = 0

        while queue:
            current_node = queue.popleft()
            processed_count += 1

            # 处理当前节点的所有邻居
            for neighbor in graph[current_node]:
                in_degree[neighbor] -= 1
                if in_degree[neighbor] == 0:
                    queue.append(neighbor)

        # 如果处理的节点数少于总节点数，说明存在循环依赖
        return processed_count < len(all_nodes)

    def _find_circular_dependencies(self, precedences: List[Dict]) -> List[List[int]]:
        """
        查找并返回所有循环依赖路径

        Args:
            precedences: 前后置关系列表

        Returns:
            循环依赖路径列表，每个路径是一个工序ID列表
        """
        if not precedences:
            return []

        # 构建邻接表
        graph = defaultdict(list)
        all_nodes = set()

        for precedence in precedences:
            from_id = precedence["from"]
            to_id = precedence["to"]
            graph[from_id].append(to_id)
            all_nodes.add(from_id)
            all_nodes.add(to_id)

        # 使用DFS检测循环
        visited = set()
        rec_stack = set()
        cycles = []

        def dfs(node: int, path: List[int]) -> bool:
            if node in rec_stack:
                # 找到循环，提取循环路径
                cycle_start = path.index(node)
                cycle = path[cycle_start:] + [node]
                cycles.append(cycle)
                return True

            if node in visited:
                return False

            visited.add(node)
            rec_stack.add(node)
            path.append(node)

            for neighbor in graph[node]:
                if dfs(neighbor, path):
                    return True

            rec_stack.remove(node)
            path.pop()
            return False

        # 对每个未访问的节点执行DFS
        for node in all_nodes:
            if node not in visited:
                dfs(node, [])

        return cycles

    def _calculate_resource_usage(
        self, 
        project_data: Dict[str, Any], 
        solver_result: SimplifiedSolverResult
    ) -> List[Dict[str, Any]]:
        """
        计算资源使用情况
        
        Args:
            project_data: 项目数据
            solver_result: 求解结果
            
        Returns:
            资源使用情况列表
        """
        resources = project_data.get("resources", {})
        resource_demands = project_data.get("resource_demands", {})
        
        if not resources or not resource_demands:
            return []
        
        resource_usage = []
        
        for resource_type, total_capacity in resources.items():
            # 计算总使用量和峰值使用量
            total_used = 0
            peak_usage = 0
            peak_time = 0
            
            # 遍历所有工序的资源需求
            for activity_id_str, demands in resource_demands.items():
                if resource_type in demands:
                    used = demands[resource_type]
                    total_used += used
                    if used > peak_usage:
                        peak_usage = used
                        # 尝试从调度结果中获取时间信息
                        if solver_result.schedule and activity_id_str in solver_result.schedule:
                            peak_time = solver_result.schedule[activity_id_str].get("start_time", 0)
            
            # 计算利用率
            utilization_rate = peak_usage / total_capacity if total_capacity > 0 else 0
            
            resource_usage.append({
                "resource_type": resource_type,
                "total_capacity": total_capacity,
                "used_capacity": total_used,
                "utilization_rate": utilization_rate,
                "peak_usage": peak_usage,
                "peak_time": peak_time,
            })
        
        return resource_usage

    def _calculate_duration_comparison(
        self, project_data: Dict[str, Any], solver_result: SimplifiedSolverResult
    ) -> Dict[str, Any]:
        """
        计算工期对比信息

        Args:
            project_data: 项目数据
            solver_result: 求解结果

        Returns:
            工期对比信息字典
        """
        # 计算原始工期（关键路径法）
        original_makespan = self._calculate_critical_path_makespan(project_data)

        optimized_makespan = solver_result.makespan or 0
        improvement = original_makespan - optimized_makespan
        improvement_percentage = 0

        if original_makespan > 0:
            improvement_percentage = round(improvement / original_makespan * 100, 1)

        return {
            "original_makespan": original_makespan,
            "optimized_makespan": optimized_makespan,
            "improvement": improvement,
            "improvement_percentage": improvement_percentage,
        }

    def _calculate_critical_path_makespan(self, project_data: Dict[str, Any]) -> int:
        """
        使用关键路径法计算原始工期

        Args:
            project_data: 项目数据

        Returns:
            原始工期（不考虑资源约束）
        """
        activities = project_data["activities"]
        precedences = project_data["precedences"]

        if not activities:
            return 0

        # 构建活动字典
        activity_dict = {act["id"]: act for act in activities}

        # 构建前向邻接表
        successors = defaultdict(list)
        predecessors = defaultdict(list)

        for prec in precedences:
            from_id = prec["from"]
            to_id = prec["to"]
            successors[from_id].append(prec)
            predecessors[to_id].append(prec)

        # 计算最早开始时间和最早结束时间
        early_start = {}
        early_finish = {}

        # 拓扑排序计算最早时间
        in_degree = defaultdict(int)
        for act in activities:
            in_degree[act["id"]] = len(predecessors[act["id"]])

        queue = deque([act["id"] for act in activities if in_degree[act["id"]] == 0])

        while queue:
            current_id = queue.popleft()
            current_activity = activity_dict[current_id]

            # 计算最早开始时间
            max_early_start = 0
            for prec in predecessors[current_id]:
                pred_id = prec["from"]
                if pred_id in early_finish:
                    # 简化处理，主要考虑FS关系
                    if prec["type"] == "FS":
                        required_start = early_finish[pred_id] + prec["lag"]
                    elif prec["type"] == "SS":
                        required_start = early_start.get(pred_id, 0) + prec["lag"]
                    else:
                        required_start = early_finish.get(pred_id, 0) + prec["lag"]

                    max_early_start = max(max_early_start, required_start)

            early_start[current_id] = max_early_start
            early_finish[current_id] = max_early_start + current_activity["duration"]

            # 更新后续活动的入度
            for succ_prec in successors[current_id]:
                succ_id = succ_prec["to"]
                in_degree[succ_id] -= 1
                if in_degree[succ_id] == 0:
                    queue.append(succ_id)

        # 返回最大的结束时间作为项目工期
        if early_finish:
            return max(early_finish.values())
        else:
            return sum(act["duration"] for act in activities)

    def _validate_worker_capacities(self, worker_capacities: Dict[str, Any]) -> Dict[str, int]:
        """
        验证和清理工种容量配置
        
        Args:
            worker_capacities: 原始工种容量配置
            
        Returns:
            验证后的工种容量配置
            
        Raises:
            ValidationError: 配置验证失败
        """
        validated_capacities = {}
        errors = []
        warnings = []
        
        for worker_type, capacity in worker_capacities.items():
            # 验证工种名称
            if not isinstance(worker_type, str) or not worker_type.strip():
                errors.append(f"工种名称无效: {worker_type}")
                continue
                
            # 验证容量值
            try:
                capacity_int = int(capacity)
                if capacity_int < 0:
                    errors.append(f"工种 '{worker_type}' 的容量不能为负数: {capacity}")
                    continue
                elif capacity_int == 0:
                    warnings.append(f"工种 '{worker_type}' 的容量为0，将无法分配任务")
                elif capacity_int > 200:
                    warnings.append(f"工种 '{worker_type}' 的容量较大({capacity_int})，请确认是否合理")
                
                validated_capacities[worker_type.strip()] = capacity_int
                
            except (ValueError, TypeError):
                errors.append(f"工种 '{worker_type}' 的容量必须为整数: {capacity}")
                continue
        
        # 记录警告
        for warning in warnings:
            logger.warning(warning)
        
        # 如果有错误，抛出异常
        if errors:
            error_message = "工种容量配置验证失败: " + "; ".join(errors)
            logger.error(error_message)
            raise ValidationError(error_message)
        
        # 如果没有有效的工种配置，使用默认配置
        if not validated_capacities:
            logger.warning("没有有效的工种配置，使用默认配置")
            return self._get_default_worker_capacities()
        
        return validated_capacities

    def _validate_worker_capacity_requirements(self, project_data: Dict[str, Any]) -> None:
        """
        Cross-check worker capacities against per-activity requirements.

        Aggregates, per worker type, the maximum single-activity demand, the
        total demand and the number of activities involved. A single-activity
        demand exceeding the configured capacity is a hard error; extreme
        utilization and missing worker types only produce warnings. Skips
        silently when either capacities or requirements are absent.

        Args:
            project_data: Parsed project-data dict.

        Raises:
            ValidationError: If any single-activity demand exceeds capacity.
        """
        worker_capacities = project_data.get("worker_capacities", {})
        worker_requirements = project_data.get("worker_requirements", {})
        
        if not worker_capacities:
            logger.warning("未找到工种容量配置，跳过工种容量验证")
            return
            
        if not worker_requirements:
            logger.info("未找到工种需求数据，跳过工种容量验证")
            return
        
        errors = []
        warnings = []
        
        # Aggregate demand statistics per worker type.
        worker_stats = {}
        
        for activity_id, requirements in worker_requirements.items():
            for worker_type, demand in requirements.items():
                if worker_type not in worker_stats:
                    worker_stats[worker_type] = {
                        "max_single_demand": 0,
                        "total_demand": 0,
                        "activity_count": 0
                    }
                
                worker_stats[worker_type]["max_single_demand"] = max(
                    worker_stats[worker_type]["max_single_demand"], demand
                )
                worker_stats[worker_type]["total_demand"] += demand
                worker_stats[worker_type]["activity_count"] += 1
        
        # Check each worker type's statistics against its configured capacity.
        for worker_type, stats in worker_stats.items():
            max_demand = stats["max_single_demand"]
            total_demand = stats["total_demand"]
            activity_count = stats["activity_count"]
            
            if worker_type in worker_capacities:
                capacity = worker_capacities[worker_type]
                
                # Hard error: one activity alone needs more workers than exist.
                if max_demand > capacity:
                    errors.append(
                        f"工种 '{worker_type}' 的最大单个工序需求({max_demand}) "
                        f"超过了可用容量({capacity})"
                    )
                
                # Soft warnings on extreme utilization (peak demand / capacity).
                if capacity > 0:
                    utilization = (max_demand / capacity) * 100
                    if utilization > 90:
                        warnings.append(
                            f"工种 '{worker_type}' 利用率较高({utilization:.1f}%)，"
                            f"可能影响调度灵活性"
                        )
                    elif utilization < 20:
                        warnings.append(
                            f"工种 '{worker_type}' 利用率较低({utilization:.1f}%)，"
                            f"可能存在资源浪费"
                        )
                
                logger.info(
                    f"工种 '{worker_type}': 容量={capacity}, 最大需求={max_demand}, "
                    f"总需求={total_demand}, 涉及工序={activity_count}"
                )
            else:
                # Required worker type not configured - warn, do not fail.
                warnings.append(
                    f"项目需要工种 '{worker_type}'，但未在容量配置中找到，"
                    f"将使用通用工人替代"
                )
        
        # Emit all collected warnings.
        for warning in warnings:
            logger.warning(warning)
        
        # Hard errors abort the pipeline.
        if errors:
            error_message = "工种容量验证失败: " + "; ".join(errors)
            logger.error(error_message)
            raise ValidationError(error_message)
        
        logger.info(f"工种容量验证通过: {len(worker_stats)} 种工人类型")

    def _get_default_worker_capacities(self) -> Dict[str, int]:
        """
        获取默认工种容量配置
        
        Returns:
            默认工种容量字典
        """
        return {
            "钢筋工": 12,
            "泥工": 10,
            "架子工": 12,
            "模板工": 20,
            "电工": 2,
            "焊工": 2,
            "起重工": 2,
            "通用工人": 5,
            "挖掘工": 8,  # 添加挖掘工默认配置
        }

    def _read_csv_file(self, csv_path: str) -> str:
        """
        读取CSV文件内容

        Args:
            csv_path: CSV文件路径

        Returns:
            文件内容字符串
        """
        encodings = ["utf-8", "gbk", "gb2312", "cp936"]

        for encoding in encodings:
            try:
                with open(csv_path, "r", encoding=encoding) as f:
                    return f.read()
            except UnicodeDecodeError:
                continue
            except FileNotFoundError:
                raise
            except Exception:
                continue

        raise SchedulerError(f"无法读取文件，尝试了所有编码: {encodings}")


# 便捷函数
def optimize_schedule_from_csv(
    csv_content: str, config: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """
    Convenience wrapper: optimize a schedule from in-memory CSV content.

    A fresh SimplifiedSchedulerService is created per call, so no state is
    shared between invocations.

    Args:
        csv_content: CSV file content.
        config: Optional optimization parameters.

    Returns:
        The optimization result dict.
    """
    return SimplifiedSchedulerService().optimize_schedule(csv_content, config)


def optimize_schedule_from_file(
    csv_path: str, config: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """
    Convenience wrapper: optimize a schedule from a CSV file on disk.

    A fresh SimplifiedSchedulerService is created per call, so no state is
    shared between invocations.

    Args:
        csv_path: Path of the CSV file.
        config: Optional optimization parameters.

    Returns:
        The optimization result dict.
    """
    return SimplifiedSchedulerService().optimize_schedule_from_file(csv_path, config)
