"""
简化的数据解析器

移除复杂的文本识别逻辑，只依赖CSV文件中的"划分条件"列来决定工序分区。
支持动态资源类型解析，保留原有的前置关系解析逻辑。
"""

import re
from io import StringIO
from typing import Any, Dict, List, Optional

import pandas as pd

from .error_handlers import ErrorContext, handle_sync_scheduler_errors
from .exceptions import DataParsingError, ValidationError, WorkerValidationError
from .logging_config import get_logger
from .simplified_data_models import Activity, Precedence, ProjectData, WorkerType

# Module-level logger for this parser.
logger = get_logger("data_parser")


class SimplifiedDataParser:
    """Simplified CSV parser for project-scheduling data.

    Relies solely on the "划分条件" (subdivision condition) column to decide
    whether an activity may be subdivided, supports dynamic resource columns
    (``Resource_*``), and keeps the classic predecessor-string parsing
    (e.g. ``"3FS+2"``).
    """

    # Logical columns every CSV must provide (Chinese/English aliases are
    # resolved later by _find_column / _validate_required_columns).
    REQUIRED_COLUMNS = ["ID", "Name", "Duration", "Predecessors"]

    # Matches dynamic resource columns named "Resource_<name>".
    RESOURCE_PATTERN = re.compile(r"^Resource_(.+)$")

    # Parses one predecessor token: id, optional type (FS/SS/FF/SF),
    # optional signed lag.  NOTE: used with .match(), i.e. prefix matching.
    DEPENDENCY_PATTERN = re.compile(r"(\d+)(FS|SS|FF|SF)?([+-]\d+)?")

    # Accepted column-name aliases for the worker-type / worker-count columns.
    WORKER_COLUMNS = {
        "工种": ["工种", "worker_type", "Worker_Type", "工种类型"],
        "工人数量": ["工人数量", "worker_count", "Worker_Count", "人数", "工人需求"],
    }

    # Built-in worker types with their default crew capacities.
    DEFAULT_WORKER_TYPES = {
        "钢筋工": {"capacity": 25},  # rebar worker
        "泥工": {"capacity": 30},  # mason / concrete worker
        "架子工": {"capacity": 15},  # scaffolder
        "模板工": {"capacity": 20},  # formwork carpenter
        "电工": {"capacity": 10},  # electrician
        "焊工": {"capacity": 8},  # welder
        "起重工": {"capacity": 6},  # crane / rigging worker
        "挖掘工": {"capacity": 20},  # excavation worker
        "通用工人": {"capacity": 40},  # general laborer (fallback type)
    }

    @handle_sync_scheduler_errors
    def parse_csv_data(self, csv_content: str) -> Dict[str, Any]:
        """
        Parse CSV content and return the standardized project structure.

        Args:
            csv_content: CSV file content as a string.

        Returns:
            Standardized project data dictionary (activities, precedences,
            resources, resource demands, worker requirements, stats).

        Raises:
            DataParsingError: If the CSV cannot be parsed or validated.
        """
        with ErrorContext("CSV数据解析") as error_ctx:
            try:
                # Load the CSV text into a DataFrame.
                logger.info("开始读取CSV数据")
                df = pd.read_csv(StringIO(csv_content))
                error_ctx.add_context("数据行数", len(df))
                error_ctx.add_context("数据列数", len(df.columns))

                # Ensure all required logical columns are present.
                logger.info("验证必需列")
                self._validate_required_columns(df)

                # Basic type/integrity checks on ID / Duration / Name.
                logger.info("验证基础数据")
                self._validate_basic_data(df)

                # Parse activity rows.
                activities = self._parse_activities(df)

                # Parse predecessor relations.
                precedences = self._parse_precedences(df)

                # Parse dynamic Resource_* columns.
                resources, resource_demands = self._parse_resources(df)

                # Parse per-activity worker-type requirements.
                worker_requirements = self._parse_worker_requirements(df)

                # Assemble the standard structure and cross-validate.
                standardized_data = self._convert_to_standard_format(
                    activities, precedences, resources, resource_demands, worker_requirements
                )

                return standardized_data

            except pd.errors.EmptyDataError:
                raise DataParsingError("CSV文件为空或无有效数据")
            except pd.errors.ParserError as e:
                raise DataParsingError(f"CSV格式错误: {str(e)}")
            except UnicodeDecodeError as e:
                raise DataParsingError(f"文件编码错误: {str(e)}")
            except Exception as e:
                # Re-raise our own error untouched; wrap anything else.
                if isinstance(e, DataParsingError):
                    raise
                raise DataParsingError(f"CSV解析失败: {str(e)}")

    def parse_csv_file(self, csv_path: str) -> Dict[str, Any]:
        """
        Parse a CSV file from disk.

        Args:
            csv_path: Path to the CSV file.

        Returns:
            Standardized project data dictionary.

        Raises:
            FileNotFoundError: If the file does not exist.
            DataParsingError: If reading or parsing fails.
        """
        try:
            # Decode with encoding fallback, then delegate to the string parser.
            return self.parse_csv_data(self._read_file_with_encoding(csv_path))
        except FileNotFoundError:
            raise FileNotFoundError(f"CSV文件不存在: {csv_path}")
        except DataParsingError:
            raise
        except Exception as e:
            raise DataParsingError(f"读取CSV文件失败: {str(e)}")

    def _read_file_with_encoding(self, csv_path: str) -> str:
        """
        尝试不同编码读取文件

        Args:
            csv_path: CSV文件路径

        Returns:
            文件内容字符串
        """
        encodings = ["utf-8", "gbk", "gb2312", "cp936"]

        for encoding in encodings:
            try:
                with open(csv_path, "r", encoding=encoding) as f:
                    return f.read()
            except UnicodeDecodeError:
                continue
            except FileNotFoundError:
                # 重新抛出FileNotFoundError
                raise
            except Exception:
                continue

        raise DataParsingError(f"无法读取文件，尝试了所有编码: {encodings}")

    def _validate_required_columns(self, df: pd.DataFrame) -> None:
        """
        验证必需列是否存在

        Args:
            df: DataFrame

        Raises:
            DataParsingError: 当缺少必需列时
        """
        if df.empty:
            raise DataParsingError("CSV文件没有数据行")

        # 检查必需列是否存在（支持中英文列名）
        required_mappings = {
            "ID": ["ID", "id", "编号", "工序编号"],
            "Name": ["Name", "name", "名称", "工序名称", "任务名称"],
            "Duration": ["Duration", "duration", "工期", "持续时间"],
            "Predecessors": [
                "Predecessors",
                "predecessors",
                "前置任务",
                "前置工序",
                "依赖关系",
            ],
        }

        missing_columns = []
        for required_col, possible_names in required_mappings.items():
            found = False
            for possible_name in possible_names:
                if possible_name in df.columns:
                    found = True
                    break
            if not found:
                missing_columns.append(f"{required_col} ({'/'.join(possible_names)})")

        if missing_columns:
            raise DataParsingError(f"CSV文件缺少必需的列: {', '.join(missing_columns)}")

    def _validate_basic_data(self, df: pd.DataFrame) -> None:
        """
        验证基础数据类型和完整性

        Args:
            df: DataFrame

        Raises:
            DataParsingError: 当数据验证失败时
        """
        errors = []

        # 验证ID列
        id_col = self._find_column(df, ["ID", "id", "编号", "工序编号"])
        if id_col:
            try:
                id_series = pd.to_numeric(df[id_col], errors="coerce")
                if id_series.isna().any():
                    invalid_ids = df.loc[id_series.isna(), id_col].tolist()
                    errors.append(f"ID列包含非数值数据: {invalid_ids}")

                # 检查ID是否为正整数
                valid_ids = id_series.dropna()
                if len(valid_ids) > 0:
                    if (valid_ids <= 0).any():
                        invalid_ids = valid_ids[valid_ids <= 0].tolist()
                        errors.append(f"ID必须为正整数: {invalid_ids}")

                    # 检查ID是否有重复
                    if valid_ids.duplicated().any():
                        duplicate_ids = valid_ids[valid_ids.duplicated()].tolist()
                        errors.append(f"ID不能重复: {duplicate_ids}")
            except Exception as e:
                errors.append(f"验证ID列时出错: {str(e)}")

        # 验证Duration列
        duration_col = self._find_column(
            df, ["Duration", "duration", "工期", "持续时间"]
        )
        if duration_col:
            try:
                # 清理工期数据（移除中文单位）
                cleaned_durations = df[duration_col].apply(self._clean_duration_value)
                duration_series = pd.to_numeric(cleaned_durations, errors="coerce")

                if duration_series.isna().any():
                    invalid_rows = df.loc[duration_series.isna()].index.tolist()
                    errors.append(f"Duration列包含非数值数据，行号: {invalid_rows}")

                # 检查Duration是否为正数
                valid_durations = duration_series.dropna()
                if len(valid_durations) > 0:
                    if (valid_durations <= 0).any():
                        invalid_rows = df.loc[
                            duration_series.index[valid_durations <= 0]
                        ].index.tolist()
                        errors.append(f"Duration必须为正数，行号: {invalid_rows}")
            except Exception as e:
                errors.append(f"验证Duration列时出错: {str(e)}")

        # 验证Name列
        name_col = self._find_column(
            df, ["Name", "name", "名称", "工序名称", "任务名称"]
        )
        if name_col:
            empty_names = df[name_col].isna() | (
                df[name_col].astype(str).str.strip() == ""
            )
            if empty_names.any():
                empty_rows = df.loc[empty_names].index.tolist()
                errors.append(f"Name列不能为空，行号: {empty_rows}")

        # 如果有验证错误，抛出异常
        if errors:
            raise DataParsingError(f"数据验证失败: {'; '.join(errors)}")

    def _find_column(
        self, df: pd.DataFrame, possible_names: List[str]
    ) -> Optional[str]:
        """
        在DataFrame中查找列名

        Args:
            df: DataFrame
            possible_names: 可能的列名列表

        Returns:
            找到的列名，如果没找到返回None
        """
        for name in possible_names:
            if name in df.columns:
                return name
        return None

    def _clean_duration_value(self, value: Any) -> str:
        """
        清理工期值，移除中文单位

        Args:
            value: 原始工期值

        Returns:
            清理后的数值字符串
        """
        if pd.isna(value):
            return "0"

        value_str = str(value).strip()
        # 移除中文单位
        value_str = (
            value_str.replace("个工作日", "").replace("天", "").replace("日", "")
        )
        value_str = value_str.replace("个", "").replace(" ", "")

        return value_str if value_str else "0"

    def _parse_activities(self, df: pd.DataFrame) -> List[Dict]:
        """
        解析工序信息

        Args:
            df: DataFrame

        Returns:
            工序列表
        """
        activities = []

        # 获取列名
        id_col = self._find_column(df, ["ID", "id", "编号", "工序编号"])
        name_col = self._find_column(
            df, ["Name", "name", "名称", "工序名称", "任务名称"]
        )
        duration_col = self._find_column(
            df, ["Duration", "duration", "工期", "持续时间"]
        )
        subdivision_col = self._find_column(
            df, ["划分条件", "Can_Subdivide", "can_subdivide", "可分区", "分区条件"]
        )

        for _, row in df.iterrows():
            # 增强的分区逻辑：支持多种表示方式
            can_subdivide = False
            if subdivision_col and subdivision_col in row.index:
                subdivision_value = str(row[subdivision_col]).strip().lower()
                can_subdivide = (
                    subdivision_value in ["是", "true", "1", "yes", "可以", "支持"] or
                    subdivision_value == "1"
                )

            # 处理工期
            duration_value = row[duration_col]
            cleaned_duration = self._clean_duration_value(duration_value)
            duration = int(float(cleaned_duration))

            activity = {
                "id": int(row[id_col]),
                "name": str(row[name_col]).strip(),
                "duration": duration,
                "can_subdivide": can_subdivide,
            }
            activities.append(activity)

        return activities

    def _parse_precedences(self, df: pd.DataFrame) -> List[Dict]:
        """
        解析前置关系（保持原有逻辑）

        Args:
            df: DataFrame

        Returns:
            前置关系列表
        """
        precedences = []

        id_col = self._find_column(df, ["ID", "id", "编号", "工序编号"])
        predecessors_col = self._find_column(
            df, ["Predecessors", "predecessors", "前置任务", "前置工序", "依赖关系"]
        )

        for _, row in df.iterrows():
            current_id = int(row[id_col])

            # 获取前置任务
            predecessors_str = ""
            if predecessors_col and predecessors_col in row.index:
                predecessors_value = row[predecessors_col]
                if pd.notna(predecessors_value):
                    predecessors_str = str(predecessors_value).strip()

            # 解析前置依赖关系
            if predecessors_str:
                try:
                    dependencies = self._parse_dependency_string(predecessors_str)

                    for dep in dependencies:
                        precedence = {
                            "from": dep["from_id"],
                            "to": current_id,
                            "type": dep["type"],
                            "lag": dep["lag"],
                        }
                        precedences.append(precedence)

                except Exception as e:
                    raise DataParsingError(
                        f"解析工序 {current_id} 的前置关系时出错: {str(e)}"
                    )

        return precedences

    def _parse_dependency_string(self, dep_str: str) -> List[Dict[str, Any]]:
        """
        解析依赖关系字符串

        Args:
            dep_str: 依赖关系字符串

        Returns:
            解析后的依赖关系列表
        """
        dependencies = []

        if not dep_str or pd.isna(dep_str):
            return dependencies

        # 转换为字符串并清理
        dep_str = str(dep_str).strip()
        if not dep_str:
            return dependencies

        # 处理引号包围的情况
        if dep_str.startswith('"') and dep_str.endswith('"'):
            dep_str = dep_str[1:-1]

        # 按逗号分割多个依赖关系
        parts = [part.strip() for part in dep_str.split(",") if part.strip()]

        for part in parts:
            try:
                # 清理中文单位
                cleaned_part = (
                    part.replace("个工作日", "")
                    .replace("天", "")
                    .replace("日", "")
                    .strip()
                )

                # 使用正则表达式解析每个依赖关系
                match = self.DEPENDENCY_PATTERN.match(cleaned_part)

                if not match:
                    # 尝试简单的数字格式
                    if cleaned_part.isdigit():
                        pred_id = int(cleaned_part)
                        dependencies.append(
                            {"from_id": pred_id, "type": "FS", "lag": 0}
                        )
                        continue

                    raise DataParsingError(f"无效的前置依赖关系格式: '{part}'")

                # 提取匹配的组
                pred_id_str = match.group(1)
                dep_type = match.group(2) or "FS"  # 默认FS类型
                lag_str = match.group(3) or "0"  # 默认0延时

                # 转换数据类型
                try:
                    pred_id = int(
                        float(pred_id_str)
                    )  # 先转float再转int，处理1.0这种情况
                    lag = int(lag_str) if lag_str != "0" else 0
                except ValueError as e:
                    raise DataParsingError(
                        f"依赖关系中的数值转换失败: '{part}' - {str(e)}"
                    )

                # 验证前置工序ID
                if pred_id <= 0:
                    raise DataParsingError(
                        f"前置工序ID必须为正整数: '{part}' - ID: {pred_id}"
                    )

                # 验证依赖类型
                valid_types = ["FS", "SS", "FF", "SF"]
                if dep_type not in valid_types:
                    raise DataParsingError(
                        f"无效的依赖类型: '{dep_type}' in '{part}' - 有效类型: {', '.join(valid_types)}"
                    )

                # 添加到依赖关系列表
                dependencies.append({"from_id": pred_id, "type": dep_type, "lag": lag})

            except DataParsingError:
                # 重新抛出已知的解析错误
                raise
            except Exception as e:
                # 包装未知错误
                raise DataParsingError(
                    f"解析前置依赖关系时发生未知错误: '{part}' - {str(e)}"
                )

        return dependencies

    def _parse_resources(self, df: pd.DataFrame) -> tuple:
        """
        解析资源信息，支持动态资源类型（Resource_*列）

        Args:
            df: DataFrame

        Returns:
            (resources, resource_demands) 元组
        """
        resources = {}
        resource_demands = {}

        # 查找资源相关的列（以Resource_开头的列）
        resource_columns = [
            col for col in df.columns if self.RESOURCE_PATTERN.match(col)
        ]

        if not resource_columns:
            # 如果没有资源列，返回空字典
            return resources, resource_demands

        # 提取资源类型名称并设置默认容量
        for col in resource_columns:
            match = self.RESOURCE_PATTERN.match(col)
            if match:
                resource_name = match.group(1).lower()
                # 计算该资源的最大需求量，设置容量为最大需求的2倍（最少为1）
                max_demand = df[col].fillna(0).max()
                resources[resource_name] = max(int(max_demand * 2), 1)

        # 构建resource_demands
        id_col = self._find_column(df, ["ID", "id", "编号", "工序编号"])

        for _, row in df.iterrows():
            activity_id = str(int(row[id_col]))
            demands = {}

            for col in resource_columns:
                match = self.RESOURCE_PATTERN.match(col)
                if match:
                    resource_name = match.group(1).lower()
                    demand_value = row.get(col, 0)

                    # 处理空值和非数值
                    if pd.isna(demand_value):
                        demand_value = 0
                    else:
                        try:
                            demand_value = int(float(demand_value))
                            if demand_value < 0:
                                demand_value = 0
                        except (ValueError, TypeError):
                            demand_value = 0

                    if demand_value > 0:
                        demands[resource_name] = demand_value

            if demands:  # 只有当有资源需求时才添加
                resource_demands[activity_id] = demands

        return resources, resource_demands
    
    def _normalize_worker_type(self, worker_type: str) -> str:
        """
        标准化工种名称
        
        Args:
            worker_type: 原始工种名称
            
        Returns:
            标准化后的工种名称
        """
        if not worker_type or pd.isna(worker_type):
            return "通用工人"
            
        worker_type = str(worker_type).strip()
        if not worker_type:
            return "通用工人"
            
        # 检查是否为预定义工种
        if worker_type in self.DEFAULT_WORKER_TYPES:
            return worker_type
            
        # 模糊匹配常见工种
        worker_type_lower = worker_type.lower()
        for standard_type in self.DEFAULT_WORKER_TYPES.keys():
            if standard_type in worker_type or worker_type in standard_type:
                return standard_type
                
        # 基于关键词匹配
        keyword_mappings = {
            "钢筋": "钢筋工",
            "混凝土": "泥工",
            "水泥": "泥工",
            "架子": "架子工",
            "脚手架": "架子工",
            "模板": "模板工",
            "电": "电工",
            "焊": "焊工",
            "起重": "起重工",
            "吊": "起重工",
        }
        
        for keyword, standard_type in keyword_mappings.items():
            if keyword in worker_type:
                return standard_type
                
        # 未知工种归类为通用工人
        logger.warning(f"未知工种 '{worker_type}' 已归类为 '通用工人'")
        return "通用工人"
    
    def _parse_worker_requirements(self, df: pd.DataFrame) -> Dict[str, Dict[str, int]]:
        """
        Parse per-activity worker-type requirements.

        Args:
            df: Parsed CSV data.

        Returns:
            Mapping {activity_id: {worker_type: count}}; empty when the CSV
            has no worker-type columns.
        """
        worker_requirements: Dict[str, Dict[str, int]] = {}

        worker_type_col = self._find_column(df, self.WORKER_COLUMNS["工种"])
        worker_count_col = self._find_column(df, self.WORKER_COLUMNS["工人数量"])
        id_col = self._find_column(df, ["ID", "id", "编号", "工序编号"])

        if not (worker_type_col and worker_count_col):
            logger.info("未找到工种相关列，跳过工种需求解析")
            return worker_requirements

        logger.info(f"找到工种列: {worker_type_col}, 工人数量列: {worker_count_col}")

        for _, row in df.iterrows():
            try:
                activity_id = str(int(row[id_col]))
                worker_type = row.get(worker_type_col)
                worker_count = row.get(worker_count_col)

                # Rows missing either the type or the count are skipped.
                if pd.isna(worker_type) or pd.isna(worker_count):
                    continue

                normalized_type = self._normalize_worker_type(worker_type)

                # Counts must convert to a positive integer.
                try:
                    count = int(float(worker_count))
                except (ValueError, TypeError):
                    logger.warning(f"工序 {activity_id} 的工人数量无效: {worker_count}")
                    continue
                if count <= 0:
                    continue

                worker_requirements.setdefault(activity_id, {})[normalized_type] = count

            except Exception as e:
                logger.error(f"解析工序工种需求时出错: {str(e)}")
                continue

        logger.info(f"解析到 {len(worker_requirements)} 个工序的工种需求")
        return worker_requirements
    
    def get_worker_types_config(
        self, worker_requirements: Optional[Dict[str, Dict[str, int]]] = None
    ) -> List[WorkerType]:
        """
        Build WorkerType configs for the worker types actually used.

        Args:
            worker_requirements: Optional mapping
                {activity_id: {worker_type: count}}.

        Returns:
            One WorkerType per used type (sorted by name so the output is
            deterministic); if no types are used, configs for every
            predefined type.
        """
        used_types = set()
        if worker_requirements:
            for requirements in worker_requirements.values():
                used_types.update(requirements)

        if used_types:
            # FIX: sort so repeated runs produce the same ordering (set
            # iteration order is not deterministic across processes).
            return [
                WorkerType(
                    name=worker_type,
                    capacity=self.DEFAULT_WORKER_TYPES.get(
                        worker_type, {"capacity": 20}
                    )["capacity"],
                )
                for worker_type in sorted(used_types)
            ]

        # No usage information: fall back to the full predefined catalogue.
        return [
            WorkerType(name=name, capacity=config["capacity"])
            for name, config in self.DEFAULT_WORKER_TYPES.items()
        ]
    
    def _convert_to_standard_format(
        self,
        activities: List[Dict],
        precedences: List[Dict],
        resources: Dict[str, int],
        resource_demands: Dict[str, Dict[str, int]],
        worker_requirements: Dict[str, Dict[str, int]]
    ) -> Dict[str, Any]:
        """
        Assemble the standardized project dictionary.

        Cross-validates worker data, sanity-checks subdivision data,
        attaches per-activity worker requirements, and computes
        subdivision statistics.

        Args:
            activities: Parsed activity dicts.
            precedences: Parsed precedence dicts.
            resources: Resource capacities.
            resource_demands: Per-activity resource demands.
            worker_requirements: Per-activity worker requirements.

        Returns:
            Standardized project data dictionary.
        """
        # Fail fast on inconsistent worker references.
        self._validate_worker_data_cross_references(activities, worker_requirements)

        # Subdivision sanity checks (warnings only).
        self.validate_subdivision_data(activities)

        # Attach each activity's worker requirements (empty dict when none).
        for activity in activities:
            activity["worker_requirements"] = worker_requirements.get(
                str(activity["id"]), {}
            )

        subdivision_stats = self.get_subdivision_statistics(activities)
        logger.info(f"分区统计: 总工序={subdivision_stats['total_activities']}, "
                   f"可分区工序={subdivision_stats['subdividable_activities']}, "
                   f"分区比例={subdivision_stats['subdivision_ratio']:.2%}")

        return {
            "activities": activities,
            "precedences": precedences,
            "resources": resources,
            "resource_demands": resource_demands,
            "worker_requirements": worker_requirements,
            "subdivision_statistics": subdivision_stats,
        }
    
    def _validate_worker_data_cross_references(
        self,
        activities: List[Dict],
        worker_requirements: Dict[str, Dict[str, int]]
    ) -> None:
        """
        Validate worker requirements against the parsed activities.

        Args:
            activities: Parsed activity dicts.
            worker_requirements: {activity_id: {worker_type: count}}.

        Raises:
            WorkerValidationError: If a requirement references a missing
                activity, or a count is not a positive integer.
        """
        known_ids = {str(act["id"]) for act in activities}

        # Every requirement must reference an existing activity.
        unknown = [aid for aid in worker_requirements if aid not in known_ids]
        if unknown:
            raise WorkerValidationError(
                f"工种需求中引用了不存在的工序ID: {', '.join(unknown)}",
                activity_id=', '.join(unknown)
            )

        # Counts must be positive integers.
        for activity_id, requirements in worker_requirements.items():
            for worker_type, count in requirements.items():
                if not isinstance(count, int):
                    raise WorkerValidationError(
                        f"工序 {activity_id} 的工种 '{worker_type}' 需求数量必须为整数",
                        worker_type=worker_type,
                        activity_id=activity_id
                    )
                if count <= 0:
                    raise WorkerValidationError(
                        f"工序 {activity_id} 的工种 '{worker_type}' 需求数量必须大于0",
                        worker_type=worker_type,
                        activity_id=activity_id
                    )

        logger.info(f"工种数据交叉引用验证通过，涉及 {len(worker_requirements)} 个工序")
    
    def get_subdivision_statistics(self, activities: List[Dict]) -> Dict[str, int]:
        """
        获取分区统计信息
        
        Args:
            activities: 工序列表
            
        Returns:
            分区统计信息字典
        """
        total_activities = len(activities)
        subdividable_activities = len([act for act in activities if act.get("can_subdivide", False)])
        
        return {
            "total_activities": total_activities,
            "subdividable_activities": subdividable_activities,
            "subdivision_ratio": subdividable_activities / total_activities if total_activities > 0 else 0
        }
    
    def validate_subdivision_data(self, activities: List[Dict]) -> None:
        """
        Sanity-check subdivision-related data.

        Logs warnings for subdividable activities whose duration or worker
        demand looks too small to subdivide usefully.  Note: despite the
        former docstring, this method never raises — it only logs.

        Args:
            activities: Parsed activity dicts.
        """
        subdividable_activities = [act for act in activities if act.get("can_subdivide", False)]
        
        if not subdividable_activities:
            logger.info("未发现可分区工序")
            return
            
        # Check basic suitability of each subdividable activity.
        for activity in subdividable_activities:
            # A one-day activity leaves nothing to split.
            if activity["duration"] < 2:
                logger.warning(f"工序 {activity['id']}({activity['name']}) 工期过短({activity['duration']}天)，可能不适合分区")
            
            # With fewer than two workers, splitting the crew is pointless.
            # (worker_requirements may be absent at this point; .get handles it.)
            worker_requirements = activity.get("worker_requirements", {})
            if worker_requirements:
                total_workers = sum(worker_requirements.values())
                if total_workers < 2:
                    logger.warning(f"工序 {activity['id']}({activity['name']}) 工人需求较少({total_workers}人)，分区效果可能有限")
        
        logger.info(f"分区数据验证完成，发现 {len(subdividable_activities)} 个可分区工序")

    def create_project_data(self, csv_content: str) -> ProjectData:
        """
        Parse CSV content and build a ProjectData object.

        Args:
            csv_content: CSV file content string.

        Returns:
            ProjectData with activities (including any worker requirements),
            precedences, resources, and resource demands.
        """
        parsed = self.parse_csv_data(csv_content)
        requirements = parsed.get("worker_requirements", {})

        # Build Activity objects, attaching worker requirements when present.
        activities = []
        for act in parsed["activities"]:
            activity = Activity(
                id=act["id"],
                name=act["name"],
                duration=act["duration"],
                can_subdivide=act["can_subdivide"],
            )
            key = str(act["id"])
            if key in requirements:
                activity.worker_requirements = requirements[key]
            activities.append(activity)

        # Build Precedence objects.
        precedences = [
            Precedence(
                from_activity=p["from"],
                to_activity=p["to"],
                dependency_type=p["type"],
                lag=p["lag"],
            )
            for p in parsed["precedences"]
        ]

        return ProjectData(
            activities=activities,
            precedences=precedences,
            resources=parsed["resources"],
            resource_demands=parsed["resource_demands"],
        )


# 便捷函数
def parse_csv_data(csv_content: str) -> Dict[str, Any]:
    """
    Convenience wrapper: parse CSV content with a fresh parser instance.

    Args:
        csv_content: CSV file content string.

    Returns:
        Standardized project data dictionary.
    """
    return SimplifiedDataParser().parse_csv_data(csv_content)


def parse_csv_file(csv_path: str) -> Dict[str, Any]:
    """
    Convenience wrapper: parse a CSV file with a fresh parser instance.

    Args:
        csv_path: Path to the CSV file.

    Returns:
        Standardized project data dictionary.
    """
    return SimplifiedDataParser().parse_csv_file(csv_path)


def create_project_data_from_csv(csv_content: str) -> ProjectData:
    """
    Convenience wrapper: build a ProjectData object from CSV content.

    Args:
        csv_content: CSV file content string.

    Returns:
        ProjectData object.
    """
    return SimplifiedDataParser().create_project_data(csv_content)
