"""
数据解析模块

负责解析CSV格式的项目数据文件，将外部数据转换为系统内部的标准化数据结构。
支持复杂的前后置关系解析和数据验证，包括中文格式支持。
"""

import json
import os
import re
from typing import Any, Dict, List, Optional

import pandas as pd

from .exceptions import DataParsingError


class ProjectDataParser:
    """Parser that converts project CSV files into the standardized internal
    data structure (activities, precedences, resources, resource demands).

    Supports Chinese column names, Chinese duration units (e.g. "3天",
    "5个工作日") and complex predecessor expressions such as "75FS+3个工作日".
    """

    # Regex for a single dependency token:
    #   predecessor id (possibly "N.0") + optional type (FS|SS|FF|SF)
    #   + optional signed lag, e.g. "75FS+3", "88SS", "77".
    DEPENDENCY_PATTERN = r"^(\d+(?:\.0)?)(FS|SS|FF|SF)?([+-]\d+)?$"

    # Canonical required CSV columns (Chinese aliases are accepted, see the
    # *_ALIASES constants below).
    REQUIRED_COLUMNS = ["ID", "Name", "Duration", "Predecessors"]

    # Supported worker types.
    WORKER_TYPES = [
        "泥工",
        "钢筋工",
        "架子工",
        "模板工",
        "通用工人",
        "电工",
        "焊工",
        "起重工",
    ]

    # Accepted column-name aliases (English / Chinese) for each logical
    # column, kept in one place so every lookup stays consistent.
    ID_ALIASES = ["ID", "id", "编号", "工序编号"]
    NAME_ALIASES = ["Name", "name", "名称", "工序名称", "任务名称"]
    DURATION_ALIASES = ["Duration", "duration", "工期", "持续时间"]
    PREDECESSOR_ALIASES = [
        "Predecessors",
        "predecessors",
        "前置任务",
        "前置工序",
        "依赖关系",
    ]

    def __init__(self, config_path: Optional[str] = None):
        """
        Initialize the parser.

        Args:
            config_path: Path to a JSON configuration file; built-in default
                capacities are used when not provided.
        """
        self.config_path = config_path

    def parse_project_csv(self, csv_path: str) -> Dict[str, Any]:
        """
        Parse a project CSV file and return the standardized data structure.

        Args:
            csv_path: Path to the CSV file.

        Returns:
            Standardized project data dict containing:
            - activities: list of activity dicts
            - precedences: list of precedence relation dicts
            - resources: resource capacity dict
            - resource_demands: resource demand dict

        Raises:
            FileNotFoundError: The file does not exist.
            DataParsingError: The CSV is malformed or contains invalid data.
        """
        if not os.path.exists(csv_path):
            raise FileNotFoundError(f"CSV文件不存在: {csv_path}")

        try:
            # Read the CSV, trying several encodings.
            df = self._read_csv_with_encoding(csv_path)

            # Structural validation (required columns present, not empty).
            self._validate_csv_structure(df)

            # Basic data-type and integrity validation.
            self._validate_basic_data(df)

            # Convert to the standardized internal structure.
            return self._convert_to_standard_format(df)

        except (FileNotFoundError, DataParsingError):
            # Re-raise known exceptions unchanged.
            raise
        except Exception as e:
            # Wrap anything unexpected so callers only see known types.
            raise DataParsingError("解析CSV文件时发生未知错误", str(e)) from e

    def _read_csv_with_encoding(self, csv_path: str) -> pd.DataFrame:
        """
        Try several encodings until the CSV file can be read.

        Args:
            csv_path: Path to the CSV file.

        Returns:
            The DataFrame read from disk.

        Raises:
            DataParsingError: The file cannot be read with any encoding,
                is empty, or is structurally malformed.
        """
        encodings = ["utf-8", "gbk", "gb2312", "cp936"]

        for encoding in encodings:
            try:
                return pd.read_csv(csv_path, encoding=encoding)
            except UnicodeDecodeError:
                # Wrong encoding — try the next candidate.
                continue
            except pd.errors.EmptyDataError:
                raise DataParsingError("CSV文件为空", f"文件路径: {csv_path}")
            except pd.errors.ParserError as e:
                raise DataParsingError("CSV文件格式错误", f"解析错误: {str(e)}")
            except Exception:
                # Deliberate best-effort: any other read failure just moves
                # on to the next encoding.
                continue

        raise DataParsingError("无法读取CSV文件", f"尝试了所有编码: {encodings}")

    def _validate_csv_structure(self, df: pd.DataFrame) -> None:
        """
        Validate the CSV structure; Chinese column names are supported.

        Args:
            df: The DataFrame read from the file.

        Raises:
            DataParsingError: The CSV has no data rows or lacks required
                columns.
        """
        if df.empty:
            raise DataParsingError("CSV文件没有数据行")

        # Each required logical column must be present under one of its
        # accepted alias names.
        required_mappings = {
            "ID": self.ID_ALIASES,
            "Name": self.NAME_ALIASES,
            "Duration": self.DURATION_ALIASES,
            "Predecessors": self.PREDECESSOR_ALIASES,
        }

        missing_columns = []
        for required_col, possible_names in required_mappings.items():
            if not any(name in df.columns for name in possible_names):
                missing_columns.append(f"{required_col} ({'/'.join(possible_names)})")

        if missing_columns:
            raise DataParsingError(
                "CSV文件缺少必需的列", f"缺少列: {', '.join(missing_columns)}"
            )

    def _validate_basic_data(self, df: pd.DataFrame) -> None:
        """
        Validate basic data types and integrity, collecting all problems
        before raising.

        Args:
            df: The DataFrame read from the file.

        Raises:
            DataParsingError: One or more validation errors were found.
        """
        errors: List[str] = []

        # --- ID column: numeric, positive, unique -----------------------
        id_col = self._find_column(df, self.ID_ALIASES)
        if id_col:
            try:
                id_series = pd.to_numeric(df[id_col], errors="coerce")
                if id_series.isna().any():
                    invalid_ids = df.loc[id_series.isna(), id_col].tolist()
                    errors.append(f"ID列包含非数值数据: {invalid_ids}")

                valid_ids = id_series.dropna()
                if len(valid_ids) > 0:
                    if (valid_ids <= 0).any():
                        invalid_ids = valid_ids[valid_ids <= 0].tolist()
                        errors.append(f"ID必须为正整数: {invalid_ids}")

                    if valid_ids.duplicated().any():
                        duplicate_ids = valid_ids[valid_ids.duplicated()].tolist()
                        errors.append(f"ID不能重复: {duplicate_ids}")

            except Exception as e:
                errors.append(f"验证ID列时出错: {str(e)}")

        # --- Duration column: numeric (after unit stripping), positive ---
        duration_col = self._find_column(df, self.DURATION_ALIASES)
        if duration_col:
            try:
                # Strip Chinese units ("天", "个工作日", ...) before the
                # numeric conversion.
                cleaned_durations = df[duration_col].apply(self._clean_duration_value)
                duration_series = pd.to_numeric(cleaned_durations, errors="coerce")

                if duration_series.isna().any():
                    invalid_rows = df.index[duration_series.isna()].tolist()
                    errors.append(f"Duration列包含非数值数据，行号: {invalid_rows}")

                valid_durations = duration_series.dropna()
                if len(valid_durations) > 0:
                    # Anything below the epsilon (which includes <= 0) is
                    # rejected.  NOTE: the mask must be applied to
                    # valid_durations' own index — the original code indexed
                    # the full-length series with the shorter mask, which
                    # broke whenever NaN durations were present.
                    invalid_mask = valid_durations < 1e-6
                    if invalid_mask.any():
                        invalid_rows = valid_durations.index[invalid_mask].tolist()
                        errors.append(f"Duration必须为正数，行号: {invalid_rows}")

            except Exception as e:
                errors.append(f"验证Duration列时出错: {str(e)}")

        # --- Name column: non-empty --------------------------------------
        name_col = self._find_column(df, self.NAME_ALIASES)
        if name_col:
            empty_names = df[name_col].isna() | (
                df[name_col].astype(str).str.strip() == ""
            )
            if empty_names.any():
                empty_rows = df.index[empty_names].tolist()
                errors.append(f"Name列不能为空，行号: {empty_rows}")

        if errors:
            raise DataParsingError("数据验证失败", "; ".join(errors))

    def _find_column(
        self, df: pd.DataFrame, possible_names: List[str]
    ) -> Optional[str]:
        """
        Find the first matching column name in the DataFrame.

        Args:
            df: The DataFrame to search.
            possible_names: Candidate column names, in priority order.

        Returns:
            The first candidate present in ``df.columns``, or None.
        """
        for name in possible_names:
            if name in df.columns:
                return name
        return None

    @staticmethod
    def _strip_duration_units(value_str: str) -> str:
        """
        Remove Chinese duration units and spaces from a duration string.

        Args:
            value_str: Raw duration text, e.g. "3个工作日".

        Returns:
            The string with units removed (may be empty).
        """
        s = str(value_str).strip()
        # Replacement order matters: the compound unit must go first.
        for token in ("个工作日", "天", "日", "个", " "):
            s = s.replace(token, "")
        return s

    def _clean_duration_value(self, value: Any) -> str:
        """
        Clean a duration cell for numeric conversion, removing Chinese units.

        Args:
            value: Raw duration cell value (may be NaN).

        Returns:
            Cleaned numeric string; "0" for missing/empty values.
        """
        if pd.isna(value):
            return "0"

        cleaned = self._strip_duration_units(str(value))
        return cleaned if cleaned else "0"

    def _convert_to_standard_format(self, df: pd.DataFrame) -> Dict[str, Any]:
        """
        Convert the DataFrame into the standardized internal structure.

        Args:
            df: The validated DataFrame.

        Returns:
            Standardized project data dict (activities, precedences,
            resources, resource_demands).

        Raises:
            DataParsingError: The conversion failed.
        """
        try:
            # 1. Build the activities list.
            activities = self._build_activities(df)

            # 2. Build the precedences list.
            precedences = self._build_precedences(df)

            # 3. Extract resource capacities and per-activity demands.
            resources, resource_demands = self._extract_resources(df)

            # 4. Cross-reference integrity validation.
            self._validate_cross_references(
                activities, precedences, resources, resource_demands
            )

            return {
                "activities": activities,
                "precedences": precedences,
                "resources": resources,
                "resource_demands": resource_demands,
            }

        except DataParsingError:
            raise
        except Exception as e:
            raise DataParsingError("数据结构转换失败", str(e)) from e

    def _build_activities(self, df: pd.DataFrame) -> List[Dict[str, Any]]:
        """
        Build the activities data structure.

        Args:
            df: The validated DataFrame.

        Returns:
            List of activity dicts with keys id, name, duration, is_outdoor.
        """
        activities = []

        # Resolve the ID column once, not per row.
        id_col = self._find_column(df, self.ID_ALIASES)
        has_outdoor = "Is_Outdoor" in df.columns

        for _, row in df.iterrows():
            # Outdoor flag, if the column exists.  A missing cell (NaN)
            # must count as False — bool(NaN) would be True.
            is_outdoor = False
            if has_outdoor:
                raw_flag = row.get("Is_Outdoor", False)
                is_outdoor = (not pd.isna(raw_flag)) and bool(raw_flag)

            # Duration, supporting Chinese unit suffixes.
            duration = self._parse_duration(row)

            # Name, supporting Chinese column aliases.
            name = self._get_activity_name(row)

            activities.append(
                {
                    "id": int(row[id_col]),
                    "name": name,
                    "duration": duration,
                    "is_outdoor": is_outdoor,
                }
            )

        return activities

    def _parse_duration(self, row: Any) -> int:
        """
        Parse the duration of one row, supporting several formats.

        Args:
            row: A DataFrame row (pandas Series).

        Returns:
            Duration in whole days.

        Raises:
            DataParsingError: No duration cell was found or it could not
                be parsed.
        """
        duration_value = None
        for col in self.DURATION_ALIASES:
            if col in row.index:
                duration_value = row[col]
                break

        if duration_value is None or pd.isna(duration_value):
            raise DataParsingError("找不到工期信息")

        # String durations may carry Chinese units ("3天", "5个工作日").
        if isinstance(duration_value, str):
            cleaned = self._strip_duration_units(duration_value)
            try:
                return int(float(cleaned))
            except ValueError:
                raise DataParsingError(f"无法解析工期格式: {duration_value}")

        # Numeric durations (int / float / numpy scalar).
        try:
            return int(float(duration_value))
        except (ValueError, TypeError):
            raise DataParsingError(f"无法解析工期格式: {duration_value}")

    def _get_activity_name(self, row: Any) -> str:
        """
        Get the activity name, supporting multiple column aliases.

        Args:
            row: A DataFrame row (pandas Series).

        Returns:
            The stripped activity name.

        Raises:
            DataParsingError: No non-empty name cell was found.
        """
        for col in self.NAME_ALIASES:
            if col in row.index:
                name = row[col]
                if pd.notna(name) and str(name).strip():
                    return str(name).strip()

        raise DataParsingError("找不到工序名称信息")

    def _build_precedences(self, df: pd.DataFrame) -> List[Dict[str, Any]]:
        """
        Build the precedences data structure.

        Args:
            df: The validated DataFrame.

        Returns:
            List of precedence dicts with keys from, to, type, lag.

        Raises:
            DataParsingError: A predecessor expression could not be parsed.
        """
        precedences = []

        id_col = self._find_column(df, self.ID_ALIASES)

        for _, row in df.iterrows():
            current_id = int(row[id_col])

            # Predecessor expression, supporting Chinese column aliases.
            predecessors_str = self._get_predecessors(row)

            try:
                for dep in self._parse_predecessors(predecessors_str):
                    precedences.append(
                        {
                            "from": dep["from_id"],
                            "to": current_id,
                            "type": dep["type"],
                            "lag": dep["lag"],
                        }
                    )

            except DataParsingError as e:
                # Add the failing activity id as context.
                raise DataParsingError(
                    f"解析工序 {current_id} 的前置关系时出错", f"原始错误: {str(e)}"
                )

        return precedences

    def _get_predecessors(self, row: Any) -> str:
        """
        Get the raw predecessor expression, supporting multiple column
        aliases.

        Args:
            row: A DataFrame row (pandas Series).

        Returns:
            The stripped predecessor string; "" when absent.
        """
        for col in self.PREDECESSOR_ALIASES:
            if col in row.index:
                value = row[col]
                if pd.notna(value):
                    return str(value).strip()

        return ""

    def _parse_predecessors(self, predecessors_str: str) -> List[Dict[str, Any]]:
        """
        Parse a predecessor expression, supporting Chinese unit suffixes.

        Supported formats:
        - "75FS+3个工作日" -> ID=75, type=FS, lag=+3
        - "81FS-1个工作日, 77" -> ID=81 type=FS lag=-1, ID=77 type=FS lag=0
        - "88SS, 87FF-2个工作日" -> ID=88 type=SS lag=0, ID=87 type=FF lag=-2
        - "158,153" -> multiple predecessors

        Args:
            predecessors_str: Raw predecessor expression.

        Returns:
            List of dependency dicts with keys from_id, type, lag.

        Raises:
            DataParsingError: A dependency token is malformed.
        """
        dependencies: List[Dict[str, Any]] = []

        # Missing / NaN cell -> no predecessors.
        if predecessors_str is None or pd.isna(predecessors_str):
            return dependencies

        predecessors_str = str(predecessors_str).strip()
        if not predecessors_str:
            return dependencies

        # Strip a surrounding pair of double quotes (CSV artifact).
        if predecessors_str.startswith('"') and predecessors_str.endswith('"'):
            predecessors_str = predecessors_str[1:-1]

        # Multiple dependencies are comma separated.
        parts = [part.strip() for part in predecessors_str.split(",") if part.strip()]

        for part in parts:
            try:
                # Remove Chinese lag units before matching.
                cleaned_part = (
                    part.replace("个工作日", "")
                    .replace("天", "")
                    .replace("日", "")
                    .strip()
                )

                match = re.match(self.DEPENDENCY_PATTERN, cleaned_part)

                if not match:
                    # Plain numeric token -> default FS dependency, no lag.
                    if cleaned_part.isdigit():
                        dependencies.append(
                            {"from_id": int(cleaned_part), "type": "FS", "lag": 0}
                        )
                        continue

                    raise DataParsingError(
                        f"无效的前置依赖关系格式: '{part}'",
                        "期望格式: 数字[FS|SS|FF|SF][+/-数字], 如: '75FS+3', '81', '88SS-2'",
                    )

                pred_id_str = match.group(1)
                dep_type = match.group(2) or "FS"  # default dependency type
                lag_str = match.group(3) or "0"  # default lag

                try:
                    # float() first so "1.0"-style ids convert cleanly.
                    pred_id = int(float(pred_id_str))
                    lag = int(lag_str)
                except ValueError as e:
                    raise DataParsingError(
                        f"依赖关系中的数值转换失败: '{part}'", f"错误: {str(e)}"
                    )

                if pred_id <= 0:
                    raise DataParsingError(
                        f"前置工序ID必须为正整数: '{part}'", f"ID: {pred_id}"
                    )

                valid_types = ["FS", "SS", "FF", "SF"]
                if dep_type not in valid_types:
                    raise DataParsingError(
                        f"无效的依赖类型: '{dep_type}' in '{part}'",
                        f"有效类型: {', '.join(valid_types)}",
                    )

                dependencies.append({"from_id": pred_id, "type": dep_type, "lag": lag})

            except DataParsingError:
                # Re-raise known parse errors unchanged.
                raise
            except Exception as e:
                raise DataParsingError(
                    f"解析前置依赖关系时发生未知错误: '{part}'", f"错误: {str(e)}"
                )

        return dependencies

    def _load_resource_capacities(self) -> tuple:
        """
        Load resource capacity configuration.

        Returns:
            (worker_capacities, equipment_capacities) tuple of dicts.
        """
        # Built-in default capacities.
        default_worker_capacities = {
            "泥工": 30,
            "钢筋工": 25,
            "架子工": 15,
            "模板工": 20,
            "通用工人": 40,
            "电工": 10,
            "焊工": 8,
            "起重工": 6,
        }

        default_equipment_capacities = {"挖掘机": 3, "起重机": 2, "混凝土泵车": 2}

        # Without an explicit config path, probe the conventional locations.
        if not self.config_path:
            possible_configs = [
                "project_config.json",
                "config.json",
                ".kiro/project_config.json",
            ]
            for config_file in possible_configs:
                if os.path.exists(config_file):
                    self.config_path = config_file
                    break

        # Try to load the config file; fall back to defaults on any failure.
        if self.config_path and os.path.exists(self.config_path):
            try:
                with open(self.config_path, "r", encoding="utf-8") as f:
                    config = json.load(f)

                resource_config = config.get("resource_capacities", {})
                worker_capacities = resource_config.get(
                    "worker_types", default_worker_capacities
                )
                equipment_capacities = resource_config.get(
                    "equipment_types", default_equipment_capacities
                )

                print(f"✅ 已加载配置文件: {self.config_path}")
                return worker_capacities, equipment_capacities

            except Exception as e:
                print(f"⚠️ 配置文件加载失败，使用默认配置: {e}")

        return default_worker_capacities, default_equipment_capacities

    def _extract_resources(self, df: pd.DataFrame) -> tuple:
        """
        Extract resource information, supporting both input formats.

        Args:
            df: The validated DataFrame.

        Returns:
            (resources, resource_demands) tuple:
            - resources: {resource_name: capacity}
            - resource_demands: {activity_id: {resource_name: demand}}
        """
        # New format: worker-type + worker-count columns.
        if "工种" in df.columns and "工人数量" in df.columns:
            return self._extract_worker_type_resources(df)

        # Legacy format: columns prefixed with "Resource_".
        return self._extract_legacy_resources(df)

    def _extract_worker_type_resources(self, df: pd.DataFrame) -> tuple:
        """
        Extract worker-type resource information (new format).

        Args:
            df: DataFrame containing 工种 (worker type) and 工人数量
                (worker count) columns.

        Returns:
            (resources, resource_demands) tuple.
        """
        resources: Dict[str, int] = {}
        resource_demands: Dict[str, Dict[str, int]] = {}

        id_col = self._find_column(df, self.ID_ALIASES)
        has_excavator = "挖掘机数量" in df.columns

        # Collect every worker type that appears in the data.
        worker_types = set()
        for _, row in df.iterrows():
            worker_type = row.get("工种", "")
            if pd.notna(worker_type):
                label = str(worker_type).strip()
                if label:
                    worker_types.add(label)

        equipment_types = {"挖掘机"} if has_excavator else set()

        # Capacity configuration (file-based with built-in defaults).
        worker_capacities, equipment_capacities = self._load_resource_capacities()

        for worker_type in worker_types:
            resources[worker_type] = worker_capacities.get(worker_type, 15)  # default: 15 workers

        for equipment_type in equipment_types:
            resources[equipment_type] = equipment_capacities.get(equipment_type, 2)  # default: 2 units

        # Per-activity demands.
        for _, row in df.iterrows():
            activity_id = str(int(row[id_col]))
            demands: Dict[str, int] = {}

            worker_type = row.get("工种", "")
            label = str(worker_type).strip() if pd.notna(worker_type) else ""
            if label:
                # Convert before comparing: the raw cell may be a string,
                # and "10" > 0 would raise TypeError.
                try:
                    worker_count = int(float(row.get("工人数量", 0)))
                except (ValueError, TypeError):
                    worker_count = 0
                if worker_count > 0:
                    demands[label] = worker_count

            if has_excavator:
                try:
                    excavator_count = int(float(row.get("挖掘机数量", 0)))
                except (ValueError, TypeError):
                    excavator_count = 0
                if excavator_count > 0:
                    demands["挖掘机"] = excavator_count

            if demands:  # only record activities that need resources
                resource_demands[activity_id] = demands

        return resources, resource_demands

    def _extract_legacy_resources(self, df: pd.DataFrame) -> tuple:
        """
        Extract legacy-format resource information (Resource_* columns).

        Args:
            df: The validated DataFrame.

        Returns:
            (resources, resource_demands) tuple.
        """
        resources: Dict[str, int] = {}
        resource_demands: Dict[str, Dict[str, int]] = {}

        # Resource columns are prefixed with "Resource_".
        resource_columns = [col for col in df.columns if col.startswith("Resource_")]

        if not resource_columns:
            return resources, resource_demands

        # "Resource_Worker" -> "worker", etc.
        resource_types = [
            col.replace("Resource_", "").lower() for col in resource_columns
        ]

        # Default capacities per known resource type; 10 otherwise.
        default_capacities = {"worker": 20, "excavator": 5, "crane": 3, "truck": 10}

        for resource_type in resource_types:
            resources[resource_type] = default_capacities.get(resource_type, 10)

        id_col = self._find_column(df, self.ID_ALIASES)

        for _, row in df.iterrows():
            activity_id = str(int(row[id_col]))
            demands: Dict[str, int] = {}

            for col, resource_type in zip(resource_columns, resource_types):
                demand_value = row.get(col, 0)

                # Missing / non-numeric / negative demands count as zero.
                if pd.isna(demand_value):
                    demand_value = 0
                else:
                    try:
                        demand_value = int(float(demand_value))
                        if demand_value < 0:
                            demand_value = 0
                    except (ValueError, TypeError):
                        demand_value = 0

                if demand_value > 0:
                    demands[resource_type] = demand_value

            if demands:  # only record activities that need resources
                resource_demands[activity_id] = demands

        return resources, resource_demands

    def _validate_cross_references(
        self,
        activities: List[Dict],
        precedences: List[Dict],
        resources: Dict,
        resource_demands: Dict,
    ) -> None:
        """
        Validate cross-reference integrity, collecting all problems before
        raising.

        Args:
            activities: Activity list.
            precedences: Precedence relation list.
            resources: Resource capacity dict.
            resource_demands: Resource demand dict.

        Raises:
            DataParsingError: One or more cross-reference errors were found.
        """
        errors: List[str] = []

        # 1. The set of known activity ids.
        valid_activity_ids = {activity["id"] for activity in activities}

        # 2. Every precedence endpoint must be a known activity.
        for precedence in precedences:
            from_id = precedence["from"]
            to_id = precedence["to"]

            if from_id not in valid_activity_ids:
                errors.append(f"前置关系中引用了不存在的工序ID: {from_id}")

            if to_id not in valid_activity_ids:
                errors.append(f"前置关系中引用了不存在的工序ID: {to_id}")

        # 3. Every demand key must reference a known activity.
        for activity_id_str in resource_demands.keys():
            try:
                activity_id = int(activity_id_str)
                if activity_id not in valid_activity_ids:
                    errors.append(f"资源需求中引用了不存在的工序ID: {activity_id}")
            except ValueError:
                errors.append(f"资源需求中包含无效的工序ID格式: {activity_id_str}")

        # 4. Every demanded resource type must exist.
        valid_resource_types = set(resources.keys())
        for activity_id, demands in resource_demands.items():
            for resource_type in demands.keys():
                if resource_type not in valid_resource_types:
                    errors.append(
                        f"工序 {activity_id} 引用了不存在的资源类型: {resource_type}"
                    )

        # 5. No demand may exceed the resource's capacity.
        for activity_id, demands in resource_demands.items():
            for resource_type, demand in demands.items():
                if resource_type in resources:
                    capacity = resources[resource_type]
                    if demand > capacity:
                        errors.append(
                            f"工序 {activity_id} 对资源 {resource_type} 的需求量 ({demand}) "
                            f"超过了可用容量 ({capacity})"
                        )

        if errors:
            raise DataParsingError("交叉引用完整性验证失败", "; ".join(errors))


def parse_project_csv(csv_path: str) -> Dict[str, Any]:
    """
    Convenience wrapper: parse a project CSV file using a
    default-configured ProjectDataParser.

    Args:
        csv_path: Path to the CSV file.

    Returns:
        Standardized project data dictionary.
    """
    return ProjectDataParser().parse_project_csv(csv_path)
