from __future__ import annotations
from datetime import datetime
from pathlib import Path
from typing import TYPE_CHECKING, Any, Literal, Optional, TypeAlias, Union
from pydantic import BaseModel, Field, PrivateAttr, computed_field, model_validator
from ..dataclass.terminologies import Units
from .pipeline import PipeLine

UIAttributeSchema: TypeAlias = Union[StringAttributeSchema, IntegerAttributeSchema, FloatAttributeSchema, BooleanAttributeSchema, ArrayAttributeSchema, ObjectAttributeSchema, FileAttributeSchema, TableAttributeSchema]

class MapData(BaseModel):
    """Pipeline map data used to dump to a json.

    The four lists are parallel: index ``i`` presumably describes one
    connection ``from_module[i].port_out[i] -> to_module[i].port_in[i]``
    (TODO confirm against the dump format).
    """
    from_module: list[Any]  # source module per connection
    port_out: list[Any]     # output port on the source module
    to_module: list[Any]    # destination module per connection
    port_in: list[Any]      # input port on the destination module

class DynamicPortsInfo(BaseModel):
    """Dynamic port type used to dump to a json."""
    port_types: int | list[int]  # type id(s) for the dynamic ports; a single int presumably applies to all
    port_keys: list[str]         # names/keys of the dynamic ports
    port_docs: list[Any]         # per-port documentation entries

class ModuleData(BaseModel):
    """Pipeline module data used to dump to a json."""
    mname: str                                   # module instance name
    mclass: str                                  # module class identifier
    ports_in_data: dict[str, Any] | None         # data bound to input ports, if any
    cal_params: dict[str, Any]                   # calculation parameters of the module
    dynamic_ports_in: DynamicPortsInfo | None    # dynamic input port description, if any
    dynamic_ports_out: DynamicPortsInfo | None   # dynamic output port description, if any

class WidgetAttribute(BaseModel):
    """Widget attribute.

    Widget-specific UI toggles; each flag only applies to the widget type
    named by its prefix (select/table/slider).
    """
    select_enable_search: bool        # select widget: allow searching options
    table_enable_filter: bool         # table widget: allow column filtering
    table_enable_sort: bool           # table widget: allow column sorting
    table_enable_group: bool          # table widget: allow row grouping
    table_enable_set_all_rows: bool   # table widget: allow setting a value on all rows at once
    slider_range: bool                # slider widget: render as a range (two-handle) slider

class BaseAttributeSchema(BaseModel):
    """Base attribute schema for all kinds of attributes."""
    title: str | None                    # display label shown in the UI
    description: str | None              # longer description of the attribute
    default: Any                         # default value pre-filled in the widget
    required: bool                       # whether the user must provide a value
    selections: list[Any] | None         # allowed values; when set, a selection-style widget is used
    selections_name: list[str] | None    # display names for the `selections` entries
    placeholder: str | None              # placeholder text shown in an empty input
    help_text: str | None                # inline help shown next to the widget
    units: Units | None                  # units of the value (see ..dataclass.terminologies.Units)
    depends_on: list[str] | str | None   # name(s) of attribute(s) this one depends on
    visible: bool                        # whether the widget is rendered at all
    readonly: bool                       # whether the widget is read-only
    widget_attributes: WidgetAttribute   # widget-level configuration flags

class StringAttributeSchema(BaseAttributeSchema):
    """String attribute schema."""
    # Type discriminator; the concrete literal value (presumably "string")
    # was stripped to `Any` by stub generation — confirm in the implementation.
    vtype: Literal[Any]
    min_length: int | None   # minimum accepted string length
    max_length: int | None   # maximum accepted string length
    pattern: str | None      # regex pattern the value must match
    @property
    def widget(self) -> str:
        """
        Get the widget type of the attribute.
        If the `widget type` is not set, it will be automatically determined based on the `selections` and `max_length`.
        If the `selections` is not set, it will be automatically determined based on the `max_length`.
        """
        ...
    @widget.setter
    def widget(self, value: Any):
        """Set the widget type of the attribute."""
        ...

class IntegerAttributeSchema(BaseAttributeSchema):
    """Integer attribute schema."""
    # Type discriminator; literal value stripped by stub generation
    # (presumably "integer") — confirm in the implementation.
    vtype: Literal[Any]
    minimum: int | None      # lower bound, if any
    maximum: int | None      # upper bound, if any
    exclude_minimum: bool    # treat `minimum` as an exclusive bound
    exclude_maximum: bool    # treat `maximum` as an exclusive bound
    multiple_of: int | None  # value must be a multiple of this, if set
    @property
    def widget(self) -> str:
        """
        Get the widget type of the attribute.
        If the `widget type` is not set, it will be automatically determined based on the `selections`.
        """
        ...
    @widget.setter
    def widget(self, value: Any):
        """Set the widget type of the attribute."""
        ...

class FloatAttributeSchema(BaseAttributeSchema):
    """Float attribute schema."""
    # Type discriminator; literal value stripped by stub generation
    # (presumably "float") — confirm in the implementation.
    vtype: Literal[Any]
    minimum: float | None      # lower bound, if any
    maximum: float | None      # upper bound, if any
    exclude_minimum: bool      # treat `minimum` as an exclusive bound
    exclude_maximum: bool      # treat `maximum` as an exclusive bound
    multiple_of: float | None  # value must be a multiple of this, if set
    precision: int | None      # display/rounding precision, if set
    @property
    def widget(self) -> str:
        """
        Get the widget type of the attribute.
        If the `widget type` is not set, it will be automatically determined based on the `selections`.
        """
        ...
    @widget.setter
    def widget(self, value: Any):
        """Set the widget type of the attribute."""
        ...

class BooleanAttributeSchema(BaseAttributeSchema):
    """Boolean attribute schema."""
    # Type discriminator; literal value stripped by stub generation
    # (presumably "boolean") — confirm in the implementation.
    vtype: Literal[Any]
    @property
    def widget(self) -> str:
        """
        Get the widget type of the attribute.
        If the `widget type` is not set, it will be automatically determined.
        """
        ...
    @widget.setter
    def widget(self, value: Any):
        """Set the widget type of the attribute."""
        ...

class ArrayAttributeSchema(BaseAttributeSchema):
    """
    Array attribute schema.
    
    Notes
    -----
    If the widget type of the ArrayAttributeSchema is `select` or `checkbox`, it will be a multi-select widget.
    """
    # Type discriminator; literal value stripped by stub generation
    # (presumably "array") — confirm in the implementation.
    vtype: Literal[Any]
    # Schema of the array items: a single schema applied to every element,
    # or a list (presumably per-position schemas) — confirm in the implementation.
    items: StringAttributeSchema | IntegerAttributeSchema | FloatAttributeSchema | BooleanAttributeSchema | ArrayAttributeSchema | ObjectAttributeSchema | list[Any]
    min_items: int           # minimum number of elements
    max_items: int | None    # maximum number of elements, if bounded
    unique_items: bool       # whether elements must be unique
    render_as_table: bool    # render the array as a table instead of repeated inputs
    @property
    def widget(self) -> Any:
        """Get the widget type for array attributes."""
        ...
    @widget.setter
    def widget(self, value: Any):
        """Set the widget type for array attributes."""
        ...

class ObjectAttributeSchema(BaseAttributeSchema):
    """Object attribute schema."""
    # Type discriminator; literal value stripped by stub generation
    # (presumably "object") — confirm in the implementation.
    vtype: Literal[Any]
    properties: dict[str, Any]      # fixed (named) properties and their schemas
    additional_properties: bool     # whether keys beyond `properties` are allowed
    # Schemas constraining the keys/values of additional (dynamic) properties.
    additional_properties_key_schema: StringAttributeSchema | IntegerAttributeSchema | None
    additional_properties_value_schema: StringAttributeSchema | IntegerAttributeSchema | FloatAttributeSchema | BooleanAttributeSchema | ArrayAttributeSchema | ObjectAttributeSchema | None

class FileAttributeSchema(BaseAttributeSchema):
    """File attribute schema."""
    # Type discriminator; literal value stripped by stub generation
    # (presumably "file") — confirm in the implementation.
    vtype: Literal[Any]
    extension: str | list[str] | None  # accepted file extension(s), if restricted
    upload: bool                       # whether the UI offers file upload
    min_files: int                     # minimum number of files
    max_files: int | None              # maximum number of files, if bounded
    directory: bool                    # whether a directory (rather than files) is selected
    @property
    def widget(self) -> Literal[Any]:
        """Widget type for file attributes (literal value stripped by stub generation)."""
        ...

class TableAttributeSchema(BaseAttributeSchema):
    """Table attribute schema."""
    # Type discriminator; literal value stripped by stub generation
    # (presumably "table") — confirm in the implementation.
    vtype: Literal[Any]
    columns: list[Any]        # per-column schemas/definitions
    columns_name: list[str]   # display names, one per entry in `columns`
    min_rows: int             # minimum number of rows
    max_rows: int | None      # maximum number of rows, if bounded
    @property
    def widget(self) -> Literal[Any]:
        """Widget type for table attributes (literal value stripped by stub generation)."""
        ...
    # NOTE(review): pydantic v2's model_validator is normally applied as
    # `@model_validator(mode=...)`; the argument appears stripped by stub
    # generation — confirm against the implementation.
    @model_validator
    def validate_columns_length(self) -> Any:
        """Validate that columns_name has the same length as columns."""
        ...

class RangeModel(BaseModel):
    """A data structure model to define the type and range of the values of the calculation parameters.

    NOTE(review): every ``Literal[...]`` in this model had its concrete
    values stripped to ``Any`` by stub generation — consult the
    implementation for the actual allowed strings.
    """
    vtype: Literal[Any, Any, Any, Any, Any, Any, Any, Any, Any] | tuple[Any, Any]  # value type name(s)
    title: str                                                    # display title of the parameter
    default: int | float | str | bool | list | tuple | dict | None  # default parameter value
    units: Units | str                                            # units of the value
    minmax: tuple[Any, Any] | None                                # (min, max) bounds, if any
    include_min: bool                                             # whether the lower bound is inclusive
    include_max: bool                                             # whether the upper bound is inclusive
    list_type: Literal[Any, Any, Any, Any, Any, Any] | tuple[Any, Any] | None   # element type when vtype is a list
    tuple_type: Literal[Any, Any, Any, Any, Any, Any] | tuple[Any, Any] | None  # element type when vtype is a tuple
    choices: tuple[Any, Any] | dict[str, Any] | None              # allowed values, if the parameter is enumerated
    choices_label: tuple[str, Any] | dict[str, Any] | None        # display labels for `choices`
    list_len: int | None                                          # fixed list length, if constrained
    tuple_len: int | None                                         # fixed tuple length, if constrained
    dict_key_type: Literal[Any, Any, Any, Any] | tuple[Any, Any] | None          # key type when vtype is a dict
    dict_value_type: Literal[Any, Any, Any, Any, Any] | tuple[Any, Any] | None   # value type when vtype is a dict
    dict_key_choices: tuple[Any, Any] | None                      # allowed dict keys, if enumerated
    dict_key_choices_label: tuple[str, Any] | None                # display labels for dict key choices
    dict_value_choices: tuple[Any, Any] | dict[str, Any] | None   # allowed dict values, if enumerated
    dict_value_choices_label: tuple[str, Any] | None              # display labels for dict value choices
    depends_on: list[str] | str | None                            # parameter name(s) this one depends on
    required: bool                                                # whether a value must be provided
    visible: bool                                                 # whether the parameter is shown in the UI
    read_only: bool                                               # whether the parameter is editable
    widget: Literal[Any, Any, Any, Any, Any, Any]                 # widget type name (values stripped)

class NestedRangeModel(BaseModel):
    """
    A model for handling nested parameters like dictionaries and dictionaries with list values.
    
    When vtype is 'list_dict', it represents list[dict].
    When vtype is 'dict', it represents regular dict format.
    """
    # Literal values 'dict' / 'list_dict' (per the docstring) were stripped
    # to `Any` by stub generation.
    vtype: Literal[Any, Any]
    title: str                          # display title of the parameter
    fields: dict[str, Any]              # nested field definitions, keyed by field name
    min_max_items: tuple[int, Any] | None  # (min, max) item count for list_dict, if bounded
    default: dict | list[dict] | None   # default value matching `vtype`
    depends_on: list[str] | str | None  # parameter name(s) this one depends on
    required: bool                      # whether a value must be provided

class PipelineAttribute(BaseModel):
    """Pipeline attribute data used for configuring the pipeline as an application."""
    attr_name: str                                  # attribute name exposed by the pipeline/application
    attr_title: str | None                          # display title, if different from the name
    module_name: str                                # module the attribute binds to
    param_name: str                                 # calculation parameter name on that module
    default_value: Any                              # default value for the attribute
    range_model: RangeModel | NestedRangeModel | None  # value-range description, if any
    attr_schema: UIAttributeSchema | None           # UI schema for rendering the attribute, if any
    is_dict_attribute: bool                         # whether the attribute targets a key inside a dict parameter
    dict_key: str | None                            # the dict key targeted when `is_dict_attribute` is True
    is_multi_binding: bool                          # whether the attribute binds to several module parameters
    bindings: list[Any] | None                      # the additional bindings when `is_multi_binding` is True

class ConditionCheck(BaseModel):
    """Defines a single condition check on a module attribute/parameter."""
    module_name: str     # module whose attribute/parameter is checked
    attribute_name: str  # name of the attribute/parameter on that module
    # Comparison operator name; the eight literal values were stripped to
    # `Any` by stub generation — confirm in the implementation.
    operator: Literal[Any, Any, Any, Any, Any, Any, Any, Any]
    value: Any           # reference value the module value is compared against
    def evaluate(self, module_value: Any) -> bool:
        """
        Evaluate this condition check against a module value.
        
        Args:
            module_value: The actual value from the module to compare
        
        Returns:
            Boolean result of the condition check
        """
        ...

class StepTransition(BaseModel):
    """
    Defines a transition from one step to another in the pipeline execution flow.
    
    This allows for complex execution patterns like loops, conditionals, and branches.
    Supports both declarative condition checking and local function evaluation.
    """
    target_step: str                                # step to transition to when the condition holds
    max_iterations: int | None                      # cap on how many times this transition may fire, if bounded
    condition_checks: list[ConditionCheck] | None   # declarative condition checks, if used
    condition_logic: Literal[Any, Any]              # how multiple checks combine (literals stripped; presumably "and"/"or")
    condition_function_name: str | None             # name of a local function to evaluate instead, if used
    target_modules: list[str] | None                # modules relevant to the transition, if restricted
    def evaluate_condition(self, pipeline: Any) -> bool:
        """
        Evaluate the transition condition using either declarative checks or local function.
        
        Args:
            pipeline: Pipeline instance to get target modules and local functions path
        
        Returns:
            Boolean result of condition evaluation
        
        Raises:
            ValueError: If condition evaluation fails
        """
        ...

class Config:
    """Pydantic model configuration.

    NOTE(review): this class sits at module level rather than nested inside
    a model — presumably the stub generator hoisted an inner ``Config`` (it
    likely belongs to the adjacent ``StepTransition``). Confirm against the
    implementation before relying on it.
    """
    # Permits fields typed with arbitrary (non-pydantic) classes.
    arbitrary_types_allowed: Any

class FlowControl(BaseModel):
    """
    Defines how the execution flow progresses after a step completes.
    
    This allows for defining linear sequences, loops, and conditional branching between steps.
    """
    next_steps: list[StepTransition]  # candidate transitions, evaluated to pick the next step
    default_next: str | None          # fallback step when no transition condition matches

class PipelineStep(BaseModel):
    """
    A step in the pipeline execution process.
    
    A step defines a point or an interval in the execution flow where the pipeline will start and stop, and return results
    from the end modules in that interval.
    
    Parameters
    ----------
    return_results:
        If True, the results of the current step end modules of all output ports will be returned.
        If False, no results will be returned.
        If a dictionary, the keys are the module names and the values are the port names to return.
        Note - current step end modules are not the `end_modules` defined in the pipeline, they are the parents of the end modules.
    
    reset_end_modules_params:
        If True, the calculation parameters of the end modules will be reset.
        If False, the calculation parameters of the end modules will not be reset.
        If a dictionary, the keys are the module names and the values are the boolean values to reset the calculation parameters.
    """
    step_name: str                              # unique name of the step
    start_modules: list[str] | None             # modules execution starts from, if restricted
    end_modules: list[str] | None               # modules execution stops at, if restricted
    return_results: bool | dict[str, Any]       # see class docstring
    reset_end_modules_params: bool | dict[str, bool]  # see class docstring
    flow_control: FlowControl | None            # transitions taken after this step completes, if any
    step_description: str | None                # human-readable description of the step

class StepsManager(BaseModel):
    """A collection of execution steps for a pipeline."""
    # Execution mode; literals stripped by stub generation — per the method
    # docstrings below, presumably "linear" and "flow_control".
    execution_mode: Literal[Any, Any]
    initial_step: str | None    # name of the step execution starts from, if set
    steps: list[PipelineStep]   # ordered list of pipeline steps
    def get_step(self, step_name: str) -> Any:
        """
        Get a pipeline step by its name.
        
        Args:
            step_name: The name of the step to retrieve
        
        Returns:
            The pipeline step if found, None otherwise
        """
        ...
    def get_previous_step(self, current_step_name: str) -> Any:
        """
        Get the previous step in the sequence.
        
        In linear mode, this is the step that appears before the current step in the list.
        In flow_control mode, this is the step that has a transition to the current step.
        For steps without flow_control, linear logic is used regardless of execution_mode.
        
        Args:
            current_step_name: The name of the current step
        
        Returns:
            The previous pipeline step if found, None if this is the first step or initial_step
        """
        ...
    def get_next_step(self, current_step_name: str, execution_context: Optional[Any] = None, pipeline: Optional[Any] = None) -> Optional[str]:
        """
        Determine the next step to execute based on the current step and execution context.
        
        This method applies the flow control logic to determine which step should be executed next.
        For linear execution mode, it simply returns the next step in the list.
        For flow_control mode, it evaluates transitions and conditions to determine the next step.
        
        Args:
            current_step_name: The name of the current step
            execution_context: The execution context containing transition counts and results
            pipeline: The pipeline instance (required for local function evaluation)
        
        Returns:
            The name of the next step to execute, or None if there is no next step
        
        Raises:
            ValueError: If flow_control with next_steps is defined but execution_context is not provided
        """
        ...

class PipelineData(BaseModel):
    """Pipeline data used to dump a pipeline to json including the map data and modules data."""
    map: MapData                            # module-to-module connection map
    modules: dict[str, ModuleData]          # module data keyed by module name
    attributes: list[PipelineAttribute]     # pipeline attributes exposed as application config
    app_name: str                           # application name
    app_title: str | None                   # application display title, if set
    app_version: str | None                 # application version string, if set
    steps_manager: StepsManager | None      # execution steps definition, if the pipeline uses steps
    local_functions_path: str | None        # path to user-supplied local functions, if any
    json_to_db: dict[str, Any]              # configuration of data marked for database storage
    workspace: str | None                   # pipeline workspace path, if set
    hide_output_files: dict[str, str] | None  # output-file hiding configuration, if any
    def __getitem__(self, key: str) -> ModuleData:
        """Get the module data by the module name."""
        ...
    def __contains__(self, key: str) -> bool:
        """Check if the module name is in the pipeline."""
        ...

class AttributeInfo(PipelineAttribute):
    """
    Pydantic model for storing pipeline attribute information that can be easily shared via FastAPI.
    Extends PipelineAttribute to include current runtime value.
    """
    current_value: Any  # the attribute's value at the time the info was captured

class AttributesInfoResponse(BaseModel):
    """Response model for returning multiple attributes information objects."""
    attributes: dict[str, AttributeInfo]  # attribute info keyed by attribute name
    app_name: str | None                  # application name, if available
    app_title: str | None                 # application display title, if available
    app_version: str | None               # application version string, if available

class PipelineResult(BaseModel):
    """
    Standardized result model for PipelineRunner.run() method.
    
    This model represents the output from running a pipeline, containing
    results from all end modules with their output port data.
    """
    module_results: dict[str, Any]  # per-module results, keyed by module name (values hold port data)
    execution_time: datetime        # when the run executed (naive vs aware not shown here — confirm)
    current_step: str | None        # step that produced this result, if step execution is used
    executed_steps: list[str]       # names of the steps executed so far
    last_step: bool                 # whether this result came from the final step
    def get_result(self, module_name: str, port_name: str) -> Any:
        """Get the result from the module_results dictionary."""
        ...
    def encrypt_file_paths(self) -> Any:
        """
        Create a copy of this PipelineResult with file paths encrypted for frontend security.
        
        This method recursively searches through module_results and encrypts any values
        that appear to be file paths revealing server directory structure. Single filenames
        are left unencrypted as they don't pose security risks.
        
        Returns
        -------
        PipelineResult
            A new PipelineResult instance with encrypted file paths
        """
        ...

class PipelineRunDataItem(BaseModel):
    """
    Pydantic model for individual data items in pipeline run data.
    
    This model represents a single data item that was marked for database storage
    during pipeline execution. It contains all necessary metadata for proper
    deserialization and source tracking.
    
    Attributes
    ----------
    name : str
        The name of the data item (port_name, attr_name, or param_name)
    data_type : Literal["port", "pipeline_attr", "module_attr"]
        The type of data source this item came from
    module_name : str | None
        The name of the module that owns this data (None for pipeline_attr)
    value_type : str
        The type of the data value (e.g., "TableData", "str", "int", "dict")
    data : Any
        The actual serialized data content
    timestamp : str
        ISO format timestamp when this data was captured
    
    Examples
    --------
    >>> # Output port data item
    >>> port_item = PipelineRunDataItem(
    ...     name="OutputData",
    ...     data_type="port",
    ...     module_name="FilterModule",
    ...     value_type="TableData",
    ...     data={...},  # Serialized TableData
    ...     timestamp="2024-01-01T10:00:00Z"
    ... )
    
    >>> # Pipeline attribute data item
    >>> pipeline_item = PipelineRunDataItem(
    ...     name="workspace",
    ...     data_type="pipeline_attr",
    ...     module_name=None,
    ...     value_type="str",
    ...     data="/path/to/workspace",
    ...     timestamp="2024-01-01T10:00:00Z"
    ... )
    
    >>> # Module parameter data item
    >>> module_item = PipelineRunDataItem(
    ...     name="threshold",
    ...     data_type="module_attr",
    ...     module_name="FilterModule",
    ...     value_type="float",
    ...     data=0.8,
    ...     timestamp="2024-01-01T10:00:00Z"
    ... )
    """
    name: str
    # Per the class docstring the literals are "port", "pipeline_attr",
    # "module_attr"; the values were stripped to `Any` by stub generation.
    data_type: Literal[Any, Any, Any]
    module_name: str | None
    value_type: str
    data: Any
    timestamp: str
    # NOTE(review): pydantic v2's model_validator is normally applied as
    # `@model_validator(mode=...)`; the argument appears stripped by stub
    # generation — confirm against the implementation.
    @model_validator
    def validate_module_name_consistency(self) -> Any:
        """Validate that module_name is consistent with data_type."""
        ...
    @property
    def key(self) -> str:
        """
        Generate the key used in ResultsDict based on data_type and names.
        
        Returns
        -------
        str
            The key in the format used by ResultsDict:
            - Output ports: "module_name@port_name"
            - Pipeline attributes: "pipeline@attr_name"
            - Module attributes: "module_name#attr_name"
        """
        ...
    @property
    def source_info(self) -> str:
        """
        Generate source information string for debugging and traceability.
        
        Returns
        -------
        str
            Source information in the format:
            - Output ports: "output_port:ModuleName:ValueType"
            - Pipeline attributes: "pipeline_attribute:ValueType"
            - Module attributes: "module_attribute:ModuleName:ValueType"
        """
        ...

class ExecutionContext(BaseModel):
    """
    Structured context for pipeline step execution.
    
    This model provides a type-safe way to track execution state, including
    transition counts and step results.
    """
    transition_counts: dict[str, int]     # fire counts keyed per (from_step, to_step) transition — key format not shown here; see increment_transition_count
    results: dict[str, PipelineResult]    # stored results keyed by step name
    def increment_transition_count(self, from_step: str, to_step: str) -> int:
        """
        Increment and return the count for a specific transition.
        
        Args:
            from_step: The name of the step we're transitioning from
            to_step: The name of the step we're transitioning to
        
        Returns:
            The new count after incrementing
        """
        ...
    def get_transition_count(self, from_step: str, to_step: str) -> int:
        """
        Get the current count for a specific transition.
        
        Args:
            from_step: The name of the step we're transitioning from
            to_step: The name of the step we're transitioning to
        
        Returns:
            The current count for this transition
        """
        ...
    def encrypt_file_paths(self) -> Any:
        """
        Create a copy of this ExecutionContext with file paths encrypted in all PipelineResults.
        
        This method encrypts file paths in all stored PipelineResult objects for frontend security.
        
        Returns
        -------
        ExecutionContext
            A new ExecutionContext instance with encrypted file paths in all results
        """
        ...
    def update_step_result(self, step_name: str, result: PipelineResult) -> Any:
        """
        Update the results for a specific step.
        
        Args:
            step_name: The name of the step
            result: The PipelineResult object to store
        """
        ...
    def get_step_result(self, step_name: str) -> Any:
        """
        Get the results for a specific step.
        
        Args:
            step_name: The name of the step
        
        Returns:
            The PipelineResult object if available, None otherwise
        """
        ...

class ResultsDict(BaseModel):
    """
    Pydantic model for storing results from pipeline json_to_db functionality.
    
    This provides better type safety and validation compared to a simple dictionary.
    The model stores data from various sources that were marked for database storage.
    
    Key Format
    ----------
    The keys in the data dictionary follow a specific naming convention to identify the data source:
    
    - **Output ports**: `"module_name@port_name"`
      Example: `"FilterModule@OutputData"`, `"ReaderModule@OutputTable"`
    
    - **Pipeline attributes**: `"pipeline@attr_name"`
      Example: `"pipeline@workspace"`, `"pipeline@app_name"`, `"pipeline@app_title"`
    
    - **Module attributes**: `"module_name#attr_name"`
      Example: `"FilterModule#threshold"`, `"ReaderModule#auto_run"`, `"CalculatorModule#precision"`
    
    Value Types
    -----------
    The values can be any of the following types depending on the data source:
    
    **From Output Ports:**
    - **Complex objects**: `TableData`, `TableCollection`, `SingleResult`, `ComplexResult`, `DocData` (auto-deserialized)
    - **Attributes dictionaries**: `dict[str, Any]` from module `OutputAttributes` ports
    - **Data structures**: `pandas.DataFrame`, `numpy.ndarray` (serialized/deserialized automatically)
    - **Basic types**: `str`, `int`, `float`, `bool`, `list`, `dict`, `Path`
    - **None**: For modules that haven't been executed or failed execution
    
    **From Pipeline Attributes:**
    - **Path values**: `str` or `Path` objects (workspace, local_functions_path)
    - **Application metadata**: `str` values (app_name, app_title, app_version)
    - **Configuration**: `dict`, `bool`, or other configuration values
    
    **From Module Attributes:**
    - **Parameters**: Any module calculation parameter value (`str`, `int`, `float`, `bool`, `list`, `dict`, etc.)
    - **Settings**: Module configuration values like `auto_run`, `threshold`, `precision`
    - **Complex parameters**: Nested dictionaries, lists, or custom objects used by modules
    
    Source Info Field
    -----------------
    The `source_info` field provides metadata about where each data item originated from:
    
    **Purpose:**
    - **Debugging**: Helps identify which module or pipeline component generated specific data
    - **Traceability**: Tracks the origin of data for audit and validation purposes
    - **Data provenance**: Maintains information about data lineage in complex pipelines
    
    **Source Info Values:**
    - **`"output_port:ModuleName"`**: Data from a module's output port (e.g., `"output_port:FilterModule"`)
    - **`"pipeline_attribute"`**: Data from a direct pipeline attribute (e.g., workspace, app_name)
    - **`"module_attribute:ModuleName"`**: Data from a module's parameter/attribute (e.g., `"module_attribute:FilterModule"`)
    - **`"direct_assignment"`**: Data added directly via dictionary assignment
    - **`"update_operation"`**: Data added via the update() method
    - **`"unknown"`**: Data with unclear origin (backward compatibility)
    
    Examples
    --------
    >>> results = ResultsDict(
    ...     data={
    ...         # Output port data (complex objects)
    ...         "MyModule@OutputData": table_data_object,           # TableData object
    ...         "FilterModule@Results": single_result_object,       # SingleResult object
    ...         "ReaderModule@OutputAttributes": {"param": "value"}, # Module attributes dict
    ...
    ...         # Pipeline attributes
    ...         "pipeline@workspace": "/path/to/workspace",         # Pipeline workspace path
    ...         "pipeline@app_name": "MyApplication",              # Application name
    ...
    ...         # Module parameters/attributes
    ...         "FilterModule#threshold": 0.5,                     # Module parameter (float)
    ...         "ReaderModule#auto_run": True,                     # Module setting (bool)
    ...         "CalculatorModule#config": {"precision": 2}        # Complex parameter (dict)
    ...     },
    ...     source_info={
    ...         "MyModule@OutputData": "output_port:MyModule",
    ...         "FilterModule@Results": "output_port:FilterModule",
    ...         "ReaderModule@OutputAttributes": "output_port:ReaderModule",
    ...         "pipeline@workspace": "pipeline_attribute",
    ...         "pipeline@app_name": "pipeline_attribute",
    ...         "FilterModule#threshold": "module_attribute:FilterModule",
    ...         "ReaderModule#auto_run": "module_attribute:ReaderModule",
    ...         "CalculatorModule#config": "module_attribute:CalculatorModule"
    ...     }
    ... )
    
    >>> # Access source information for debugging
    >>> print(f"Data from: {results.source_info['MyModule@OutputData']}")
    # Output: "Data from: output_port:MyModule"
    """
    data: dict[str, Any]          # stored values, keyed per the "Key Format" section above
    source_info: dict[str, str]   # provenance string per key (see "Source Info Field" above)
    # NOTE(review): pydantic v2's model_validator is normally applied as
    # `@model_validator(mode=...)`; the argument appears stripped by stub
    # generation — confirm against the implementation.
    @model_validator
    def validate_input(cls, values):
        """Validate input data and handle both dict and ResultsDict inputs."""
        ...
    def __getitem__(self, key: str) -> Any:
        """Allow dictionary-style access to data."""
        ...
    def __setitem__(self, key: str, value: Any) -> Any:
        """Allow dictionary-style assignment to data."""
        ...
    def __contains__(self, key: str) -> bool:
        """Allow 'in' operator for checking key existence."""
        ...
    def __len__(self) -> int:
        """Return number of items in data."""
        ...
    def __iter__(self):
        """Allow iteration over keys."""
        ...
    def items(self):
        """Return items like a dictionary."""
        ...
    def keys(self):
        """Return keys like a dictionary."""
        ...
    def values(self):
        """Return values like a dictionary."""
        ...
    def get(self, key: str, default: Any = None) -> Any:
        """Get value with optional default."""
        ...
    def update(self, other: Any) -> Any:
        """Update data from another dictionary or ResultsDict."""
        ...
    # The three methods below take `cls` as their first parameter and are
    # invoked on the class in their own docstring examples
    # (e.g. `ResultsDict.from_json_to_db(...)`), so they are declared as
    # classmethods; the original stub was missing the decorator.
    @classmethod
    def from_json_to_db(cls, pipeline_run_data: list[PipelineRunDataItem]) -> Any:
        """
        Create ResultsDict from enhanced json_to_db data with metadata.
        
        Args:
            pipeline_run_data: List of PipelineRunDataItem instances from a single pipeline run.
                Each item contains validated metadata and data for proper deserialization.
        
        Returns:
            ResultsDict instance with proper source information and deserialized data
        
        Examples:
            >>> pipeline_data = [
            ...     PipelineRunDataItem(
            ...         name="OutputData",
            ...         data_type="port",
            ...         module_name="MyModule",
            ...         value_type="TableData",
            ...         data={...},
            ...         timestamp="2024-01-01T10:00:00Z"
            ...     ),
            ...     PipelineRunDataItem(
            ...         name="workspace",
            ...         data_type="pipeline_attr",
            ...         module_name=None,
            ...         value_type="str",
            ...         data="/path/to/workspace",
            ...         timestamp="2024-01-01T10:00:00Z"
            ...     )
            ... ]
            >>> results = ResultsDict.from_json_to_db(pipeline_data)
            >>> results["MyModule@OutputData"]  # TableData object
            >>> results["pipeline@workspace"]   # str value
        """
        ...
    @classmethod
    def from_multiple_runs(cls, multiple_runs_data: list[Any], run_selector: Any = "latest") -> Any:
        """
        Create ResultsDict from multiple pipeline runs data.
        
        Args:
            multiple_runs_data: List of pipeline runs, each containing a list of PipelineRunDataItem instances
            run_selector: Which run to select:
                - "latest": Use the most recent run (default)
                - "first": Use the first run
                - int: Use the run at the specified index
        
        Returns:
            ResultsDict instance from the selected pipeline run
        
        Examples:
            >>> multiple_runs = [
            ...     [  # First run
            ...         PipelineRunDataItem(name="OutputData", data_type="port", ...)
            ...     ],
            ...     [  # Second run (latest)
            ...         PipelineRunDataItem(name="OutputData", data_type="port", ...)
            ...     ]
            ... ]
            >>> results = ResultsDict.from_multiple_runs(multiple_runs, "latest")
            >>> # Uses data from the second run
        """
        ...
    @classmethod
    def decrypt_pipeline_data(cls, pipeline_run_data: list[PipelineRunDataItem]) -> list[PipelineRunDataItem]:
        """
        Decrypt file paths in pipeline run data.
        
        This method can be used to decrypt file paths when processing data
        from the frontend or when deserializing stored data.
        
        Parameters
        ----------
        pipeline_run_data : list[PipelineRunDataItem]
            List of pipeline run data items that may contain encrypted file paths
        
        Returns
        -------
        list[PipelineRunDataItem]
            List with decrypted file paths
        """
        ...

class MultiResultsDict(BaseModel):
    """
    Pydantic model for storing multiple ResultsDict instances with unified storage for all scenarios.

    This model handles all three common scenarios with a single, simple storage structure:

    1. **Single pipeline, multiple runs**: Same pipeline executed multiple times
    2. **Multiple pipelines, single run each**: Different pipelines, one execution each
    3. **Multiple pipelines, multiple runs each**: Different pipelines, each with multiple executions

    Unified Storage Structure
    -------------------------
    **Single storage field**: `pipelines` with format `dict[str, dict[str, ResultsDict]]`
    - **Format**: `{"pipeline_name": {"run_id": ResultsDict, ...}}`
    - **Consistent**: All scenarios use the same structure
    - **Flexible**: Handles 1 to N pipelines, each with 1 to M runs

    **Examples for all scenarios:**
    ```python
    # Scenario 1: Single pipeline, multiple runs
    {
        "GeotechnicalAnalysis": {
            "run_1": ResultsDict(...),
            "run_2": ResultsDict(...),
            "run_3": ResultsDict(...)
        }
    }

    # Scenario 2: Multiple pipelines, single run each
    {
        "GeotechnicalAnalysis": {"run_1": ResultsDict(...)},
        "FoundationDesign": {"run_1": ResultsDict(...)},
        "SeismicAnalysis": {"run_1": ResultsDict(...)}
    }

    # Scenario 3: Multiple pipelines, multiple runs each
    {
        "GeotechnicalAnalysis": {
            "run_1": ResultsDict(...),
            "run_2": ResultsDict(...)
        },
        "FoundationDesign": {
            "run_1": ResultsDict(...),
            "run_2": ResultsDict(...),
            "run_3": ResultsDict(...)
        },
        "SeismicAnalysis": {"run_1": ResultsDict(...)}
    }
    ```

    Key Benefits
    ------------
    - **Unified approach**: Same structure handles all scenarios
    - **Simple API**: Consistent methods work for all use cases
    - **Scalable**: Easily add more pipelines or runs
    - **Clear semantics**: Pipeline name + run ID clearly identifies any result
    - **Type safety**: Pydantic validation ensures correct structure

    Access Patterns
    ---------------
    **By pipeline and run**: `get_result(pipeline_name, run_id)`
    **Latest run for pipeline**: `get_latest_result(pipeline_name)`
    **All runs for pipeline**: `get_pipeline_runs(pipeline_name)`
    **Single result (when only one run)**: `get_single_result(pipeline_name)`
    **All pipelines**: `get_pipeline_names()`
    **Pipeline statistics**: `get_run_count(pipeline_name)`

    Examples
    --------
    >>> # Create for any scenario - same API
    >>> multi_results = MultiResultsDict(
    ...     pipelines={
    ...         "GeotechnicalAnalysis": {
    ...             "run_1": ResultsDict(data={"Module1@Output": data1}),
    ...             "run_2": ResultsDict(data={"Module1@Output": data2})
    ...         },
    ...         "FoundationDesign": {
    ...             "run_1": ResultsDict(data={"Module2@Output": foundation_data})
    ...         }
    ...     }
    ... )
    >>>
    >>> # Access works the same for all scenarios
    >>> latest_geo = multi_results.get_latest_result("GeotechnicalAnalysis")
    >>> foundation_result = multi_results.get_single_result("FoundationDesign")  # When only one run
    >>> all_geo_runs = multi_results.get_pipeline_runs("GeotechnicalAnalysis")

    Use Cases
    ---------
    - **Parameter sweeps**: Multiple runs of same pipeline with different parameters
    - **Time series**: Same pipeline executed over time periods
    - **Comparative analysis**: Multiple pipelines for different analysis approaches
    - **Batch processing**: Multiple pipelines, each with multiple configurations
    - **A/B testing**: Different pipeline versions with multiple test runs
    """
    # Unified storage: {"pipeline_name": {"run_id": ResultsDict, ...}} — see
    # "Unified Storage Structure" in the class docstring.
    pipelines: dict[str, dict[str, ResultsDict]]
    def add_result(self, pipeline_name: str, run_id: str, results_dict: ResultsDict | dict[str, Any]) -> None:
        """
        Add a result for a specific pipeline and run.

        Works for all scenarios:
        - Single pipeline, multiple runs: Different run_ids for same pipeline_name
        - Multiple pipelines, single run each: Different pipeline_names, typically run_id="run_1"
        - Multiple pipelines, multiple runs each: Different pipeline_names, multiple run_ids each

        Args:
            pipeline_name: Name of the pipeline
            run_id: Identifier for this specific run (e.g., "run_1", timestamp, run_number)
            results_dict: ResultsDict or dictionary to add
        """
        ...
    def get_result(self, pipeline_name: str, run_id: str) -> ResultsDict | None:
        """
        Get a specific result by pipeline name and run ID.

        Args:
            pipeline_name: Name of the pipeline
            run_id: Identifier for the specific run

        Returns:
            ResultsDict for the specified pipeline and run, or None if not found
        """
        ...
    def get_latest_result(self, pipeline_name: str) -> ResultsDict | None:
        """
        Get the latest result for a pipeline (assumes run_ids are sortable).

        Args:
            pipeline_name: Name of the pipeline

        Returns:
            ResultsDict for the latest run, or None if no runs exist
        """
        ...
    def get_single_result(self, pipeline_name: str) -> ResultsDict | None:
        """
        Get the single result for a pipeline (when there's only one run).

        Convenient for scenarios where you know a pipeline has only one run.

        Args:
            pipeline_name: Name of the pipeline

        Returns:
            ResultsDict for the single run, or None if pipeline doesn't exist

        Raises:
            ValueError: If pipeline has multiple runs
        """
        ...
    def get_pipeline_runs(self, pipeline_name: str) -> dict[str, ResultsDict]:
        """
        Get all runs for a specific pipeline.

        Args:
            pipeline_name: Name of the pipeline

        Returns:
            Dictionary mapping run_ids to ResultsDict instances
        """
        ...
    def get_run_count(self, pipeline_name: str) -> int:
        """
        Get the number of runs for a specific pipeline.

        Args:
            pipeline_name: Name of the pipeline

        Returns:
            Number of runs for the pipeline
        """
        ...
    def get_pipeline_names(self) -> set[str]:
        """
        Get all pipeline names.

        Returns:
            Set of all pipeline names
        """
        ...
    def has_multiple_runs(self, pipeline_name: str) -> bool:
        """
        Check if a pipeline has multiple runs.

        Args:
            pipeline_name: Name of the pipeline

        Returns:
            True if the pipeline has multiple runs, False otherwise
        """
        ...
    def has_single_run(self, pipeline_name: str) -> bool:
        """
        Check if a pipeline has exactly one run.

        Args:
            pipeline_name: Name of the pipeline

        Returns:
            True if the pipeline has exactly one run, False otherwise
        """
        ...
    def pipeline_exists(self, pipeline_name: str) -> bool:
        """
        Check if a pipeline exists.

        Args:
            pipeline_name: Name of the pipeline

        Returns:
            True if the pipeline exists, False otherwise
        """
        ...
    def __getitem__(self, key: str) -> dict[str, ResultsDict]:
        """Get all runs for a pipeline."""
        ...
    def __contains__(self, key: str) -> bool:
        """Check if pipeline exists."""
        ...
    def __len__(self) -> int:
        """Return number of pipelines."""
        ...
    def __iter__(self):
        """Iterate over pipeline names."""
        ...
    @classmethod
    def from_pipeline_runs(cls, pipelines_data: dict[str, Any]) -> MultiResultsDict:
        """
        Create MultiResultsDict from pipeline runs data (handles all 3 scenarios).

        Universal factory method for all scenarios:
        1. Single pipeline, multiple runs: {"pipeline_name": {run1_data, run2_data, ...}}
        2. Multiple pipelines, single run each: {"pipeline1": {run1_data}, "pipeline2": {run1_data}, ...}
        3. Multiple pipelines, multiple runs each: {"pipeline1": {run1_data, run2_data}, "pipeline2": {run1_data, run2_data, run3_data}, ...}

        Args:
            pipelines_data: Dictionary mapping pipeline names to their runs data

        Returns:
            MultiResultsDict instance with all pipelines and runs
        """
        ...
    @classmethod
    def from_single_runs(cls, pipelines_results: dict[str, Any]) -> MultiResultsDict:
        """
        Create MultiResultsDict from single run data for multiple pipelines.

        Convenience method for scenario 2 (multiple pipelines, single run each).

        Args:
            pipelines_results: Dictionary mapping pipeline names to their single run data

        Returns:
            MultiResultsDict instance with single run for each pipeline
        """
        ...
    @classmethod
    def from_selected_runs(cls, pipelines_data: dict[str, Any], run_selector: Literal["latest", "first"] | int = "latest") -> MultiResultsDict:
        """
        Create MultiResultsDict by selecting specific runs from multiple pipeline runs.

        Useful when you have multiple runs but only want one run per pipeline.

        Args:
            pipelines_data: Dictionary mapping pipeline names to their multiple runs data
            run_selector: Which run to select ("latest", "first", or int index)

        Returns:
            MultiResultsDict instance with selected runs from each pipeline
        """
        ...
