import os
import uuid
import warnings
from typing import Any, Callable, Dict, Iterator, List, Literal, Optional, Union

import numpy
import pandas
import pandas as pd

from .gdimData import GdimFieldMetadata
from .terminologies import Units

class FieldMetadata:
    """Metadata describing a single table field (column).

    Holds the field's canonical ``name`` (machine identifier) plus optional
    presentation and typing hints: human-readable ``title``, ``unit``,
    ``widget_type``, ``data_format``, ``field_type``, and a free-text
    ``description``.
    """
    def __init__(self, name: str, title: Any = None, unit: Any = None, widget_type: Any = None, data_format: Any = None, field_type: Any = None, description: Any = None):
        """Create field metadata.

        Parameters
        ----------
        name : str
            Canonical (machine) identifier of the field. Required.
        title : Any, optional
            Human-readable label for the field.
        unit : Any, optional
            Unit of measure (presumably a ``Units`` value — confirm against callers).
        widget_type : Any, optional
            UI widget hint for rendering the field.
        data_format : Any, optional
            Display/formatting hint for values.
        field_type : Any, optional
            Logical type classification of the field.
        description : Any, optional
            Free-text description of the field.
        """
        ...
    def __repr__(self):
        """Return a debug-friendly representation of this field metadata."""
        ...

class TableSeries(pd.Series):
    """A pandas ``Series`` subclass that carries per-field metadata.

    Each ``TableSeries`` is associated with a :class:`FieldMetadata` instance
    (required at construction) describing the column it represents.

    NOTE(review): this is a stub (all bodies elided). Comments below are
    inferred from signatures and existing docstrings; confirm against the
    implementation.
    """
    def __init__(self, *args, field_meta: FieldMetadata, title_to_name: Any = None, **kwargs):
        """Construct the series.

        Parameters
        ----------
        field_meta : FieldMetadata
            Metadata for this column. Keyword-only and required.
        title_to_name : Any, optional
            Presumably a mapping of field titles to column names used for
            title-based lookups — confirm against TableData.title_to_name.
        *args, **kwargs
            Forwarded to ``pd.Series``.
        """
        ...
    @property
    def field_meta(self) -> FieldMetadata:
        """Read-only access to this series' field metadata."""
        ...
    @property
    def unit(self) -> Units:
        """The unit of this field, taken from its metadata."""
        ...
    @unit.setter
    def unit(self, value: Units):
        """Allow updating the unit"""
        ...
    @property
    def description(self) -> str:
        """The free-text description of this field, taken from its metadata."""
        ...
    @description.setter
    def description(self, value: str):
        """Allow updating the description"""
        ...
    def rename(self, *args, **kwargs):
        """Override rename to ensure TableSeries is returned with updated metadata"""
        ...
    def __finalize__(self, other, method = None, **kwargs):
        """Propagate metadata from other to self"""
        ...
    # Binary arithmetic operators are overridden; given __finalize__ above,
    # presumably so results remain TableSeries with metadata attached — confirm.
    def __add__(self, other):
        ...
    def __sub__(self, other):
        ...
    def __mul__(self, other):
        ...
    def __truediv__(self, other):
        ...
    def __floordiv__(self, other):
        ...
    def __mod__(self, other):
        ...
    def __pow__(self, other):
        ...
    # Reflected variants of the arithmetic operators above.
    def __radd__(self, other):
        ...
    def __rsub__(self, other):
        ...
    def __rmul__(self, other):
        ...
    def __rtruediv__(self, other):
        ...
    def __rfloordiv__(self, other):
        ...
    def __rmod__(self, other):
        ...
    def __rpow__(self, other):
        ...
    # Comparison operators are also overridden (same metadata-preservation
    # rationale presumed — confirm).
    def __lt__(self, other):
        ...
    def __le__(self, other):
        ...
    def __gt__(self, other):
        ...
    def __ge__(self, other):
        ...
    def __eq__(self, other):
        ...
    def __ne__(self, other):
        ...
    def __getitem__(self, key):
        """Supports both numeric indexing and field title and column name access."""
        ...
    def map(self, arg, na_action = None):
        """
        Map values of Series according to an input mapping or function.
        
        This method preserves the TableSeries metadata when mapping values.
        
        Parameters
        ----------
        arg : function, dict, or Series
            Mapping correspondence.
        na_action : {None, 'ignore'}, default None
            If 'ignore', propagate NaN values, without passing them to the mapping function.
        
        Returns
        -------
        TableSeries
            Series with mapped values, preserving metadata.
        """
        ...

class TableData(pd.DataFrame):
    """A pandas ``DataFrame`` subclass that carries table-level metadata
    (name, title, description, table type) and per-field metadata
    (:class:`FieldMetadata`), and accepts field *titles* interchangeably with
    column *names* throughout its indexing and analysis API.
    """
    def __init__(self, *args, name: Any = None, title: Any = None, description: Any = None, table_type: Any = None, fields_meta: Any = None, **kwargs):
        """Construct the table.

        Parameters
        ----------
        name : Any, optional
            Machine name of the table.
        title : Any, optional
            Human-readable title of the table.
        description : Any, optional
            Free-text description.
        table_type : Any, optional
            Logical table classification.
        fields_meta : Any, optional
            Per-field metadata (presumably a mapping of field name to
            FieldMetadata — confirm against fields_meta property).
        *args, **kwargs
            Forwarded to ``pd.DataFrame``.
        """
        ...
    @property
    def title_to_name(self) -> dict[str, str]:
        """Get the mapping of column titles to column names. Key is title, value is name."""
        ...
    @property
    def name_to_title(self) -> dict[str, str]:
        """Get the mapping of column names to column titles. Key is name, value is title."""
        ...
    @property
    def field_titles(self) -> list[str]:
        """Get the list of field titles"""
        ...
    @property
    def field_names(self) -> list[str]:
        """Get the list of field names"""
        ...
    def convert_to_field_names(self, fields: Any = None) -> list[str]:
        """Convert field titles, field names, or combination of both to field names."""
        ...
    def convert_to_field_titles(self, fields: Any = None) -> list[str]:
        """Convert field titles, field names, or combination of both to field titles."""
        ...
    def get_field_metadata(self, key: str) -> FieldMetadata:
        """
        Get field metadata by name or title
        
        Parameters
        ----------
        key : str
            The name or title of the field
        
        Returns
        -------
        FieldMetadata
            The metadata of the field
        """
        ...
    @property
    def fields_meta(self) -> dict[str, FieldMetadata]:
        """Read-only access to fields metadata"""
        ...
    def update_fields_meta(self, value: Any):
        """Update the fields metadata"""
        ...
    def update_field_metadata(self, field_name: str, title: Optional[str] = None, unit: Optional[Units] = None, description: Optional[str] = None):
        """Update field metadata while preserving the name"""
        ...
    def __getitem__(self, key) -> Any:
        """
        Access data by either column name or field title.
        
        Supports all pandas DataFrame indexing:
        - Single string: table['col'] or table['Field Title'] -> TableSeries
        - List of strings: table[['col1', 'Title2']] -> TableData
        - Boolean indexing: table[table['col'] > 5] -> TableData
        - Slice objects: table[1:5] -> TableData
        - Other pandas indexing -> DataFrame/Series (wrapped appropriately)
        """
        ...
    def get(self, key, default = None):
        """
        Get column(s) by name or title with a default return value.
        
        Supports the same indexing as __getitem__:
        - Single string: column name or field title
        - List of strings: multiple column names/field titles
        - Other pandas indexing types
        
        Parameters
        ----------
        key : str, list, tuple, or other pandas key types
            Column name(s), field title(s), or other pandas indexing
        default : Any, optional
            Value to return if key is not found, by default None
        
        Returns
        -------
        TableSeries, TableData, or default value
            Result of indexing or default if key not found
        """
        ...
    def select_by_titles(self, *titles) -> "TableData":
        """
        Select columns by their titles, returning a new TableData with those columns.
        
        Parameters
        ----------
        *titles : str
            Field titles, field names, or combination of both to select
        
        Returns
        -------
        TableData
            New TableData containing only the selected columns
        
        Raises
        ------
        KeyError
            If any field title or name is not found
        """
        ...
    def rename_columns(self, mapper = None, **kwargs):
        """
        Rename columns while preserving field metadata.
        
        Parameters
        ----------
        mapper : dict, optional
            Dictionary mapping old column names to new names
        **kwargs
            Other arguments passed to pandas.DataFrame.rename
        
        Returns
        -------
        TableData or None
            Table with renamed columns if inplace=False, None if inplace=True
        """
        ...
    def rename_columns_to_titles(self, inplace = True):
        """
        Rename all column names to match their titles.
        
        This method renames the columns of the DataFrame to use their corresponding
        titles from field metadata, making the column names more human-readable.
        If a column doesn't have a title different from its name, it will remain unchanged.
        
        Parameters
        ----------
        inplace : bool, default True
            If True, modify the DataFrame in place (do not create a new object).
            If False, return a copy with renamed columns.
        
        Returns
        -------
        TableData
            If inplace=True, returns self.
            If inplace=False, returns a new TableData with columns renamed to match their titles.
        """
        ...
    def __setitem__(self, key, value):
        """
        Set data by column name or field title.
        
        Supports all pandas DataFrame assignment:
        - Single string: table['col'] = values or table['Field Title'] = values
        - List of strings: table[['col1', 'Title2']] = values
        - Boolean indexing: table[table['col'] > 5] = values
        - Slice objects: table[1:5] = values
        - Other pandas indexing -> Standard pandas behavior
        """
        ...
    def drop(self, labels = None, axis = 0, index = None, columns = None, **kwargs):
        """Override drop to maintain metadata consistency"""
        ...
    def rename(self, *args, **kwargs):
        """Override rename to update metadata"""
        ...
    def __finalize__(self, other, method = None, **kwargs):
        """Propagate metadata from other to self"""
        ...
    def groupby(self, by = None, axis = 0, level = None, as_index = True, sort = True, group_keys = True, observed = None, dropna = True, **kwargs):
        """
        Group DataFrame using a mapper or by a Series of columns, supporting field titles.
        
        This method extends pandas DataFrame.groupby() to work with TableData objects,
        allowing the use of field titles as column identifiers.
        
        Parameters
        ----------
        by : str, list of str, or Series, optional
            Column name(s), field title(s), or Series to group by.
            Can be a single string, list of strings, or pandas Series.
        axis : {0 or 'index', 1 or 'columns'}, default 0
            Split along rows (0) or columns (1).
        level : int, level name, or sequence, optional
            If the axis is a MultiIndex, group by a particular level or levels.
        as_index : bool, default True
            Return object with group labels as the index.
        sort : bool, default True
            Sort group keys.
        group_keys : bool, default True
            Add group keys to index when calling apply.
        observed : bool, optional
            When True, only show observed values for categorical groupers.
        dropna : bool, default True
            If True, and if group keys contain NA values, NA values together with row/column will be dropped.
        **kwargs
            Additional keyword arguments passed to pandas groupby.
        
        Returns
        -------
        _TableDataGroupBy
            A grouped object that preserves metadata
        """
        ...
    def value_counts(self, subset: Any = None, normalize: bool = False, sort: bool = True, ascending: bool = False, dropna: bool = True):
        """
        Return a Series containing counts of unique combinations of values.
        
        This method extends pandas DataFrame.value_counts() to work with TableData objects,
        allowing the use of field titles as column identifiers.
        
        Parameters
        ----------
        subset : str, list of str, or None, optional
            Column name(s) or field title(s) to use when counting unique combinations.
            Can be a single string for one column or a list for multiple columns.
            If None, all columns are used.
        
        normalize : bool, default False
            Return proportions rather than frequencies.
        
        sort : bool, default True
            Sort by frequencies.
        
        ascending : bool, default False
            Sort in ascending order.
        
        dropna : bool, default True
            Don't include counts of NaN.
        
        Returns
        -------
        pd.Series
            Series containing the value counts
        """
        ...
    # Binary arithmetic operators are overridden; given __finalize__ above,
    # presumably so results remain TableData with metadata attached — confirm.
    def __add__(self, other):
        ...
    def __sub__(self, other):
        ...
    def __mul__(self, other):
        ...
    def __truediv__(self, other):
        ...
    def __floordiv__(self, other):
        ...
    def __mod__(self, other):
        ...
    def __pow__(self, other):
        ...
    # Reflected variants of the arithmetic operators above.
    def __radd__(self, other):
        ...
    def __rsub__(self, other):
        ...
    def __rmul__(self, other):
        ...
    def __rtruediv__(self, other):
        ...
    def __rfloordiv__(self, other):
        ...
    def __rmod__(self, other):
        ...
    def __rpow__(self, other):
        ...
    def export_doc_context(self, precision: Optional[dict[str, Optional[int]]] = None, key_type: Literal["name", "title"] = "name") -> dict[str, Any]:
        """
        Export the table data as a dictionary for document context.
        
        Parameters
        ----------
        precision: dict[str, int | None], default: None
            The precision of the table values. key is the column name, value is the precision.
        
        key_type: Literal["name", "title"], default: name
            Whether to use the field name or title as the doc key.
        
        Returns
        -------
        dict[str, Any]
            The context of the result.
            key "data" stores the table data.
            key "doc_keys_strcut" stores the structure of the keys for the result.
        """
        ...
    @property
    def iloc(self) -> Any:
        """Override iloc to maintain metadata"""
        ...
    @property
    def loc(self) -> Any:
        """Override loc to maintain metadata"""
        ...
    def copy(self, deep = True):
        """Override copy to preserve metadata"""
        ...
    def merge(self, right, how = "inner", on = None, left_on = None, right_on = None, left_index = False, right_index = False, sort = False, suffixes = ("_x", "_y"), copy = True, indicator = False, validate = None, ignore_index = False):
        """
        Merge TableData with another DataFrame or TableData, preserving metadata.
        
        This method performs a database-style join operation between DataFrames
        and ensures that metadata from both tables is properly handled in the result.
        
        Parameters
        ----------
        right : DataFrame or TableData
            Object to merge with
        how : str, default 'inner'
            Type of merge to be performed: 'left', 'right', 'outer', 'inner'
        on : label or list, optional
            Column(s) to join on. Must be found in both DataFrames.
        left_on : label or list, optional
            Column(s) from the left DataFrame to use as keys
        right_on : label or list, optional
            Column(s) from the right DataFrame to use as keys
        left_index : bool, default False
            Use the index from the left DataFrame as the join key
        right_index : bool, default False
            Use the index from the right DataFrame as the join key
        sort : bool, default False
            Sort the result DataFrame by the join keys
        suffixes : 2-tuple of str, default ('_x', '_y')
            Suffix to apply to overlapping column names
        copy : bool, default True
            If False, avoid copying data when possible
        indicator : bool or str, default False
            Add a column to the output DataFrame indicating the source
        validate : str, optional
            Validation check on the merge operation: 'one_to_one', 'one_to_many',
            'many_to_one', 'many_to_many'
        ignore_index : bool, default False
            If True, do not use the index values along the concatenation axis.
            The resulting axis will be labeled 0, 1, …, n - 1.
        
        Returns
        -------
        TableData
            The merged TableData with preserved metadata
        """
        ...
    def astype(self, dtype, copy = True, errors = "raise"):
        """Override astype to preserve metadata"""
        ...
    def sort_values(self, by: Any, axis: int = 0, ascending: Any = True, inplace: bool = False, kind: Literal["quicksort", "mergesort", "heapsort", "stable"] = "quicksort", na_position: Literal["first", "last"] = "last", ignore_index: bool = False, key: Any = None):
        """
        Sort DataFrame by values, preserving TableData metadata and allowing field titles as sort keys.
        
        This method extends pandas DataFrame.sort_values() to work with TableData objects,
        preserving all metadata and allowing the use of field titles as column identifiers.
        
        Parameters
        ----------
        by : str or list of str
            Name(s) or title(s) of field(s) to sort by. Can be either column names or field titles.
        
        axis : {0 or 'index', 1 or 'columns'}, default 0
            Axis to be sorted.
        
        ascending : bool or list of bool, default True
            Sort ascending vs. descending. Specify list for multiple sort orders.
        
        inplace : bool, default False
            If True, perform operation in-place.
        
        kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'
            Choice of sorting algorithm.
        
        na_position : {'first', 'last'}, default 'last'
            Puts NaNs at the beginning if 'first'; 'last' puts NaNs at the end.
        
        ignore_index : bool, default False
            If True, the resulting axis will be labeled 0, 1, …, n - 1.
        
        key : callable, optional
            Apply the key function to the values before sorting.
        
        Returns
        -------
        TableData or None
            Sorted TableData object if inplace=False, None if inplace=True.
        
        Examples
        --------
        Sort by column name:
        >>> table.sort_values('column_name')
        
        Sort by field title:
        >>> table.sort_values('Column Title')
        
        Sort by multiple fields (mix of names and titles):
        >>> table.sort_values(['column_name', 'Another Title'])
        """
        ...
    def reset_index(self, level = None, drop = False, inplace = False, col_level = 0, col_fill = "", allow_duplicates = False, names = None):
        """
        Reset the index, preserving TableData metadata and creating metadata for new columns.
        
        This method extends pandas DataFrame.reset_index() to work with TableData objects,
        preserving all metadata and creating appropriate metadata for any new columns created
        from the index.
        
        Parameters
        ----------
        level : int, str, tuple, or list, optional
            Only remove the given levels from the index. Removes all levels by default.
        drop : bool, default False
            Do not try to insert index into dataframe columns. This resets the index to the default integer index.
        inplace : bool, default False
            Modify the DataFrame in place (do not create a new object).
        col_level : int or str, default 0
            If the columns have multiple levels, determines which level the labels are inserted into.
        col_fill : object, default ''
            If the columns have multiple levels, determines how the other levels are named.
        allow_duplicates : bool, default False
            Allow duplicate column labels to be created.
        names : int, str or 1-dimensional list, optional
            Using the given string, rename the DataFrame columns whose data comes from the index.
        
        Returns
        -------
        TableData or None
            DataFrame with the new index or None if inplace=True.
        """
        ...
    def fillna(self, value = None, method = None, axis = None, inplace = False, limit = None, downcast = None) -> Optional["TableData"]:
        """
        Fill NA/NaN values using the specified method, preserving TableData metadata.
        
        This method extends pandas DataFrame.fillna() to work with TableData objects,
        preserving all metadata and returning a TableData object instead of a regular DataFrame.
        Behavior is identical to pandas DataFrame.fillna().
        
        Parameters
        ----------
        value : scalar, dict, Series, or DataFrame, optional
            Value to use to fill holes (e.g. 0), alternately a dict/Series/DataFrame of values
            specifying which value to use for each index (for a Series) or column (for a DataFrame).
        method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
            Method to use for filling holes in reindexed Series.
        axis : {0 or 'index', 1 or 'columns'}, optional
            Axis along which to fill missing values.
        inplace : bool, default False
            If True, fill in-place. Note: this will modify any other views on this object.
        limit : int, optional
            If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill.
        downcast : dict, optional
            A dict of item->dtype of what to downcast if possible.
        
        Returns
        -------
        TableData or None
            TableData with NA entries filled if inplace=False, None if inplace=True.
        
        Examples
        --------
        Fill with a single value:
        >>> table.fillna(0)
        
        Fill with different values per column:
        >>> table.fillna({'column1': 0, 'column2': 'missing'})
        
        Forward fill:
        >>> table.fillna(method='ffill')
        """
        ...
    def prepare_for_json(self, inplace = False) -> Optional["TableData"]:
        """
        Prepare TableData for JSON serialization by replacing problematic values.
        
        Replaces np.nan, np.inf, and -np.inf with None (which serializes to null in JSON).
        This method is specifically designed for JSON serialization purposes.
        
        Parameters
        ----------
        inplace : bool, default False
            If True, modify the DataFrame in place and return None.
            If False, return a new TableData with values replaced.
        
        Returns
        -------
        TableData or None
            TableData with JSON-safe values if inplace=False, None if inplace=True.
        
        Examples
        --------
        Prepare for JSON serialization:
        >>> json_ready_table = table.prepare_for_json()
        >>> json_dict = json_ready_table.to_dict(orient="records")
        
        In-place preparation:
        >>> table.prepare_for_json(inplace=True)
        >>> json_dict = table.to_dict(orient="records")
        """
        ...
    def describe(self, percentiles = None, include = None, exclude = None, **kwargs) -> "TableData":
        """
        Generate descriptive statistics while preserving TableData metadata.
        
        This method extends pandas DataFrame.describe() to work with TableData objects,
        preserving field metadata and returning a TableData object instead of a regular DataFrame.
        
        Parameters
        ----------
        percentiles : list-like of numbers, optional
            The percentiles to include in the output. All should fall between 0 and 1.
            The default is [.25, .5, .75], which returns the 25th, 50th, and 75th percentiles.
        include : 'all', list-like of dtypes or None (default), optional
            A white list of data types to include in the result.
        exclude : list-like of dtypes or None (default), optional
            A black list of data types to omit from the result.
        **kwargs
            Additional keyword arguments passed to pandas describe() method.
            Note: Some parameters may not be available in all pandas versions.
        
        Returns
        -------
        TableData
            Summary statistics of the DataFrame with preserved metadata.
        
        Examples
        --------
        >>> table = TableData(...)
        >>> described = table.describe()  # Returns TableData, not DataFrame
        >>> print(type(described))  # <class 'TableData'>
        """
        ...
    def drop_duplicates(self, subset = None, keep = "first", inplace = False, ignore_index = False):
        """
        Remove duplicate rows, supporting field titles as column identifiers.
        
        This method extends pandas DataFrame.drop_duplicates() to work with TableData objects,
        preserving all metadata and allowing the use of field titles as column identifiers.
        
        Parameters
        ----------
        subset : str, list of str, or None, optional
            Column name(s) or field title(s) to consider for identifying duplicates.
            Can be either column names or field titles.
            If None, all columns are used.
        keep : {'first', 'last', False}, default 'first'
            Determines which duplicates (if any) to keep.
            - 'first': Drop duplicates except for the first occurrence.
            - 'last': Drop duplicates except for the last occurrence.
            - False: Drop all duplicates.
        inplace : bool, default False
            If True, perform operation in-place and return None.
        ignore_index : bool, default False
            If True, the resulting axis will be labeled 0, 1, …, n - 1.
        
        Returns
        -------
        TableData or None
            TableData object with duplicates removed if inplace=False, None if inplace=True.
        
        Examples
        --------
        Drop duplicates using column name:
        >>> table.drop_duplicates('column_name')
        
        Drop duplicates using field title:
        >>> table.drop_duplicates('Column Title')
        
        Drop duplicates using multiple fields (mix of names and titles):
        >>> table.drop_duplicates(['column_name', 'Another Title'])
        """
        ...
    def duplicated(self, subset = None, keep = "first"):
        """
        Return boolean Series denoting duplicate rows, supporting field titles as column identifiers.
        
        This method extends pandas DataFrame.duplicated() to work with TableData objects,
        allowing the use of field titles as column identifiers.
        
        Parameters
        ----------
        subset : str, list of str, or None, optional
            Column name(s) or field title(s) to consider for identifying duplicates.
            Can be either column names or field titles.
            If None, all columns are used.
        keep : {'first', 'last', False}, default 'first'
            Determines which duplicates (if any) to mark as False.
            - 'first': Mark duplicates as True except for the first occurrence.
            - 'last': Mark duplicates as True except for the last occurrence.
            - False: Mark all duplicates as True.
        
        Returns
        -------
        pd.Series
            Boolean series indicating whether each row is a duplicate.
        
        Examples
        --------
        Check duplicates using column name:
        >>> duplicates = table.duplicated('column_name')
        
        Check duplicates using field title:
        >>> duplicates = table.duplicated('Column Title')
        
        Check duplicates using multiple fields (mix of names and titles):
        >>> duplicates = table.duplicated(['column_name', 'Another Title'])
        """
        ...
    def serialize(self) -> dict:
        """
        Serialize the TableData object to a dictionary.
        
        This method converts the TableData object into a dictionary format that can be
        easily serialized to JSON or other formats. All metadata including field
        information, units, titles, and descriptions are preserved.
        
        Returns
        -------
        dict
            A dictionary containing all TableData information:
            - 'data': DataFrame data as records
            - 'name': Table name
            - 'title': Table title
            - 'description': Table description
            - 'fields_meta': Field metadata as dictionaries
            - 'dtypes': Column data types for proper reconstruction
        
        Examples
        --------
        >>> table = TableData(...)
        >>> serialized = table.serialize()
        >>> # Can be saved to JSON
        >>> import json
        >>> json_str = json.dumps(serialized)
        """
        ...
    def __getattr__(self, name):
        """
        Enable field title support for pandas DataFrame methods.
        
        This method intercepts attribute access and wraps pandas DataFrame methods
        to support field titles in addition to field names, while preserving
        TableData metadata in results.
        """
        ...
    @classmethod
    def deserialize(cls, data_dict: dict) -> "TableData":
        """
        Deserialize a TableData object from a dictionary.
        
        This class method reconstructs a TableData object from a dictionary created
        by the serialize() method. All metadata and data types are properly restored.
        
        Parameters
        ----------
        data_dict : dict
            Dictionary containing serialized TableData information, typically
            created by the serialize() method
        
        Returns
        -------
        TableData
            Reconstructed TableData object with all metadata preserved
        
        Examples
        --------
        >>> # Load from serialized data
        >>> table = TableData.deserialize(serialized_data)
        >>>
        >>> # Or from JSON
        >>> import json
        >>> data_dict = json.loads(json_str)
        >>> table = TableData.deserialize(data_dict)
        """
        ...
    def append_table(self, other, ignore_index = True, **kwargs):
        """
        Append another TableData to this one, modifying this table in place.
        
        This is an instance method that concatenates another TableData object
        to the current one, updating the current table in place.
        
        Parameters
        ----------
        other : TableData
            The TableData object to append to this one
        ignore_index : bool, default True
            If True, do not use the index values along the concatenation axis
        **kwargs
            Additional keyword arguments passed to pd.concat()
        
        Returns
        -------
        None
            This method modifies the current TableData in place
        
        Examples
        --------
        >>> table1 = TableData(...)
        >>> table2 = TableData(...)
        >>> table1.append_table(table2)  # table1 now contains both datasets
        """
        ...
    @staticmethod
    def concat(tables, ignore_index = True, name = None, title = None, description = None, **kwargs):
        """
        Concatenate TableData objects while preserving metadata.
        
        This is a static method that provides a TableData-aware version of pd.concat().
        It concatenates multiple TableData objects and merges their field metadata.
        
        Parameters
        ----------
        tables : list of TableData or iterable of TableData
            The TableData objects to concatenate
        ignore_index : bool, default True
            If True, do not use the index values along the concatenation axis
        name : str, optional
            Name for the resulting TableData. If None, defaults to "concatenated_table"
        title : str, optional
            Title for the resulting TableData. If None, defaults to "Concatenated Table"
        description : str, optional
            Description for the resulting TableData. If None, provides a default description
        **kwargs
            Additional keyword arguments passed to pd.concat()
        
        Returns
        -------
        TableData
            A new TableData object containing the concatenated data with merged metadata
        
        Examples
        --------
        >>> table1 = TableData(...)
        >>> table2 = TableData(...)
        >>> result = TableData.concat([table1, table2])
        
        >>> # From a TableCollection
        >>> tables = TableCollection()
        >>> result = TableData.concat(tables)
        """
        ...

class TableCollection:
    """A class to manage multiple TableData objects.

    Tables can be retrieved by name, title, or integer index (see
    ``__getitem__``). The collection also tracks an optional main table, a
    list of sub-tables, and a grouping key (see ``add_table`` and
    ``serialize``).
    """
    def __init__(self, name: Any = None, title: Any = None, description: Any = None):
        # NOTE(review): name/title/description are presumably str or None
        # (mirroring the metadata parameters used elsewhere, e.g.
        # TableData.concat) — confirm against the implementation.
        ...
    def add_table(self, table: TableData, main_table: bool = False, main_key: Optional[str] = None, sub_table: bool = False) -> Any:
        """
        Add a TableData object to the collection.
        
        Parameters
        ----------
        table: TableData
            The TableData object to add
        main_table: bool, default False
            Whether this table should be set as the main table
        main_key: str | None, default None
            The main key for grouping operations
        sub_table: bool, default False
            Whether this table should be added to the sub_tables list
        
        Raises
        ------
        ValueError
            If table.name or table.title is None or empty string
        Warning
            If a table with the same name already exists (will be overwritten)
            If a table with the same title already exists (will be overwritten)
        
        Note
        ----
        The table can be retrieved either by its name or title.
        """
        ...
    def get_table(self, key: str) -> Optional[TableData]:
        """
        Retrieve a table by name or title.
        
        Parameters
        ----------
        key: str
            The name or title of the table to retrieve
        
        Returns
        -------
        TableData or None
            TableData object if found, None if not found
        """
        ...
    def remove_table(self, key: str) -> Any:
        """
        Remove a table from the collection.

        Parameters
        ----------
        key: str
            Name or title of the table to remove

        Raises
        ------
        KeyError
            If table name/title doesn't exist
        """
        ...
    def copy(self) -> "TableCollection":
        """
        Create a deep copy of the TableCollection.
        
        Returns
        -------
        TableCollection
            A new TableCollection with copies of all tables
        """
        ...
    def rename_columns_to_titles(self):
        """
        Rename column names to match their titles.

        Uses each column's title from its field metadata so that column names
        become more human-readable.

        Note
        ----
        The previous docstring documented an ``inplace`` parameter, but this
        method takes no arguments; that text was likely copied from a
        TableData-level method. Presumably the contained tables are modified
        in place — confirm against the implementation.
        """
        ...
    @property
    def table_names(self) -> List[str]:
        """Get list of all table names in the collection"""
        ...
    @property
    def table_titles(self) -> List[str]:
        """Get list of all table titles in the collection"""
        ...
    def keys(self) -> List[str]:
        """Get list of all table names in the collection"""
        ...
    def values(self) -> List[TableData]:
        """Get list of all table objects in the collection"""
        ...
    def __getitem__(self, key: Union[str, int]) -> TableData:
        """Allow dictionary-style access to tables by name/title or integer index"""
        ...
    def __delitem__(self, key: Union[str, int]) -> Any:
        """Allow dictionary-style deletion of tables by name/title or integer index"""
        ...
    def __contains__(self, key: Union[str, int]) -> bool:
        """Allow 'in' operator to check for table existence by name/title or integer index"""
        ...
    def __len__(self) -> int:
        """Return number of tables in collection"""
        ...
    def __iter__(self) -> Iterator[TableData]:
        """Allow iteration over tables"""
        ...
    def __repr__(self) -> str:
        ...
    def serialize(self) -> dict:
        """
        Serialize the TableCollection object to a dictionary.
        
        This method converts the TableCollection object into a dictionary format that can be
        easily serialized to JSON or other formats. All metadata and contained TableData
        objects are preserved.
        
        Returns
        -------
        dict
            A dictionary containing all TableCollection information:
            - 'name': Collection name
            - 'title': Collection title
            - 'description': Collection description
            - 'main_table': Name of the main table (or None)
            - 'sub_tables': List of sub-table names
            - 'main_key': Main key used for grouping (or None)
            - 'tables': Dictionary of serialized TableData objects keyed by table name
        
        Examples
        --------
        >>> collection = TableCollection(...)
        >>> serialized = collection.serialize()
        >>> # Can be saved to JSON
        >>> import json
        >>> json_str = json.dumps(serialized)
        """
        ...
    def deserialize(cls, data_dict: dict) -> "TableCollection":
        """
        Deserialize a TableCollection object from a dictionary.
        
        This class method reconstructs a TableCollection object from a dictionary created
        by the serialize() method. All metadata and TableData objects are properly restored.
        
        Parameters
        ----------
        data_dict : dict
            Dictionary containing serialized TableCollection information, typically
            created by the serialize() method
        
        Returns
        -------
        TableCollection
            Reconstructed TableCollection object with all tables and metadata preserved
        
        Examples
        --------
        >>> # Load from serialized data
        >>> collection = TableCollection.deserialize(serialized_data)
        >>>
        >>> # Or from JSON
        >>> import json
        >>> data_dict = json.loads(json_str)
        >>> collection = TableCollection.deserialize(data_dict)
        """
        # NOTE(review): the first parameter is ``cls`` and the docstring calls
        # this a class method, but no @classmethod decorator is visible here —
        # it was possibly stripped by the stub generator; confirm against the
        # implementation.
        ...
    def export_doc_context(self, precision: Optional[Dict[str, Optional[int]]] = None, main_table_name: Optional[str] = None, sub_tables_name: Optional[Union[str, List[str]]] = None, group_by: Optional[str] = None, sub_table_doc_key_name: Optional[str] = None, sub_table_doc_key_title: Optional[str] = None) -> dict:
        """
        Export the table collection as a dictionary for document context.
        
        Parameters
        ----------
        precision: dict[str, int | None], default: None
            The precision of the table values. key is the column name, value is the precision.
        
        main_table_name: str | None, default: None
            The name of the main table.
            If None, the table with the least number of rows will be used as the main table.
        
        sub_tables_name: str | list[str] | None, default: None
            The names of the sub tables.
            If list, all the listed tables will be merged to main table as sub table using group_by.
            If None, all the rest tables will be merged to main table as sub table using group_by.
        
        group_by: str | None, default: None
            The group by column of the main table - primary key.
            All the sub tables should have the column named as group_by.
            If None, use the first column with the same name in all tables as the group by column.
        
        sub_table_doc_key_name: str, default: None
            The doc key name for the sub table.
            If None, use the first sub table name as the doc key name.
        
        sub_table_doc_key_title: str, default: None
            The doc key title for the sub table.
            If None, use the first sub table title as the doc key title.
        
        Returns
        -------
        dict
        """
        ...
    def to_dict(self, serializable: bool = False, orient: str = "records") -> dict:
        """
        Convert the TableCollection object to a dictionary.
        
        Parameters
        ----------
        serializable: bool, default: False
            If True, return the serialized dictionary which means all the np.nan, np.inf, -np.inf will be converted to None.
        orient: str, default: "records"
            Orientation of the per-table dict conversion; presumably one of
            the pandas ``DataFrame.to_dict`` orients ("dict", "list", "split",
            "records", ...) — confirm against the implementation.
        """
        # NOTE(review): the original annotation for ``orient`` was
        # ``Literal[Any, Any, Any, Any]``, which is not a valid Literal type;
        # the exact allowed values could not be recovered here, so ``str`` is
        # used — restore the precise Literal once the values are confirmed.
        ...
    def convert_to_verticalBores_data(self):
        """Generate the bores_table, layers_table, and materials_name required to build a VerticalBores object."""
        ...
    def convert_to_drawApps_params(self, section_lines: Optional[Dict[str, Any]] = None):
        """
        Export the parameters required by gpro's section-generation interface.

        Parameters
        ----------
        section_lines: dict, optional
            Mapping from section name to the ordered list of bore/point IDs
            that make up that section line, e.g.
            ``{'section1': ['TJ1', 'TJ2', 'TJ3'], 'section2': ['ZK2', 'ZK3']}``.

        Note
        ----
        The original (Chinese) docstring also documented a ``log_file``
        parameter (path of an error-log file) that is not present in this
        signature — confirm against the implementation.
        """
        ...
    def get_table_by_index(self, index: int) -> TableData:
        """
        Retrieve a table by its index position.
        
        Parameters
        ----------
        index: int
            The index position of the table to retrieve (supports negative indexing)
        
        Returns
        -------
        TableData
            The table at the specified index
        
        Raises
        ------
        IndexError
            If index is out of range
        """
        ...
