import warnings
import zipfile
from pathlib import Path
from typing import Literal

import numpy as np
import pandas as pd

from ..connectors.fileConnector import (
    VrtHandler,
    convert_multiple_pdfs_to_images,
    convert_pdf_to_images,
)
from ..connectors.gdimConnector import upload_file_to_gdim
from ..dataclass.geoProfiles import BoreForCadDraw, BoreForPlanDraw, MultiProfile1D
from ..dataclass.results import SingleResult, UnitResult
from ..dataclass.tables import TableCollection
from ..dataclass.terminologies import BoreTypes
from ..dataclass.terminologies import GeoMaterialProps as GProps
from ..dataclass.terminologies import Units
from ..dataTransformers.dataCleanAlgorithm import format_dataframe_type
from ..dataTransformers.dataConverter import (
    convert_table_to_material_table,
    convert_to_plan_bores,
    convert_to_project_info,
    convert_to_section_bores_with_matrials,
)
from ..pipeline.pipeData import (
    ArrayAttributeSchema,
    IntegerAttributeSchema,
    RangeModel,
    StringAttributeSchema,
    TableAttributeSchema,
    UIAttributeSchema,
    WidgetAttribute,
)
from ..pipeline.pipeline import (
    PipeModule,
    Port,
    PortReference,
    module_decorator,
    status_manage,
)
from ..pipeline.portTypes import PortType, PortTypeHint


@module_decorator()
class ConvertToMaterialTable(PipeModule):
    """Convert the input table to a MaterialTable object which can be used for geo calculation."""

    InputTable: PortReference[PortTypeHint.TableData | PortTypeHint.TableCollection]
    OutputMaterialTable: PortReference[PortTypeHint.MaterialTable]

    _port_docs = {
        "InputTable": "The input table data.",
        "OutputMaterialTable": "The output material table.",
    }

    def __init__(
        self,
        mname: str = "ConvertToMaterialTable",
        auto_run: bool = True,
        table: PortTypeHint.TableData | PortTypeHint.TableCollection | None = None,
        column_name_map: dict[str, GProps] | None = None,
        map_key_type: Literal["name", "title"] = "title",
        sort_by_layer_number: bool = True,
        reset_material_id: bool = True,
        format_dict: dict[GProps, str] | None = None,
        table_name: str | None = None,
    ) -> None:
        """Initialize a ConvertToMaterialTable object.

        Parameters
        ----------
        table: PortTypeHint.TableData | PortTypeHint.TableCollection | None, default: None
            The input table data.

        column_name_map: dict[str, GProps] | None, default: None
            Mapping from a column name (or title) of the table to the material
            property it represents. Columns without an explicit mapping are
            matched automatically against the values of GeoMaterialProps.

        map_key_type: Literal["name", "title"], default: "title"
            Whether the keys of `column_name_map` are column names ("name") or
            column titles ("title").

        sort_by_layer_number: bool, default: True
            If True, the rows are sorted by the layer number.

        reset_material_id: bool, default: True
            If True (and `sort_by_layer_number` is True), the material_id is
            reset to ascending order after sorting, starting from 0 at the
            first row.

        format_dict: dict[GProps, str] | None, default: None
            Mapping from a material property to the dtype its column should be
            converted to.

        table_name: str | None, default: None
            The name (or title) of the table to pick when the input is a
            TableCollection. If None, the first table in the collection is used.

        Note
        ----
        GProps.LayerNumber will be converted to string type forcefully.
        """
        super().__init__(mname=mname, auto_run=auto_run)

        if table is not None:
            self.InputTable = table

        self.table_name = table_name
        self.format_dict = format_dict
        self.reset_material_id = reset_material_id
        self.sort_by_layer_number = sort_by_layer_number
        self.map_key_type = map_key_type
        self.column_name_map = column_name_map

    def execute(self) -> PortTypeHint.MaterialTable | None:
        """Convert the connected table into a material table and publish it."""
        source = self._ports_in["InputTable"].data
        if source is None:
            self._ports_out["OutputMaterialTable"].data = None
            return None

        # A TableCollection input is narrowed down to a single table first.
        if isinstance(source, PortTypeHint.TableCollection):
            source = (
                source[0]
                if self.table_name is None
                else source.get_table(self.table_name)
            )
            if source is None:
                raise ValueError(
                    f"The table '{self.table_name}' is not found in the input table collection."
                )

        result = convert_table_to_material_table(
            source,
            column_name_map=self.column_name_map,
            map_key_type=self.map_key_type,
        )

        # Optional post-processing: dtype conversion, then layer-number sort.
        if self.format_dict is not None:
            result = format_dataframe_type(result, self.format_dict)
        if self.sort_by_layer_number:
            if GProps.LayerNumber in result.columns:
                result.sort_by_layer_number(
                    inplace=True, reset_material_id=self.reset_material_id
                )
            else:
                warnings.warn(
                    f"数据中没有地层编号 '{GProps.LayerNumber}' 列，无法根据地层编号重排序."
                )

        self._ports_out["OutputMaterialTable"].data = result
        return result


@status_manage
class ConvertToMaterialTables(PipeModule):
    """Convert the input table collection to a MaterialTable collection which can be used for geo calculation."""

    def __init__(
        self,
        mname: str = "ConvertToMaterialTables",
        auto_run: bool = True,
        tables: PortTypeHint.TableCollection | None = None,
        column_name_map: dict[str, GProps] | None = None,
        map_key_type: Literal["name", "title"] = "title",
        sort_by_layer_number: bool = True,
        format_dict: dict[GProps, str] | None = None,
        reset_material_id: bool = True,
    ) -> None:
        """Initialize a ConvertToMaterialTables object.

        Parameters
        ----------
        tables: PortTypeHint.TableCollection | None, default: None
            The input table collection.

        column_name_map: dict[str, GProps] | None, default: None
            The mapping of the column name of the table and the materials name.
            The key is the column name of the table and the value is the materials name.

        map_key_type: Literal["name", "title"], default: "title"
            The type of the key of the column_name_map. "name" means the name of the
            column and "title" means the title of the column.

        sort_by_layer_number: bool, default: True
            If True, the data will be sorted by the layer number.

        format_dict: dict[GProps, str] | None, default: None
            The dictionary of the column name and the type which is used to convert
            the column to the specific type. The key is the material name and the
            value is the type. Entries whose key is not present in a given table
            are skipped for that table.

        reset_material_id: bool, default: True
            If True, the material_id will be reset to the ascending order of the
            layer number after sorting (i.e. it starts from 0 at the first row).
            Only effective when 'sort_by_layer_number' is True. The default of
            True matches the previously hard-coded behavior, so existing callers
            are unaffected.

        Note
        ----
        MaterialTables are often used when each bore or section has its own
        material table, so the key of the MaterialTables is usually the bore or
        section number or name.
        """
        super().__init__(mname=mname, auto_run=auto_run)

        self._ports_in = {
            "InputTables": Port(
                ptype=PortType.TableCollection,
                data=tables,
                pdoc="The input table collection.",
            )
        }
        self._ports_out = {
            "OutputMaterialTables": Port(
                ptype=PortType.MaterialTableCollection,
                pdoc="The output material table.",
            )
        }

        self.column_name_map = column_name_map
        self.map_key_type = map_key_type
        self.sort_by_layer_number = sort_by_layer_number
        self.format_dict = format_dict
        self.reset_material_id = reset_material_id

    @property
    def InputTables(self) -> None:
        raise AttributeError("Property 'InputTables' is write-only.")

    @InputTables.setter
    def InputTables(self, value: PortTypeHint.TableCollection) -> None:
        self["InputTables"] = value

    @property
    def OutputMaterialTables(self) -> PortTypeHint.MaterialTableCollection | None:
        return self["OutputMaterialTables"]

    def set_cal_params(self, reset: bool = True) -> dict[str, RangeModel]:
        # This module exposes no tunable calculation parameters. Return an
        # empty mapping instead of falling through with an implicit None so
        # the return value always satisfies the annotated type.
        return {}

    def execute(self) -> PortTypeHint.MaterialTableCollection | None:
        """Convert every table in the input collection to a material table.

        Returns
        -------
        A dict keyed by each table's name, or None when no input is connected.
        """
        tables: PortTypeHint.TableCollection | None = self._ports_in["InputTables"].data
        if tables is None:
            self._ports_out["OutputMaterialTables"].data = None
            return None

        material_table_collection = {}
        for table in tables:
            material_table = convert_table_to_material_table(
                table,
                column_name_map=self.column_name_map,
                map_key_type=self.map_key_type,
            )
            # Postprocess the material table.
            if self.format_dict:
                # Skip the columns that are not in this material table.
                format_dict = {
                    k: v
                    for k, v in self.format_dict.items()
                    if k in material_table.columns
                }
                material_table = format_dataframe_type(material_table, format_dict)
            if self.sort_by_layer_number:
                if GProps.LayerNumber not in material_table.columns:
                    warnings.warn(
                        f"数据中没有地层编号 '{GProps.LayerNumber}' 列，无法根据地层编号重排序."
                    )
                else:
                    material_table.sort_by_layer_number(
                        inplace=True, reset_material_id=self.reset_material_id
                    )
            material_table_collection[table.name] = material_table

        self._ports_out["OutputMaterialTables"].data = material_table_collection
        return material_table_collection


@status_manage
class ConvertToMultiProfile1D(PipeModule):
    """Convert the input table collection to a MultiProfile1D object which can be used for geo calculation."""

    def __init__(
        self,
        mname: str = "ConvertToMultiProfile1D",
        auto_run: bool = True,
        tables: PortTypeHint.TableCollection | None = None,
        material_table: PortTypeHint.MaterialTable | None = None,
        profiles_table_name: str | None = None,
        profiles_table_field_names: list[str] | None = None,
        layers_table_name: str | None = None,
        layers_table_field_names: list[str] | None = None,
        layer_number_for_index: bool = True,
        selected_profiles: list[str] | None = None,
    ) -> None:
        """Initialize a ConvertToMultiProfile1D object.

        Parameters
        ----------
        tables: PortTypeHint.TableCollection | None, default: None
            The input tables data.

        material_table: PortTypeHint.MaterialTable | None, default: None
            The input material table data which is used to get the material_id
            for each layer in the profile.

        profiles_table_name: str | None, default: None
            The name of the profile table. It can be either the name or the title of the table.

        profiles_table_field_names: list[str] | None, default: None
            The field names of the profile table. The names can be either the name or title of the fields.
            The names correspond to ["pnum", "top", "x_coord", "y_coord", "gwt_depth"] by sequence.

        layers_table_name: str | None, default: None
            The name of the layers table. It can be either the name or the title of the table.

        layers_table_field_names: list[str] | None, default: None
            The field names of the layers table. The names can be either the name or title of the fields.
            The names correspond to ["pnum", "layers_depth", "materials_id"] by sequence.

        layer_number_for_index: bool, default: True
            If True, the 3rd value in 'layers_table_field_names' will be considered as 'layer_number'.
            If False, the 3rd value in 'layers_table_field_names' will be considered as 'material_name'.

        selected_profiles: list[str] | None, default: None
            The selected boreholes from the input tables to convert to MultiProfile1D.
        """
        super().__init__(mname=mname, auto_run=auto_run)

        self._ports_in = {
            "InputTables": Port(
                ptype=PortType.TableCollection,
                data=tables,
                pdoc="The input tables data.",
            ),
            "InputMaterialTable": Port(
                ptype=PortType.MaterialTable,
                data=material_table,
                pdoc="The input material table data.",
            ),
        }
        self._ports_out = {
            "OutputMultiProfile1D": Port(
                ptype=PortType.MultiProfile1D, pdoc="The output MultiProfile1D object."
            )
        }

        self.profiles_table_name = profiles_table_name
        self.profiles_table_field_names = profiles_table_field_names
        self.layers_table_name = layers_table_name
        self.layers_table_field_names = layers_table_field_names
        self.layer_number_for_index = layer_number_for_index
        self.selected_profiles = selected_profiles

    @property
    def InputTables(self) -> None:
        raise AttributeError("Property 'InputTables' is write-only.")

    @InputTables.setter
    def InputTables(self, value: PortTypeHint.TableCollection) -> None:
        self["InputTables"] = value

    @property
    def InputMaterialTable(self) -> None:
        raise AttributeError("Property 'InputMaterialTable' is write-only.")

    @InputMaterialTable.setter
    def InputMaterialTable(self, value: PortTypeHint.MaterialTable) -> None:
        self["InputMaterialTable"] = value

    @property
    def OutputMultiProfile1D(self) -> PortTypeHint.MultiProfile1D | None:
        return self["OutputMultiProfile1D"]

    def set_cal_params(self, reset: bool = True) -> dict[str, RangeModel]:
        """Expose the user-tunable parameters together with their valid choices."""
        tables: PortTypeHint.TableCollection | None = self._ports_in["InputTables"].data
        if tables is None:
            table_name_choices = None
            profiles_table_field_names_choices = None
            layers_table_field_names_choices = None
            selected_profiles_choices = None
        else:
            # Table name choices
            table_name_choices = tables.table_titles
            if reset and table_name_choices:
                # Default: the first table lists the profiles, the second the layers.
                self.profiles_table_name = table_name_choices[0]
                # Guard against collections holding a single table; the previous
                # unconditional index raised IndexError in that case.
                if len(table_name_choices) > 1:
                    self.layers_table_name = table_name_choices[1]
            # Field choices
            if self.profiles_table_name:
                profiles_table_field_names_choices = tables.get_table(
                    self.profiles_table_name
                ).field_titles
            else:
                profiles_table_field_names_choices = None
            if self.layers_table_name:
                layers_table_field_names_choices = tables.get_table(
                    self.layers_table_name
                ).field_titles
            else:
                layers_table_field_names_choices = None
            # Profile choices
            if self.profiles_table_field_names and self.profiles_table_name:
                selected_profiles_choices = tables.get_table(self.profiles_table_name)[
                    self.profiles_table_field_names[0]
                ].values.tolist()
            else:
                selected_profiles_choices = None

        values_range = {
            "profiles_table_name": RangeModel(
                vtype="str",
                title="钻孔一览表名称",
                choices=table_name_choices,
                default=self.profiles_table_name,
            ),
            "layers_table_name": RangeModel(
                vtype="str",
                title="地层表名称",
                choices=table_name_choices,
                default=self.layers_table_name,
            ),
            "profiles_table_field_names": RangeModel(
                vtype="list",
                title="钻孔一览表字段名称",
                list_type="str",
                list_len=4,
                choices=profiles_table_field_names_choices,
                default=self.profiles_table_field_names,
            ),
            "layers_table_field_names": RangeModel(
                vtype="list",
                title="地层表字段名称",
                list_type="str",
                list_len=3,
                choices=layers_table_field_names_choices,
                default=self.layers_table_field_names,
            ),
            "selected_profiles": RangeModel(
                vtype="list",
                title="选择钻孔",
                list_type="str",
                choices=selected_profiles_choices,
                default=self.selected_profiles,
            ),
        }
        return values_range

    def _materials_id_for(
        self, keys, material_table: PortTypeHint.MaterialTable
    ) -> list:
        """Resolve each key to a material_id, by layer number or by material name."""
        if self.layer_number_for_index:
            return [material_table.get_material_id(layer_number=key) for key in keys]
        return [material_table.get_material_id(material_name=key) for key in keys]

    def _build_profiles_table(
        self, tables: PortTypeHint.TableCollection
    ) -> pd.DataFrame:
        """Extract and normalize the profiles table to columns ["pnum", "top", "x_coord", "y_coord", "gwt_depth"]."""
        profiles_table = tables.get_table(self.profiles_table_name)
        if profiles_table is None:
            raise ValueError(
                f"The table '{self.profiles_table_name}' is not found in the input tables."
            )
        columns = ["pnum", "top", "x_coord", "y_coord", "gwt_depth"]
        if self.profiles_table_field_names is None:
            # Assume the first 5 columns are already in the expected order.
            try:
                profiles_table = pd.DataFrame(
                    data=profiles_table.values[:, :5], columns=columns
                )
            except Exception as exc:  # was a bare `except`; keep the cause chained
                raise ValueError(
                    f"The table '{self.profiles_table_name}' cannot be converted to profile table automatically."
                ) from exc
        else:
            if len(self.profiles_table_field_names) != 5:
                raise ValueError(
                    f"The number of names for property `profiles_table_field_names` must be 5,"
                    f"but got {len(self.profiles_table_field_names)}."
                )
            # Pick the configured fields and rename them to the standard columns.
            profiles_table = pd.DataFrame(
                {
                    column: profiles_table[field].values
                    for column, field in zip(columns, self.profiles_table_field_names)
                }
            )
        # Format the profiles_table
        profiles_table = format_dataframe_type(
            profiles_table,
            dtype={
                "pnum": "str",
                "top": "float",
                "x_coord": "float",
                "y_coord": "float",
                "gwt_depth": "float",
            },
        )
        # Filter the unwanted rows in profiles_table
        if self.selected_profiles is not None:
            profiles_table = profiles_table[
                profiles_table["pnum"].isin(self.selected_profiles)
            ]
        return profiles_table

    def _build_layers_table(
        self,
        tables: PortTypeHint.TableCollection,
        material_table: PortTypeHint.MaterialTable,
    ) -> pd.DataFrame:
        """Extract and normalize the layers table to columns ["pnum", "layers_depth", "materials_id"]."""
        layers_table = tables.get_table(self.layers_table_name)
        if layers_table is None:
            raise ValueError(
                f"The table '{self.layers_table_name}' is not found in the input tables."
            )
        if self.layers_table_field_names is None:
            # The first 2 columns are ["pnum", "layers_depth"]; the 3rd is
            # "layer_number" or "material_name" depending on layer_number_for_index.
            # Sort so layers_depth is ascending (stable sort keeps per-profile order).
            layers_table = layers_table.sort_values(layers_table.columns[1])
            materials_id = self._materials_id_for(
                layers_table.values[:, 2], material_table
            )
            # BUG FIX: write the ids into the 3rd COLUMN. The previous
            # `layers_table.iloc[2] = ...` addressed the 3rd ROW instead.
            layers_table.iloc[:, 2] = np.array(materials_id)
            try:
                layers_table = pd.DataFrame(
                    data=layers_table.values[:, :3],
                    columns=["pnum", "layers_depth", "materials_id"],
                )
            except Exception as exc:  # was a bare `except`; keep the cause chained
                raise ValueError(
                    f"The table '{self.layers_table_name}' cannot be converted to layers table automatically."
                ) from exc
        else:
            if len(self.layers_table_field_names) != 3:
                raise ValueError(
                    f"The number of names for property `layers_table_field_names` must be 3,"
                    f"but got {len(self.layers_table_field_names)}."
                )
            # Sort so layers_depth is ascending (stable sort keeps per-profile order).
            layers_table = layers_table.sort_values(self.layers_table_field_names[1])
            materials_id = self._materials_id_for(
                layers_table[self.layers_table_field_names[2]], material_table
            )
            # Create DataFrame with standard column names.
            layers_table = pd.DataFrame(
                {
                    "pnum": layers_table[self.layers_table_field_names[0]].values,
                    "layers_depth": layers_table[self.layers_table_field_names[1]].values,
                    "materials_id": np.array(materials_id),
                }
            )
        # Format the layers_table
        layers_table = format_dataframe_type(
            layers_table, dtype={"pnum": "str", "layers_depth": "float"}
        )
        # Filter the unwanted rows in layers_table
        if self.selected_profiles is not None:
            layers_table = layers_table[
                layers_table["pnum"].isin(self.selected_profiles)
            ]
        return layers_table

    def execute(self) -> PortTypeHint.MultiProfile1D | None:
        """Build a MultiProfile1D from the configured profiles and layers tables.

        Returns
        -------
        The MultiProfile1D object, or None when any required input is missing.
        """
        tables: PortTypeHint.TableCollection | None = self._ports_in["InputTables"].data
        material_table: PortTypeHint.MaterialTable | None = self._ports_in[
            "InputMaterialTable"
        ].data

        if (
            tables is None
            or self.profiles_table_name is None
            or material_table is None
            or self.layers_table_name is None
        ):
            self._ports_out["OutputMultiProfile1D"].data = None
            return None

        profiles_table = self._build_profiles_table(tables)
        layers_table = self._build_layers_table(tables, material_table)

        # Create the MultiProfile1D
        multi_profile_1d = MultiProfile1D(
            profiles_table=profiles_table, layers_table=layers_table
        )

        self._ports_out["OutputMultiProfile1D"].data = multi_profile_1d
        return multi_profile_1d


@status_manage
class PdfToImages(PipeModule):
    """Convert PDF files to images."""

    def __init__(
        self,
        mname: str = "PdfToImages",
        auto_run: bool = True,
        files: PortTypeHint.FilePath | PortTypeHint.FilesPath | None = None,
        save_folders: list[str | Path] | str | Path | None = None,
        dpi: int = 200,
        format: Literal["png", "jpg", "jpeg"] = "png",
        image_prefixes: list[str] | str = "image",
        first_pages: list[int] | int = 1,
        last_pages: list[int | None] | int | None = None,
        widths: list[int | None] | int | None = None,
        heights: list[int | None] | int | None = None,
        output_path: str | Path | None = None,
        pipeline_workspace_as_output_path: bool = True,
    ) -> None:
        """Initialize a PdfToImages object.

        Parameters
        ----------
        files: PortTypeHint.FilePath | PortTypeHint.FilesPath | None, default: None
            Single PDF file path or list of PDF files path to convert.
            If the files are assigned by input port, self.files will be overwritten by the input data.

        save_folders: list[str | Path] | str | Path | None, default: None
            List of folders to save images for each PDF, or single folder for all PDFs.
            If None, images will be saved to output_path directly.

        dpi: int, default: 200
            The DPI of the pictures converted from documents.

        format: Literal["png", "jpg", "jpeg"], default: "png"
            The format of the images converted from PDFs.

        image_prefixes: list[str] | str, default: "image"
            List of prefixes for each PDF, or single prefix for all PDFs.

        first_pages: list[int] | int, default: 1
            List of first pages for each PDF, or single first page for all PDFs (1-based).

        last_pages: list[int | None] | int | None, default: None
            List of last pages for each PDF, or single last page for all PDFs (1-based, inclusive).
            If None, convert all pages.

        widths: list[int | None] | int | None, default: None
            List of widths for each PDF, or single width for all PDFs.
            If None, use DPI for scaling.

        heights: list[int | None] | int | None, default: None
            List of heights for each PDF, or single height for all PDFs.
            If None, use DPI for scaling.

        output_path: str | Path | None, default: None
            The base path to save the output images.
            If None, the module will try to use the workspace from the pipeline, else use the current working directory.

        pipeline_workspace_as_output_path: bool, default: True
            If True, the 'workspace' attribute of the pipeline will be used as the base path of the output images.
            If False, the module's 'output_path' attribute will be used as the base path of the output images.
        """
        super().__init__(mname=mname, auto_run=auto_run)

        self._ports_in = {
            "InputFiles": Port(
                ptype=[PortType.FilePath, PortType.FilesPath],
                data=files,
                pdoc="The input PDF file(s).",
            )
        }
        self._ports_out = {
            "OutputImages": Port(
                ptype=PortType.FilesPath, pdoc="The output image files."
            )
        }

        self.files = files
        self.save_folders = save_folders
        self.dpi = dpi
        self.format = format
        self.image_prefixes = image_prefixes
        self.first_pages = first_pages
        self.last_pages = last_pages
        self.widths = widths
        self.heights = heights
        self.output_path = output_path
        self.pipeline_workspace_as_output_path = pipeline_workspace_as_output_path

    @property
    def InputFiles(self) -> None:
        raise AttributeError("Property 'InputFiles' is write-only.")

    @InputFiles.setter
    def InputFiles(self, value: PortTypeHint.FilePath | PortTypeHint.FilesPath) -> None:
        self["InputFiles"] = value

    @property
    def OutputImages(self) -> PortTypeHint.FilesPath | None:
        return self["OutputImages"]

    def set_cal_params(self, reset: bool = True) -> dict[str, RangeModel]:
        return {
            "files": RangeModel(
                vtype=("str", "list"),
                list_type="str",
                title="PDF文件路径",
                default=self.files,
            ),
            "image_prefixes": RangeModel(
                vtype=("str", "list"),
                list_type="str",
                title="图片前缀",
                default=self.image_prefixes,
            ),
            "first_pages": RangeModel(
                vtype=("int", "list"),
                list_type="int",
                title="起始页码",
                default=self.first_pages,
            ),
            "last_pages": RangeModel(
                vtype=("int", "list", "None"),
                list_type=("int", "None"),
                title="终止页码",
                default=self.last_pages,
            ),
        }

    @staticmethod
    def _scalar_or_first(value, scalar_types):
        """Return `value` itself when it is an all-PDFs scalar, else its first element."""
        return value if isinstance(value, scalar_types) else value[0]

    def _resolve_base_output_path(self) -> Path:
        """Resolve the base directory for output images.

        Preference order depends on `pipeline_workspace_as_output_path`:
        workspace-then-output_path when True, output_path-then-workspace when
        False; the current working directory is the final fallback.
        """
        workspace = (
            Path(self.pipeline.workspace)
            if self.pipeline and self.pipeline.workspace
            else None
        )
        explicit = Path(self.output_path) if self.output_path is not None else None
        if self.pipeline_workspace_as_output_path:
            candidates = (workspace, explicit)
        else:
            candidates = (explicit, workspace)
        for candidate in candidates:
            if candidate is not None:
                return candidate
        return Path.cwd()

    def execute(self) -> PortTypeHint.FilesPath | None:
        """Convert the configured PDF file(s) to images and return the image paths."""
        pdf_files: PortTypeHint.FilePath | PortTypeHint.FilesPath | None = (
            self._ports_in["InputFiles"].data
        )

        if pdf_files is None and self.files is None:
            self._ports_out["OutputImages"].data = None
            return None

        if pdf_files is None:
            pdf_files = self.files
        else:
            self.files = pdf_files

        # Normalize input to a list. A single path may arrive as either a str
        # or a Path; the previous `isinstance(..., str)` check let a bare Path
        # fall through and be iterated element-wise.
        if isinstance(pdf_files, (str, Path)):
            pdf_paths = [pdf_files]
        else:
            pdf_paths = list(pdf_files)

        base_output_path = self._resolve_base_output_path()

        # Resolve one save folder per PDF.
        if self.save_folders is None:
            # Use base output path for all.
            actual_save_folders = [base_output_path] * len(pdf_paths)
        elif isinstance(self.save_folders, (str, Path)):
            # Single folder for all PDFs.
            actual_save_folders = [
                Path(base_output_path) / self.save_folders
            ] * len(pdf_paths)
        else:
            # List of folders - verify length matches pdf_paths.
            if len(self.save_folders) != len(pdf_paths):
                raise ValueError(
                    f"Length of save_folders ({len(self.save_folders)}) must match "
                    f"length of pdf_paths ({len(pdf_paths)})"
                )
            actual_save_folders = [
                (
                    Path(base_output_path) / folder
                    if folder
                    else Path(base_output_path)
                )
                for folder in self.save_folders
            ]

        # Convert PDFs to images.
        if len(pdf_paths) == 1:
            # Single PDF conversion: list-valued options collapse to their first entry.
            image_paths = convert_pdf_to_images(
                pdf_path=pdf_paths[0],
                output_path=actual_save_folders[0],
                dpi=self.dpi,
                format=self.format,
                image_prefix=self._scalar_or_first(self.image_prefixes, str),
                first_page=self._scalar_or_first(self.first_pages, int),
                last_page=self._scalar_or_first(self.last_pages, (int, type(None))),
                width=self._scalar_or_first(self.widths, (int, type(None))),
                height=self._scalar_or_first(self.heights, (int, type(None))),
            )
        else:
            # Multiple PDFs conversion.
            image_paths = convert_multiple_pdfs_to_images(
                pdf_paths=pdf_paths,
                output_paths=actual_save_folders,
                dpi=self.dpi,
                format=self.format,
                image_prefixes=self.image_prefixes,
                first_pages=self.first_pages,
                last_pages=self.last_pages,
                widths=self.widths,
                heights=self.heights,
            )

        self._ports_out["OutputImages"].data = image_paths
        return image_paths

@status_manage
class TableToDataFrame(PipeModule):
    """Convert TableData to DataFrame.

    This module converts a TableData object (which has enhanced metadata like field titles,
    units, descriptions) to a standard pandas DataFrame. This is useful when you need to
    use the data with libraries or functions that expect pure pandas DataFrames.

    Examples
    --------
    Basic usage:
    >>> converter = TableToDataFrame()
    >>> converter.InputTable = my_table_data
    >>> result_df = converter.OutputGeneralTable

    Using field titles as column names:
    >>> converter = TableToDataFrame(use_titles_as_columns=True)
    >>> converter.InputTable = my_table_data
    >>> result_df = converter.OutputGeneralTable  # DataFrame with human-readable column names

    Resetting index:
    >>> converter = TableToDataFrame(preserve_index=False)
    >>> converter.InputTable = my_table_data
    >>> result_df = converter.OutputGeneralTable  # DataFrame with default 0, 1, 2... index
    """

    def __init__(
        self,
        mname: str = "TableToDataFrame",
        auto_run: bool = True,
        table: PortTypeHint.TableData | None = None,
        use_titles_as_columns: bool = False,
        preserve_index: bool = True,
    ) -> None:
        """Initialize a TableToDataFrame object.

        Parameters
        ----------
        table: PortTypeHint.TableData | None, default: None
            The input TableData to convert.

        use_titles_as_columns: bool, default: False
            If True, use field titles as column names in the resulting DataFrame.
            If False, use the original column names.

        preserve_index: bool, default: True
            If True, preserve the original index of the TableData.
            If False, reset the index to default range index.
        """
        super().__init__(mname=mname, auto_run=auto_run)

        self._ports_in = {
            "InputTable": Port(
                ptype=PortType.TableData, data=table, pdoc="The input TableData."
            )
        }
        self._ports_out = {
            "OutputGeneralTable": Port(
                ptype=PortType.GeneralTable, pdoc="The output pandas DataFrame."
            )
        }

        self.use_titles_as_columns = use_titles_as_columns
        self.preserve_index = preserve_index

    @property
    def InputTable(self) -> None:
        raise AttributeError("Property 'InputTable' is write-only.")

    @InputTable.setter
    def InputTable(self, value: PortTypeHint.TableData | None) -> None:
        self["InputTable"] = value

    @property
    def OutputGeneralTable(self) -> PortTypeHint.GeneralTable | None:
        return self["OutputGeneralTable"]

    def set_cal_params(self, reset: bool = True) -> dict[str, RangeModel]:
        """Expose the calculation parameters editable from the UI."""
        return {
            "use_titles_as_columns": RangeModel(
                vtype="bool",
                title="使用字段标题作为列名",
                default=self.use_titles_as_columns,
            ),
            "preserve_index": RangeModel(
                vtype="bool", title="保留原始索引", default=self.preserve_index
            ),
        }

    def execute(self) -> PortTypeHint.GeneralTable | None:
        """Convert the input TableData to a pure pandas DataFrame.

        Returns None (and clears the output port) when no input is connected.
        """
        table: PortTypeHint.TableData | None = self._ports_in["InputTable"].data

        if table is None:
            self._ports_out["OutputGeneralTable"].data = None
            return None

        # Create a copy to avoid modifying the original
        result_table = table.copy()

        # Rename columns to their human-readable titles if requested
        if self.use_titles_as_columns:
            result_table.rename_columns_to_titles(inplace=True)

        # Re-wrap as a plain pandas DataFrame (drops TableData metadata)
        result_df = pd.DataFrame(result_table)

        # Optionally replace the original index with a default range index
        if not self.preserve_index:
            result_df = result_df.reset_index(drop=True)

        self._ports_out["OutputGeneralTable"].data = result_df
        return result_df


@module_decorator()
class TableToSingleResult(PipeModule):
    """Convert a specified row or column of a TableData to a SingleResult.

    This module extracts values from a specified row or column in a TableData and converts them
    to a SingleResult object. The output can be in two forms:
    1. Single UnitResult with value as a list containing all row or column values
    2. Multiple UnitResult objects, each containing one row or column value

    Examples
    --------
    Single list result:
    >>> converter = TableToSingleResult(axis="row", row_index=0, result_mode="single")
    >>> converter.InputTable = my_table_data
    >>> result = converter.OutputSingleResult  # SingleResult with one UnitResult containing list of all values in current row

    Multiple individual results:
    >>> converter = TableToSingleResult(axis="column", column_index=0, result_mode="multiple")
    >>> converter.InputTable = my_table_data
    >>> result = converter.OutputSingleResult  # SingleResult with multiple UnitResults, one per row
    """

    InputTable: PortReference[PortTypeHint.TableData]
    OutputSingleResult: PortReference[PortTypeHint.SingleResult]

    _port_docs = {
        "InputTable": "The input TableData.",
        "OutputSingleResult": "The output SingleResult.",
    }

    def __init__(
        self,
        mname: str = "TableToSingleResult",
        auto_run: bool = True,
        table: PortTypeHint.TableData | None = None,
        axis: Literal["row", "column"] = "row",
        row_index: str | int = 0,
        column_index: str | int = 0,
        result_mode: Literal["single", "multiple"] = "multiple",
        single_result_name: str | None = None,
        single_result_title: str | None = None,
        multiple_result_prefix: str = "",
        use_row_index: bool = True,
    ) -> None:
        """Initialize a TableToSingleResult object.

        Parameters
        ----------
        table: PortTypeHint.TableData | None, default: None
            The input TableData to convert.

        axis: Literal["row", "column"], default: "row"
            The axis to extract the data from.
            - "row": Extract the data from one row.
            - "column": Extract the data from one column.

        row_index: str | int, default: 0
            The index of the row to extract the data from.
            If str, the index of the table will be used.
            If int, the index number of the row will be used.

        column_index: str | int, default: 0
            The index of the column to extract the data from.
            If str, the column name or title of the table will be used.
            If int, the index number of the column will be used.

        result_mode: Literal["single", "multiple"], default: "multiple"
            Mode of conversion:
            - "single": Convert all values of the specified row or column to a list and saved in a UnitResult.
            - "multiple": Create multiple UnitResult objects, one per column or one per row.

        single_result_name: str | None, default: None
            Name for the single UnitResult when result_mode="single".
            If None,
                axis="row": uses the row index.
                axis="column": uses the column name.

        single_result_title: str | None, default: None
            Title for the single UnitResult when result_mode="single".
            If None,
                axis="row": uses the row index.
                axis="column": uses the column title from metadata.

        multiple_result_prefix: str, default: ""
            Prefix for naming multiple UnitResults when result_mode="multiple".
            When axis="row", results are named "{prefix}_{column_name}". If empty, the plain column name and title are used.
            When axis="column", results are named "{prefix}_{sequential_number}" or "{prefix}_{row_index}". If empty, only the row index (or sequential number) is used.

        use_row_index: bool, default: True
            Only valid when result_mode="multiple" and axis="column".
            If True, uses the row index in result names.
            If False, uses sequential numbering starting from 0.
        """
        super().__init__(mname=mname, auto_run=auto_run)

        if table is not None:
            self.InputTable = table

        self.axis = axis
        self.row_index = row_index
        self.column_index = column_index
        self.result_mode = result_mode
        self.single_result_name = single_result_name
        self.single_result_title = single_result_title
        self.multiple_result_prefix = multiple_result_prefix
        self.use_row_index = use_row_index

    def execute(self) -> PortTypeHint.SingleResult | None:
        """Extract the configured row/column and publish it as a SingleResult.

        Returns None (and clears the output port) when no input is connected.

        Raises
        ------
        ValueError
            If `axis` or `result_mode` has an unsupported value.
        """
        table: PortTypeHint.TableData | None = self._ports_in["InputTable"].data

        if table is None:
            self._ports_out["OutputSingleResult"].data = None
            return None

        fields_metadata = table.fields_meta

        if self.axis == "row":
            # Extract data from a specific row
            if isinstance(self.row_index, str):
                data_series = table.loc[self.row_index]
            else:
                data_series = table.iloc[self.row_index]
            # Single result
            if self.result_mode == "single":
                data_values = data_series.tolist()
                if self.single_result_name is None:
                    name = str(self.row_index)
                else:
                    name = self.single_result_name
                if self.single_result_title is None:
                    title = f"{self.row_index} 行"
                else:
                    title = self.single_result_title

                unit_result = UnitResult(
                    name=name,
                    title=title,
                    unit=Units.UNITLESS,
                    value=data_values,
                    description=f"Values from row {self.row_index}",
                )
                single_result = SingleResult([unit_result])
            # Multiple result
            elif self.result_mode == "multiple":
                unit_results = []
                for column_name, value in data_series.items():
                    # Look the field metadata up once per column.
                    field_meta = fields_metadata[column_name]
                    column_title = field_meta.title
                    if self.multiple_result_prefix:
                        name = f"{self.multiple_result_prefix}_{column_name}"
                        title = f"{self.multiple_result_prefix}_{column_title}"
                    else:
                        name = column_name
                        title = column_title
                    unit_result = UnitResult(
                        name=name,
                        title=title,
                        value=value,
                        unit=field_meta.unit,
                        description=field_meta.description,
                    )
                    unit_results.append(unit_result)
                single_result = SingleResult(unit_results)
            else:
                raise ValueError(
                    f"Invalid result_mode: {self.result_mode}. Must be 'single' or 'multiple'."
                )

        elif self.axis == "column":
            # Extract data from a specific column
            if isinstance(self.column_index, str):
                data_series = table[self.column_index]
                field_meta = table.get_field_metadata(self.column_index)
            else:
                data_series = table.iloc[:, self.column_index]
                field_meta = fields_metadata[table.field_names[self.column_index]]

            # Single result
            if self.result_mode == "single":
                data_values = data_series.tolist()
                if self.single_result_name is None:
                    if isinstance(self.column_index, int):
                        name = str(self.column_index)
                    else:
                        if self.column_index in table.field_names:
                            name = self.column_index
                        else:
                            name = table.title_to_name[self.column_index]
                else:
                    name = self.single_result_name
                if self.single_result_title is None:
                    if isinstance(self.column_index, int):
                        title = f"{self.column_index} 列"
                    else:
                        if self.column_index in table.field_titles:
                            title = self.column_index
                        else:
                            title = table.name_to_title[self.column_index]
                else:
                    title = self.single_result_title

                unit_result = UnitResult(
                    name=name,
                    title=title,
                    unit=field_meta.unit,
                    value=data_values,
                    description=field_meta.description,
                )
                single_result = SingleResult([unit_result])
            # Multiple result
            elif self.result_mode == "multiple":
                # The prefix is loop-invariant; compute it once.
                if self.multiple_result_prefix:
                    prefix = f"{self.multiple_result_prefix}_"
                else:
                    prefix = ""
                unit_results = []
                # Bug fix: the sequential counter was previously initialized
                # but never incremented, so with use_row_index=False every
                # result collided on the same "..._0" name.
                for i, (index, value) in enumerate(data_series.items()):
                    if self.use_row_index:
                        name = f"{prefix}{index}"
                        title = f"{prefix}({index} 行)"
                    else:
                        # Bug fix: drop the stray extra underscore so naming
                        # matches the use_row_index=True branch.
                        name = f"{prefix}{i}"
                        title = f"{prefix}({i} 行)"

                    unit_result = UnitResult(
                        name=name,
                        title=title,
                        value=value,
                        unit=field_meta.unit,
                        description=field_meta.description,
                    )
                    unit_results.append(unit_result)
                single_result = SingleResult(unit_results)
            else:
                # Previously missing: an invalid mode fell through and raised
                # NameError on `single_result` below instead of ValueError.
                raise ValueError(
                    f"Invalid result_mode: {self.result_mode}. Must be 'single' or 'multiple'."
                )
        else:
            raise ValueError(f"Invalid axis: {self.axis}. Must be 'row' or 'column'.")

        self._ports_out["OutputSingleResult"].data = single_result
        return single_result


@status_manage
class SingleResultToTable(PipeModule):
    """Convert a SingleResult into a single-row TableData.

    Each UnitResult field of the input SingleResult becomes one column of the
    output TableData, and the corresponding values populate the table's only
    row.

    Examples
    --------
    >>> converter = SingleResultToTable()
    >>> converter.InputSingleResult = my_single_result
    >>> table_data = converter.OutputTable  # TableData with one row containing all values
    """

    def __init__(
        self,
        mname: str = "SingleResultToTable",
        auto_run: bool = True,
        single_result: PortTypeHint.SingleResult | None = None,
        table_name: str | None = None,
        table_title: str | None = None,
        joiner: str = ",",
    ) -> None:
        """Initialize a SingleResultToTable object.

        Parameters
        ----------
        single_result: PortTypeHint.SingleResult | None, default: None
            The SingleResult to convert.

        table_name: str | None, default: None
            Name to assign to the resulting table. When None, the name
            produced by the conversion itself is kept.

        table_title: str | None, default: None
            Title to assign to the resulting table. When None, the title
            produced by the conversion itself is kept.

        joiner: str, default: ","
            Separator used when list values are flattened into one string.
        """
        super().__init__(mname=mname, auto_run=auto_run)

        self._ports_in = {
            "InputSingleResult": Port(
                ptype=PortType.SingleResult,
                data=single_result,
                pdoc="The input SingleResult.",
            )
        }
        self._ports_out = {
            "OutputTable": Port(ptype=PortType.TableData, pdoc="The output TableData.")
        }

        self.table_name = table_name
        self.table_title = table_title
        self.joiner = joiner

    @property
    def InputSingleResult(self) -> None:
        raise AttributeError("Property 'InputSingleResult' is write-only.")

    @InputSingleResult.setter
    def InputSingleResult(self, value: PortTypeHint.SingleResult | None) -> None:
        self["InputSingleResult"] = value

    @property
    def OutputTable(self) -> PortTypeHint.TableData | None:
        return self["OutputTable"]

    def set_cal_params(self, reset: bool = True) -> dict[str, RangeModel]:
        """Expose the (hidden) joiner parameter for calculation settings."""
        return {
            "joiner": RangeModel(
                vtype="str", title="列表值连接符", default=self.joiner, visible=False
            )
        }

    def execute(self) -> PortTypeHint.TableData | None:
        """Build the one-row TableData and publish it on the output port."""
        source: PortTypeHint.SingleResult | None = self._ports_in[
            "InputSingleResult"
        ].data

        out_port = self._ports_out["OutputTable"]
        if source is None:
            out_port.data = None
            return None

        # Delegate the actual conversion to the SingleResult itself.
        converted = source.convert_to_table_data(joiner=self.joiner)

        # Apply optional name/title overrides configured on this module.
        if self.table_name is not None:
            converted.name = self.table_name
        if self.table_title is not None:
            converted.title = self.table_title

        out_port.data = converted
        return converted


@module_decorator()
class VrtToGeoTiff(PipeModule):
    """Convert a VRT file to a GeoTIFF or PNG file with metadata."""

    OutputFiles: PortReference[PortTypeHint.FilesPath | PortTypeHint.FilePath]

    _port_docs = {
        "OutputFiles": "The output GeoTIFF or PNG files.",
    }

    def __init__(
        self,
        mname: str = "VrtToGeoTiff",
        auto_run: bool = True,
        vrt_file: str | Path | None = None,
        directory_path: str | Path | None = None,
        output_path: str | Path | None = None,
        colormap: str | dict | None = None,
        vrt_file_pattern: str | None = None,
        time_info_pattern: str | None = None,
        format: Literal["geotiff", "png"] = "geotiff",
        compress: str = "lzw",
        predictor: int = 2,
    ) -> None:
        """Initialize a VrtToGeoTiff object.

        Parameters
        ----------
        vrt_file: str | Path | None, default: None
            Path of one VRT file to convert. Mutually exclusive with
            directory_path.

        directory_path: str | Path | None, default: None
            Directory holding VRT files for time-series processing.
            Mutually exclusive with vrt_file.

        output_path: str | Path | None, default: None
            Directory that receives the generated files. When None, output is
            written next to the input with a format-specific suffix.

        colormap: str | dict | None, default: None
            Colormap specification. One of:
            - a Plotly colorscale name: "viridis", "plasma", "inferno",
              "magma", "cividis", "turbo", "rainbow", "jet", "hot", "cool",
              "RdYlBu", "RdBu", "Spectral", etc.
            - a custom dict {value: [r, g, b, a]} whose keys are 0-1 floats or
              0-255 integers, e.g.
              {0.0: [0, 0, 255, 255], 0.5: [255, 255, 0, 255], 1.0: [255, 0, 0, 255]}
            - None, which falls back to the default "viridis" colorscale.

        vrt_file_pattern: str | None, default: None
            File pattern (e.g. "*.vrt", "Depth*.vrt") used to select files
            when directory_path is given; ignored otherwise.

        time_info_pattern: str | None, default: None
            Regex used to extract time information from each filename.
            When None, built-in default patterns are used.

        format: Literal["geotiff", "png"], default: "geotiff"
            Output format:
            - "geotiff": .tif files with embedded geospatial metadata.
            - "png": .png files plus companion .pgw (world file), .prj
              (projection) and metadata.json files for GIS compatibility.

        compress: str, default: "lzw"
            GeoTIFF compression method; only used when format="geotiff".
            - "lzw": lossless, good ratio, widely supported. The usual choice
              for most raster/scientific data when file size matters.
            - "deflate": lossless LZW alternative; sometimes compresses
              better, and some software prefers it.
            - "jpeg": lossy, smallest files. Only suitable for RGB imagery
              where minor quality loss is acceptable - not for scientific
              data or single-band rasters.
            - "none": no compression. Largest files, fastest access; useful
              for temporary files or speed-critical processing.

        predictor: int, default: 2
            Predictor used to improve lossless compression; only used when
            format="geotiff" and compress is "lzw" or "deflate".
            - 1 (none): compress data as-is. Best for random/noisy or
              already-compressed data.
            - 2 (horizontal differencing): stores differences between
              adjacent pixels. Best for typical imagery and gradually varying
              rasters; typically 20-50% better compression.
            - 3 (floating point): optimized for float32/64 scientific data
              such as elevation or temperature surfaces; typically 40-60%
              better compression.

        Ports
        -----
        OutputFiles: PortReference[PortTypeHint.FilesPath | PortTypeHint.FilePath]
        """
        super().__init__(mname=mname, auto_run=auto_run)

        self.vrt_file = vrt_file
        self.directory_path = directory_path
        self.output_path = output_path
        self.colormap = colormap
        self.vrt_file_pattern = vrt_file_pattern
        self.time_info_pattern = time_info_pattern
        self.format: Literal["geotiff", "png"] = format
        self.compress = compress
        self.predictor = predictor

    def update_ui_schema(self, reset: bool = False) -> dict[str, UIAttributeSchema]:
        """Describe the UI widgets for the user-editable attributes."""
        return {
            "directory_path": StringAttributeSchema(title="目录路径"),
            "colormap": StringAttributeSchema(title="色带名称"),
        }

    def execute(self) -> PortTypeHint.FilesPath | PortTypeHint.FilePath | None:
        """Run the conversion and publish the generated file path(s)."""
        out_port = self._ports_out["OutputFiles"]

        # Nothing to convert without either a VRT file or a directory of them.
        if self.vrt_file is None and self.directory_path is None:
            out_port.data = None
            return None

        handler = VrtHandler(
            time_info_pattern=self.time_info_pattern,
        )

        produced = handler.generate_geotiff(
            vrt_path=self.vrt_file,
            directory_path=self.directory_path,
            output_path=self.output_path,
            colormap=self.colormap,
            pattern=self.vrt_file_pattern,
            format=self.format,
            compress=self.compress,
            predictor=self.predictor,
        )

        # A single generated file is published as a bare path, several as a list.
        result = produced[0] if len(produced) == 1 else produced
        out_port.data = result
        return result  # type: ignore


#########################
# --- Legacy Modules ---
#########################


@module_decorator()
class ConvertToBoreForCadDraw(PipeModule):
    """Convert the input table collection to a BoreForCadDraw object and save it to .gsc file
    which can be used for cad drawing (钻孔柱状图)."""

    # Input ports: source tables, the geo-parameter table, project info and
    # (optionally) the gdim token/project id used when uploading the file.
    InputTables: PortReference[PortTypeHint.TableCollection]
    InputGeoParamsTable: PortReference[PortTypeHint.TableData]
    InputProjectInfo: PortReference[PortTypeHint.SingleResult]
    InputToken: PortReference[PortTypeHint.Token]
    # Output ports: the in-memory draw object and the generated .gsc file.
    OutputBoreForCadDraw: PortReference[PortTypeHint.BoreForCadDraw]
    OutputFile: PortReference[PortTypeHint.FilePath | PortTypeHint.GdimFile]

    # NOTE(review): InputToken has no entry here, unlike the other ports —
    # confirm whether that omission is intentional.
    _port_docs = {
        "InputTables": "The input table collection containing bore, layer, and lab test data etc.",
        "InputGeoParamsTable": "The input geo params table.",
        "InputProjectInfo": "The input project info.",
        "OutputBoreForCadDraw": "The output BoreForCadDraw object.",
        "OutputFile": "The output gsc file.",
    }

    def __init__(
        self,
        mname: str | None = "ConvertToBoreForCadDraw",
        auto_run: bool = True,
        tables: PortTypeHint.TableCollection | None = None,
        geo_params_table: PortTypeHint.TableData | None = None,
        proj_info: PortTypeHint.SingleResult | None = None,
        name_maps: dict[str, dict[str, str]] | None = None,
        selected_bores: list[str] | None = None,
        drawing_scales: dict[str, int] | list[dict[str, str | int]] | int | None = None,
        proj_info_name_map: dict[str, str] | None = None,
        sample_types_map: dict[str, int] | None = None,
        output_path: str | Path | None = None,
        gsc_file_name: str = "bore_for_cad_draw.gsc",
        pipeline_workspace_as_output_path: bool = True,
        token: str | None = None,
        proj_id: int | str | None = None,
        save_to_gdim: bool = False,
    ) -> None:
        """Initialize a ConvertToBoreForCadDraw object.

        Parameters
        ----------
        tables: PortTypeHint.TableCollection | None, default: None
            Table collection with bore, layer, test data etc. Expected table
            names:
            bore_table: 钻孔一览表
            layer_table: 地层表
            materials_table: 标准地层表
            spt_table: 标贯表
            cpt_table: 双桥静探表
            dpt_table: 动探表
            wave_table: 波速表
            samples_table: 取样表
            soils_test_table: 常规试验表

        geo_params_table: PortTypeHint.TableData | None, default: None
            Geo parameter data from the Gdim App '岩土参数建议值表'
            (geo_parameters_table).

        proj_info: PortTypeHint.SingleResult | None, default: None
            SingleResult carrying the project information.

        name_maps: dict[str, dict[str, str]] | None, default: None
            Mapping of table names and field names, structured as:
            {
                "table_names": {"bore_table": "actual_bore_table_name", ...},
                "field_names": {"bore_table": {"bore_num": "actual_bore_num_field", ...}, ...}
            }

        selected_bores: list[str] | None, default: None
            Bore numbers to convert. All bores are converted when None.

        drawing_scales: dict[str, int] | list | int | None, default: None
            Drawing scale(s) for the bores:
            - int: the same scale is applied to every bore;
            - dict: keys are bore numbers, values are drawing scales;
            - list: entries shaped like
              `[{"bore_num": "num_1", "drawing_scales": 200}, {"bore_num": "num_2", "drawing_scales": 300}]`

        proj_info_name_map: dict[str, str] | None, default: None
            Field-name mapping from proj_info to ProjectInfo.

        sample_types_map: dict[str, int] | None, default: None
            Maps string sample-type names to integer codes, e.g.
            {"厚壁原状": 0, "薄壁原状": 0, "扰动样": 1, "岩石样": 2, "水样": 3}

        output_path: str | Path | None, default: None
            Where the generated gsc file is written. When None, the pipeline
            workspace is tried first, then the current working directory.

        gsc_file_name: str, default: "bore_for_cad_draw.gsc"
            File name of the generated gsc file.

        pipeline_workspace_as_output_path: bool, default: True
            When True, the pipeline's 'workspace' attribute is preferred when
            resolving the output path; when False, this module's own
            'output_path' attribute is preferred.

        token: str | None
            User token; only relevant when `save_to_gdim` is True.

        proj_id: int | str | None
            Gdim project id; only relevant when `save_to_gdim` is True.

        save_to_gdim: bool, default: False
            When True, upload the generated .gsc file to the gdim file
            server; when False, keep it on the local file system.
        """
        super().__init__(mname=mname, auto_run=auto_run)

        # Wire up only the inputs that were actually supplied.
        if tables is not None:
            self.InputTables = tables
        if geo_params_table is not None:
            self.InputGeoParamsTable = geo_params_table
        if proj_info is not None:
            self.InputProjectInfo = proj_info
        if token is not None or proj_id is not None:
            self.InputToken = (token, proj_id)

        self.name_maps = name_maps or {}
        self.selected_bores = selected_bores
        self.drawing_scales = drawing_scales
        self.proj_info_name_map = proj_info_name_map
        self.sample_types_map = sample_types_map
        self.output_path = output_path
        self.gsc_file_name = gsc_file_name
        self.pipeline_workspace_as_output_path = pipeline_workspace_as_output_path
        self.save_to_gdim = save_to_gdim

    def update_ui_schema(self, reset: bool = False) -> dict[str, UIAttributeSchema]:
        """Build the UI schema for bore selection, drawing scales and file name.

        Bore-number choices are read from the connected InputTables via the
        configured name_maps so the UI can offer a dropdown; when no table is
        connected (or the bore-number field cannot be found) the selection
        widget has no preset choices.
        """
        tables: PortTypeHint.TableCollection | None = self._ports_in["InputTables"].data

        bore_num_selections = None
        if tables is not None:
            # Resolve the (possibly remapped) bore table and bore-number field.
            bore_table_name = self.name_maps.get("table_names", {}).get(
                "bore_table", "bore_table"
            )
            if bore_table_name in tables:
                bore_table = tables.get_table(bore_table_name)
                bore_num_field = (
                    self.name_maps.get("field_names", {})
                    .get("bore_table", {})
                    .get("bore_num", "bore_num")
                )
                if (
                    bore_num_field in bore_table.columns
                    or bore_num_field in bore_table.field_titles
                ):
                    bore_num_selections = bore_table[bore_num_field].unique().tolist()

        selected_bores_schema = ArrayAttributeSchema(
            title="选择钻孔",
            selections=bore_num_selections,
            items=StringAttributeSchema(),
            depends_on="InputTables",
        )
        selected_bores_schema.widget = "select"

        bore_num_column = StringAttributeSchema(title="钻孔编号", readonly=True)
        drawing_scales_column = IntegerAttributeSchema(
            title="柱状图比例", default=200, minimum=1, maximum=1000000
        )
        if self.selected_bores is None:
            min_rows = 0
            max_rows = 0
        else:
            # One fixed row per selected bore, pre-filled with the bore number.
            bore_num_column.default = list(self.selected_bores)
            min_rows = len(self.selected_bores)
            max_rows = min_rows

        widget_attributes = WidgetAttribute(table_enable_set_all_rows=True)
        drawing_scales_schema = TableAttributeSchema(
            title="设置柱状图比例",
            columns=[bore_num_column, drawing_scales_column],
            columns_name=["bore_num", "drawing_scales"],
            min_rows=min_rows,
            max_rows=max_rows,
            widget_attributes=widget_attributes,
            depends_on="selected_bores",
        )

        gsc_file_name_schema = StringAttributeSchema(
            title="GSC文件名",
            default="bore_for_cad_draw.gsc",
        )

        return {
            "selected_bores": selected_bores_schema,
            "drawing_scales": drawing_scales_schema,
            "gsc_file_name": gsc_file_name_schema,
        }

    def set_cal_params(self, reset: bool = True) -> dict[str, RangeModel]:
        """Build the value-range models for the calculation parameters.

        Reads the bore numbers from the connected input table collection (when
        available) and uses them both as the choices for ``selected_bores`` and
        as the key choices for ``drawing_scales``.

        Parameters
        ----------
        reset: bool, default: True
            Unused here; kept for interface compatibility.

        Returns
        -------
        dict[str, RangeModel]
            Range models keyed by parameter name.
        """
        tables: PortTypeHint.TableCollection | None = self._ports_in["InputTables"].data

        bore_num_choices = None
        if tables is not None:
            # Resolve the (possibly remapped) bore table and bore-number field,
            # then collect the unique bore numbers as the available choices.
            bore_table_name = self.name_maps.get("table_names", {}).get(
                "bore_table", "bore_table"
            )
            if bore_table_name in tables:
                bore_table = tables.get_table(bore_table_name)
                bore_num_field = (
                    self.name_maps.get("field_names", {})
                    .get("bore_table", {})
                    .get("bore_num", "bore_num")
                )
                if (
                    bore_num_field in bore_table.columns
                    or bore_num_field in bore_table.field_titles
                ):
                    bore_num_choices = bore_table[bore_num_field].unique().tolist()

        return {
            "selected_bores": RangeModel(
                vtype="list",
                title="选择钻孔",
                list_type="str",
                choices=bore_num_choices,
                default=self.selected_bores,
            ),
            "drawing_scales": RangeModel(
                vtype="dict",
                title="柱状图比例",
                dict_key_choices=bore_num_choices,
                dict_value_type="int",
                # Scales must be strictly positive integers (> 0, no upper bound).
                minmax=(0, None),
                include_min=False,
                default=self.drawing_scales,
            ),
        }

    def execute(self) -> BoreForCadDraw | None:
        """Build a BoreForCadDraw object, write it into a .gsc file and publish it.

        Reads the input tables, geo parameters table, project info and
        (optionally) the gdim token, merges the configured drawing scales into
        the bore table, converts everything into a ``BoreForCadDraw`` and
        writes it into a zipped .gsc file. Depending on ``save_to_gdim`` the
        file is uploaded to the gdim file server or kept on the local file
        system.

        Returns
        -------
        BoreForCadDraw | None
            The built object, or None when required inputs are missing.
        """
        tables: PortTypeHint.TableCollection | None = self._ports_in["InputTables"].data
        geo_params_table: PortTypeHint.TableData | None = self._ports_in[
            "InputGeoParamsTable"
        ].data
        proj_info_data: PortTypeHint.SingleResult | None = self._ports_in[
            "InputProjectInfo"
        ].data
        input_token: PortTypeHint.Token | None = self._ports_in["InputToken"].data

        if not tables or proj_info_data is None:
            self._ports_out["OutputBoreForCadDraw"].data = None
            return None

        if self.save_to_gdim:
            # Uploading requires a complete (token, project id) pair.
            if input_token is None:
                self._ports_out["OutputFile"].data = None
                return None
            token, proj_id = input_token
            if token is None or proj_id is None:
                self._ports_out["OutputFile"].data = None
                return None

        # Convert project info
        project_info = convert_to_project_info(proj_info_data, self.proj_info_name_map)

        # Work on a copy so the upstream port data is not mutated.
        tables = tables.copy()
        # Add geo parameters table to tables
        if geo_params_table is not None:
            tables.add_table(geo_params_table)
        updated_tables = tables

        # Add drawing_scales to the bore table if provided
        if self.drawing_scales is not None:
            # Normalize the UI table form ([{"bore_num": ..., "drawing_scales": ...}, ...])
            # into a {bore_num: scale} mapping; ints and dicts pass through unchanged.
            if isinstance(self.drawing_scales, list):
                drawing_scales = {
                    row["bore_num"]: row["drawing_scales"]
                    for row in self.drawing_scales
                }
            else:
                drawing_scales = self.drawing_scales
            bore_table_name = self.name_maps.get("table_names", {}).get(
                "bore_table", "bore_table"
            )
            if bore_table_name in tables:
                bore_table = tables.get_table(bore_table_name).copy()
                bore_num_field = (
                    self.name_maps.get("field_names", {})
                    .get("bore_table", {})
                    .get("bore_num", "bore_num")
                )

                # BUGFIX: branch on the normalized local `drawing_scales`, not
                # `self.drawing_scales` — otherwise the list form (converted to
                # a dict above) never reached either branch and no scale column
                # was written.
                if isinstance(drawing_scales, int):
                    # Single scale applied to every bore
                    bore_table["drawing_scales"] = drawing_scales
                elif isinstance(drawing_scales, dict):
                    # Per-bore scales; bores without an explicit entry fall
                    # back to the "default" key, or 1 when that is absent.
                    bore_table["drawing_scales"] = (
                        bore_table[bore_num_field]
                        .map(drawing_scales)
                        .fillna(drawing_scales.get("default", 1))
                    )

                # Rebuild the collection with the updated bore table in place.
                updated_tables = TableCollection(
                    name=tables.name, title=tables.title, description=tables.description
                )
                for table in tables:
                    if table.name == bore_table_name or table.title == bore_table_name:
                        updated_tables.add_table(bore_table)
                    else:
                        updated_tables.add_table(table)

        # Convert to section bores and materials
        section_bores, materials = convert_to_section_bores_with_matrials(
            tables=updated_tables,
            name_maps=self.name_maps,
            selected_bore_nums=self.selected_bores,
            sample_types_map=self.sample_types_map,
        )

        # Create BoreForCadDraw object
        bore_for_cad_draw = BoreForCadDraw(
            project_infos=project_info, bores=section_bores, materials=materials
        )

        # Decide where to write the .gsc file. When uploading to gdim we always
        # stage the file inside the pipeline workspace.
        if self.save_to_gdim:
            output_path = None
            pipeline_workspace_as_output_path = True
        else:
            output_path = self.output_path
            pipeline_workspace_as_output_path = self.pipeline_workspace_as_output_path

        if pipeline_workspace_as_output_path:
            # Prefer the pipeline workspace, then the module path, then cwd.
            if self.pipeline and self.pipeline.workspace:
                final_output_path = self.pipeline.workspace
            elif output_path:
                final_output_path = output_path
            else:
                final_output_path = Path.cwd()
        else:
            # Prefer the module path, then the pipeline workspace, then cwd.
            if output_path:
                final_output_path = output_path
            elif self.pipeline and self.pipeline.workspace:
                final_output_path = self.pipeline.workspace
            else:
                final_output_path = Path.cwd()

        # A .gsc file is a zip archive containing the serialized model as JSON.
        output_gsc_file = Path(final_output_path) / self.gsc_file_name
        with zipfile.ZipFile(output_gsc_file, "w", zipfile.ZIP_DEFLATED) as zipf:
            zipf.writestr("bores_log.json", bore_for_cad_draw.model_dump_json())

        if self.save_to_gdim:
            if output_gsc_file.exists():
                gdim_file = upload_file_to_gdim(
                    user_token=token,
                    pid=proj_id,
                    file_path=output_gsc_file,
                    upload_type="temp",
                )
                self._ports_out["OutputFile"].data = gdim_file
            else:
                self._ports_out["OutputFile"].data = None
        else:
            self._ports_out["OutputFile"].data = str(output_gsc_file)

        self._ports_out["OutputBoreForCadDraw"].data = bore_for_cad_draw
        return bore_for_cad_draw


@module_decorator()
class ConvertToBoreForPlanDraw(PipeModule):
    """Convert the input table collection to a BoreForPlanDraw object and save it to .gsc file
    which can be used for the bores plan layout drawing (钻孔平面布置图)."""

    InputTables: PortReference[PortTypeHint.TableCollection]
    InputProjectInfo: PortReference[PortTypeHint.SingleResult]
    InputCoordinateSystem: PortReference[PortTypeHint.CoordinateSystem]
    InputToken: PortReference[PortTypeHint.Token]
    OutputBoreForPlanDraw: PortReference[PortTypeHint.BoreForPlanDraw]
    OutputFile: PortReference[PortTypeHint.FilePath | PortTypeHint.GdimFile]

    _port_docs = {
        "InputTables": "The input table collection containing bore and section line data.",
        "InputProjectInfo": "The input project info.",
        "InputCoordinateSystem": "The input coordinate system.",
        "OutputBoreForPlanDraw": "The output BoreForPlanDraw object.",
        "OutputFile": "The output gsc file.",
    }

    def __init__(
        self,
        mname: str | None = "ConvertToBorePlanForCad",
        auto_run: bool = True,
        tables: PortTypeHint.TableCollection | None = None,
        proj_info: PortTypeHint.SingleResult | None = None,
        coordinate_system: PortTypeHint.SingleResult | None = None,
        name_maps: dict[str, dict[str, str]] | None = None,
        bore_types_map: dict[str, BoreTypes] | None = None,
        proj_info_name_map: dict[str, str] | None = None,
        output_path: str | Path | None = None,
        gsc_file_name: str = "bore_plan_for_cad_draw.gsc",
        pipeline_workspace_as_output_path: bool = True,
        token: str | None = None,
        proj_id: int | str | None = None,
        save_to_gdim: bool = False,
    ) -> None:
        """Initialize a ConvertToBorePlanForCad object.

        Parameters
        ----------
        tables: PortTypeHint.TableCollection | None, default: None
            The input table collection containing bore and section line data.
            Here are the table names:
            bore_table: 钻孔一览表 (bore overview table)
            section_line_table: 剖面线表 (section line table)

        proj_info: PortTypeHint.SingleResult | None, default: None
            The input single result containing project info data.

        name_maps: dict[str, dict[str, str]] | None, default: None
            Mapping of table names and field names. Structure:
            {
                "table_names": {"bore_table": "actual_bore_table_name", "section_line_table": "actual_section_line_table_name"},
                "field_names": {
                    "bore_table": {"bore_num": "actual_bore_num_field",
                    "x": "actual_x_field",
                    "y": "actual_y_field",
                    "bore_type": "actual_bore_type_field",
                    "top": "actual_top_field"},
                    "section_line_table": {"name": "actual_name_field", "bores": "actual_bores_field"}
                }
            }

        bore_types_map: dict[str, BoreTypes] | None, default: None
            Mapping from string bore type names to BoreTypes enum.
            For example: {"鉴别孔": BoreTypes.IdentificationBore, "取土试样钻孔": BoreTypes.SoilSamplingBore}
            If the map is not specified, will try to compare to the names and titles of BoreTypes automatically.

        proj_info_name_map: dict[str, str] | None, default: None
            Mapping for project info fields from proj_info to ProjectInfo.

        output_path: str | Path | None, default: None
            The path to save the output gsc file.
            If None, the module will try to use the workspace from the pipeline, else use the current working directory.

        gsc_file_name: str, default: "bore_plan_for_cad_draw.gsc"
            The name of the gsc file. If None, the gsc file will not be saved.

        pipeline_workspace_as_output_path: bool, default: True
            If True, the 'workspace' attribute of the pipeline will be used to find the output_path at first.
            If False, the module's attributes 'output_path' will be used to find the output_path at first.

        token: str | None
            The token of the user.
            It's valid only when `save_to_gdim` is True.

        proj_id: int | str | None
            The id of the gdim project.
            It's valid only when `save_to_gdim` is True.

        save_to_gdim: bool, default: False
            If True, the generated .gsc file will be saved to the gdim file server.
            If False, the generated .gsc file will be saved to the local file system.
        """
        super().__init__(mname=mname, auto_run=auto_run)

        # Feed directly-supplied data into the input ports.
        if tables is not None:
            self.InputTables = tables
        if proj_info is not None:
            self.InputProjectInfo = proj_info
        if coordinate_system is not None:
            self.InputCoordinateSystem = coordinate_system
        if token is not None or proj_id is not None:
            self.InputToken = (token, proj_id)

        self.name_maps = name_maps or {}
        self.bore_types_map = bore_types_map
        self.proj_info_name_map = proj_info_name_map
        self.output_path = output_path
        self.gsc_file_name = gsc_file_name
        self.pipeline_workspace_as_output_path = pipeline_workspace_as_output_path
        self.save_to_gdim = save_to_gdim

    def update_ui_schema(self, reset: bool = False) -> dict[str, UIAttributeSchema]:
        """Build the UI schema for the configurable attributes of this module."""
        return {
            "gsc_file_name": StringAttributeSchema(
                title="GSC文件名",
                # BUGFIX: default previously said "bore_for_cad_draw.gsc"
                # (copy-paste from the sibling module); keep it consistent
                # with the __init__ default of this module.
                default="bore_plan_for_cad_draw.gsc",
            ),
        }

    def execute(self) -> BoreForPlanDraw | None:
        """Build a BoreForPlanDraw object, write it into a .gsc file and publish it.

        Reads the input tables, project info, coordinate system and (optionally)
        the gdim token, converts them into a ``BoreForPlanDraw`` and writes it
        into a zipped .gsc file. Depending on ``save_to_gdim`` the file is
        uploaded to the gdim file server or kept on the local file system.

        Returns
        -------
        BoreForPlanDraw | None
            The built object, or None when required inputs are missing.
        """
        tables: PortTypeHint.TableCollection | None = self._ports_in["InputTables"].data
        proj_info_data: PortTypeHint.SingleResult | None = self._ports_in[
            "InputProjectInfo"
        ].data
        coordinate_system_data: PortTypeHint.CoordinateSystem | None = self._ports_in[
            "InputCoordinateSystem"
        ].data
        input_token: PortTypeHint.Token | None = self._ports_in["InputToken"].data

        if not tables or proj_info_data is None or coordinate_system_data is None:
            self._ports_out["OutputBoreForPlanDraw"].data = None
            return None

        if self.save_to_gdim:
            # Uploading requires a complete (token, project id) pair.
            if input_token is None:
                self._ports_out["OutputFile"].data = None
                return None
            token, proj_id = input_token
            if token is None or proj_id is None:
                self._ports_out["OutputFile"].data = None
                return None

        # Convert project info
        project_info = convert_to_project_info(proj_info_data, self.proj_info_name_map)

        # Convert to section bores and section lines
        section_bores, section_lines = convert_to_plan_bores(
            tables=tables, name_maps=self.name_maps, bore_types_map=self.bore_types_map
        )

        # Map the coordinate-system axis direction to the single-letter form
        # expected by BoreForPlanDraw; other values pass through unchanged.
        y_direction = coordinate_system_data.yAxisDirection
        if y_direction == "north":
            y_direction = "N"
        elif y_direction == "east":
            y_direction = "E"
        # NOTE(review): "scection_lines" mirrors the field name declared on the
        # BoreForPlanDraw dataclass (apparent typo there) — fix at the source.
        bore_for_plan_draw = BoreForPlanDraw(
            project_infos=project_info,
            bores=section_bores,
            scection_lines=section_lines,
            y_direction=y_direction,
        )

        # Decide where to write the .gsc file. When uploading to gdim we always
        # stage the file inside the pipeline workspace.
        if self.save_to_gdim:
            output_path = None
            pipeline_workspace_as_output_path = True
        else:
            output_path = self.output_path
            pipeline_workspace_as_output_path = self.pipeline_workspace_as_output_path

        if pipeline_workspace_as_output_path:
            # Prefer the pipeline workspace, then the module path, then cwd.
            if self.pipeline and self.pipeline.workspace:
                final_output_path = self.pipeline.workspace
            elif output_path:
                final_output_path = output_path
            else:
                final_output_path = Path.cwd()
        else:
            # Prefer the module path, then the pipeline workspace, then cwd.
            if output_path:
                final_output_path = output_path
            elif self.pipeline and self.pipeline.workspace:
                final_output_path = self.pipeline.workspace
            else:
                final_output_path = Path.cwd()

        # A .gsc file is a zip archive containing the serialized model as JSON.
        output_gsc_file = Path(final_output_path) / self.gsc_file_name
        with zipfile.ZipFile(output_gsc_file, "w", zipfile.ZIP_DEFLATED) as zipf:
            zipf.writestr("bore_plan.json", bore_for_plan_draw.model_dump_json())

        if self.save_to_gdim:
            if output_gsc_file.exists():
                gdim_file = upload_file_to_gdim(
                    user_token=token,
                    pid=proj_id,
                    file_path=output_gsc_file,
                    upload_type="temp",
                )
                self._ports_out["OutputFile"].data = gdim_file
            else:
                self._ports_out["OutputFile"].data = None
        else:
            self._ports_out["OutputFile"].data = str(output_gsc_file)

        self._ports_out["OutputBoreForPlanDraw"].data = bore_for_plan_draw
        return bore_for_plan_draw
