# writer modules in gdi-engine
import json
import os
import shutil
import warnings
import zipfile
from pathlib import Path
from typing import Literal

import numpy as np
from docxtpl import DocxTemplate, InlineImage

from ..connectors.fileConnector import ExcelTemplate
from ..connectors.gdimConnector import (
    download_gdim_file,
    get_project_info,
    get_project_info_structure,
    update_project_info,
    upload_file_to_gdim,
    write_mdb_from_minIO_file,
    write_table_data,
)
from ..dataclass import GDIDataQualityWarning
from ..dataclass.gdimData import GdimMinIOFile, SimpleJsonTable
from ..dataclass.geoProfiles import BoreForCadDraw, BoreForPlanDraw
from ..dataclass.results import DocData, SingleResult, ToGdimJsonTable, UnitResult
from ..dataclass.tables import TableCollection, TableData
from ..dataclass.terminologies import BoreTypes, DataTypes, Units
from ..dataTransformers.dataConverter import (
    convert_to_plan_bores,
    convert_to_project_info,
    convert_to_section_bores_with_matrials,
)
from ..pipeline.pipeData import (
    ArrayAttributeSchema,
    FileAttributeSchema,
    IntegerAttributeSchema,
    RangeModel,
    StringAttributeSchema,
    TableAttributeSchema,
    UIAttributeSchema,
    WidgetAttribute,
)
from ..pipeline.pipeline import (
    PipeModule,
    Port,
    PortReference,
    module_decorator,
    status_manage,
)
from ..pipeline.portTypes import PortType, PortTypeHint


@status_manage
class GdimSimpleTablesWriter(PipeModule):
    """Write a TableCollection (multiple tables) to a simple json format for gdim rendering."""

    def __init__(
        self,
        mname: str | None = None,
        auto_run: bool = True,
        tables: PortTypeHint.TableCollection | PortTypeHint.TableData | None = None,
        highlight_rows: PortTypeHint.TablesRowMask | None = None,
        name_type: Literal["name", "title"] = "name",
        columns: list[str] | None = None,
        precision: dict[str, int | None] | None = None,
        table_name_type: Literal["name", "title"] = "title",
        column_name_type: Literal["name", "title"] = "title",
        highlight_rows_required: bool = False,
    ) -> None:
        """Initialize the GdimSimpleTablesWriter object.

        Parameters
        ----------
        tables: TableCollection | TableData | None, default: None
            The table collection to be converted.
            If it's TableCollection, several tables will be converted and keys are either table name or table title.
            If it's TableData, only one table will be converted.

        highlight_rows: PortTypeHint.TablesRowMask | None, default: None
            A mask to mark the rows to be highlighted.
            Key is the name of the table. Value is a mask which is a list of boolean values.

        name_type: Literal["name", "title"], default: "name"
            The type of the names used in the selected column names and the keys of precision.

        columns: list[str] | None, default: None
            The columns to be converted. All tables in the TableCollection will use the same columns.
            If a column is not found in a table, it will be ignored.
            If None, all columns will be converted.
            If name_type is "name", the columns should be the names of the columns.
            If name_type is "title", the columns should be the titles of the columns.
            The table will be rendered in the same sequence of the columns.

        precision: dict[str, int | None] | None, default: None
            The precision of the table.
            If name_type is "name", the key is the name of the column.
            If name_type is "title", the key is the title of the column.

        table_name_type: Literal["name", "title"], default: "title"
            The type of the names used for the table names in the output.

        column_name_type: Literal["name", "title"], default: "title"
            The type of the names used for the column names in the output.

        highlight_rows_required: bool, default: False
            Whether the highlight rows are required.
            If it's True, the InputHighlightRows cannot be None, or None will be returned after execution.
        """
        super().__init__(mname, auto_run)

        self._ports_in = {
            "InputTables": Port(
                ptype=[PortType.TableCollection, PortType.TableData],
                data=tables,
                pdoc="The table collection to be converted.",
            ),
            "InputHighlightRows": Port(
                ptype=PortType.TablesRowMask,
                data=highlight_rows,
                pdoc="The rows to be highlighted.",
            ),
        }
        self._ports_out = {
            "OutputRenderTables": Port(
                ptype=PortType.SimpleJsonTables,
                pdoc="The json format of the table data for rendering on Gdim.",
            )
        }

        self.name_type = name_type
        self.columns = columns
        self.precision = precision
        self.table_name_type = table_name_type
        self.column_name_type = column_name_type
        self.highlight_rows_required = highlight_rows_required

    @property
    def InputTables(self) -> None:
        raise AttributeError("Property 'InputTables' is write-only.")

    @InputTables.setter
    def InputTables(
        self, value: PortTypeHint.TableCollection | PortTypeHint.TableData | None
    ) -> None:
        self._ports_in["InputTables"].data = value

    @property
    def InputHighlightRows(self) -> None:
        raise AttributeError("Property 'InputHighlightRows' is write-only.")

    @InputHighlightRows.setter
    def InputHighlightRows(self, value: PortTypeHint.TablesRowMask | None) -> None:
        self._ports_in["InputHighlightRows"].data = value

    @property
    def OutputRenderTables(self) -> PortTypeHint.SimpleJsonTables | None:
        return self._ports_out["OutputRenderTables"].data

    def set_cal_params(self, reset: bool = True) -> None:
        # No calculable parameters are exposed for this module.
        return None

    def execute(self) -> PortTypeHint.SimpleJsonTables | None:
        """Convert the input tables to a list of SimpleJsonTable for Gdim rendering.

        Returns None (and clears the output port) when the required inputs
        are missing.
        """
        tables: PortTypeHint.TableCollection | PortTypeHint.TableData | None = (
            self._ports_in["InputTables"].data
        )
        highlight_rows_mask: PortTypeHint.TablesRowMask | None = self._ports_in[
            "InputHighlightRows"
        ].data

        # Bail out when the tables are missing, or when highlight rows are
        # required but absent.
        if tables is None or (
            self.highlight_rows_required and highlight_rows_mask is None
        ):
            self._ports_out["OutputRenderTables"].data = None
            return None

        if self.name_type not in ("name", "title"):
            raise ValueError(f"Invalid column name type: {self.name_type}")

        # Wrap a single TableData into a one-table collection so the loop
        # below handles both input kinds uniformly.
        if isinstance(tables, PortTypeHint.TableData):
            collection = TableCollection()
            collection.add_table(tables)
            tables = collection

        render_tables = []
        for table in tables:
            column_names = table.columns.tolist()
            column_titles = table.field_titles

            # Determine which of the requested columns exist in this table.
            # When no columns were requested, keep all columns (as documented).
            if self.columns is None:
                valid_columns = (
                    list(column_names)
                    if self.name_type == "name"
                    else list(column_titles)
                )
            elif self.name_type == "name":
                valid_columns = [c for c in self.columns if c in column_names]
            else:
                valid_columns = [c for c in self.columns if c in column_titles]

            # Select the requested columns. Fall back to the whole table when
            # none of the requested columns are present (the previous code
            # left `new_table` stale or undefined in that case).
            if valid_columns:
                if self.name_type == "name":
                    new_table = table[valid_columns]
                else:
                    new_table = table.select_by_titles(*valid_columns)
            else:
                new_table = table

            # Convert precision keys from titles to column names when needed;
            # a missing precision dict is passed through as None.
            if self.precision is None:
                precision = None
            elif self.name_type == "title":
                title_to_name = new_table.title_to_name
                precision = {
                    title_to_name[k]: v
                    for k, v in self.precision.items()
                    if k in title_to_name
                }
            else:
                precision = self.precision

            json_table = new_table.export_doc_context(
                precision=precision, key_type=self.column_name_type
            )["data"]
            # Column order for rendering; an empty table yields no columns.
            column_sequence = list(json_table[0].keys()) if json_table else []

            # Reset per table so a previous table's mask never leaks into the
            # next one (the previous code reused a stale index).
            highlight_rows_index = None
            if highlight_rows_mask and table.name in highlight_rows_mask:
                rows_mask = highlight_rows_mask[table.name]
                highlight_rows_index = np.where(np.array(rows_mask))[0]

            # Pick the output table name according to table_name_type.
            if self.table_name_type == "name":
                out_name = table.name
            elif self.table_name_type == "title":
                out_name = table.title
            else:
                raise ValueError(f"Invalid table name type: {self.table_name_type}")

            render_tables.append(
                SimpleJsonTable(
                    name=out_name,
                    data=json_table,
                    columns=column_sequence,
                    highlight_rows=highlight_rows_index,
                )
            )

        self._ports_out["OutputRenderTables"].data = render_tables
        return render_tables


@status_manage
class GdimRenderTableWriter(PipeModule):
    """Convert a TableData, TableCollection or SingleResult to a json format so that it can be rendered on Gdim.
    Only one table can be generated."""

    def __init__(
        self,
        mname: str | None = None,
        auto_run: bool = True,
        data: (
            PortTypeHint.TableData
            | PortTypeHint.TableCollection
            | PortTypeHint.SingleResult
            | None
        ) = None,
        name_type: Literal["name", "title"] = "name",
        columns: list[str] | None = None,
        precision: dict[str, int | None] | None = None,
        group_by: str | None = None,
        main_table: str | None = None,
        sub_tables: list[str] | None = None,
    ) -> None:
        """Initialize the GdimRenderTableWriter object.

        Parameters
        ----------
        data: TableData | TableCollection | SingleResult | None, default: None
            The data to be converted.
            If it's TableCollection, the tables will be merged into a single table by columns.

        name_type: Literal["name", "title"], default: "name"
            The type of the names used in column names selected, the key of precision and the name of the main table.

        columns: list[str] | None, default: None
            The columns to be converted.
            If None, all columns will be converted.
            If name_type is "name", the columns should be the names of the columns.
            If name_type is "title", the columns should be the titles of the columns.

        precision: dict[str, int | None] | None, default: None
            The precision of the table.
            If name_type is "name", the key is the name of the column.
            If name_type is "title", the key is the title of the column.

        group_by: str | None, default: None
            The column name to be grouped by. It's used when the data is a TableCollection.
            If None, the "group_by" will be the first column with the same name in all tables.
            If the name_type is "title", the group_by should be the title of the column.
            If the name_type is "name", the group_by should be the name of the column.

        main_table: str | None, default: None
            The name of the main table. It's used when the data is a TableCollection.
            If None, the table with the least number of rows will be used as the main table.
            If the name_type is "title", the main_table should be the title of the main table.
            If the name_type is "name", the main_table should be the name of the main table.

        sub_tables: list[str] | None, default: None
            The names of the sub tables. It's used when the data is a TableCollection.
            If None, all tables except the main table will be used as sub tables.
        """
        super().__init__(mname, auto_run)

        self._ports_in = {
            "InputData": Port(
                ptype=[
                    PortType.TableData,
                    PortType.TableCollection,
                    PortType.SingleResult,
                ],
                data=data,
                pdoc="The table data to be converted.",
            )
        }
        self._ports_out = {
            "OutputRenderTable": Port(
                ptype=PortType.GdimJsonTable,
                pdoc="The json format of the table data for rendering on Gdim.",
            )
        }

        self.name_type = name_type
        self.columns = columns
        self.precision = precision
        self.group_by = group_by
        self.main_table = main_table
        self.sub_tables = sub_tables

    @property
    def InputData(self) -> None:
        raise AttributeError("Property 'InputData' is write-only.")

    @InputData.setter
    def InputData(
        self,
        value: (
            PortTypeHint.TableData
            | PortTypeHint.TableCollection
            | PortTypeHint.SingleResult
            | None
        ),
    ) -> None:
        self._ports_in["InputData"].data = value

    @property
    def OutputRenderTable(self) -> PortTypeHint.GdimJsonTable | None:
        return self._ports_out["OutputRenderTable"].data

    def set_cal_params(self, reset: bool = True) -> dict[str, RangeModel]:
        """Build the choice ranges for the UI-adjustable parameters."""
        data: (
            PortTypeHint.TableData
            | PortTypeHint.TableCollection
            | PortTypeHint.SingleResult
            | None
        ) = self._ports_in["InputData"].data

        # Default all choices to None so they are always bound, even when the
        # data is missing or name_type has an unexpected value (the previous
        # code left columns_choices unbound in that case).
        columns_choices = None
        group_by_choices = None  # Only used when data is a TableCollection
        main_table_choices = None  # Only used when data is a TableCollection
        if data is not None:
            if self.name_type == "name":
                if isinstance(data, PortTypeHint.SingleResult):
                    columns_choices = data.names
                elif isinstance(data, PortTypeHint.TableData):
                    columns_choices = data.columns
                elif isinstance(data, PortTypeHint.TableCollection):
                    columns_choices = []
                    for table in data.values():
                        columns_choices.extend(table.columns)
                    group_by_choices = columns_choices
                    main_table_choices = data.table_names
            elif self.name_type == "title":
                if isinstance(data, PortTypeHint.SingleResult):
                    columns_choices = data.titles
                elif isinstance(data, PortTypeHint.TableData):
                    columns_choices = data.title_to_name.keys()
                elif isinstance(data, PortTypeHint.TableCollection):
                    columns_choices = []
                    for table in data.values():
                        columns_choices.extend(table.title_to_name.keys())
                    group_by_choices = columns_choices
                    main_table_choices = data.table_titles

        values_range = {
            "name_type": RangeModel(
                vtype="str",
                choices=["name", "title"],
                default=self.name_type,
                title="列名类型",
            ),
            "columns": RangeModel(
                vtype="list",
                list_type=["str"],
                choices=columns_choices,
                default=self.columns,
                title="列名",
            ),
            "group_by": RangeModel(
                vtype="str",
                choices=group_by_choices,
                default=self.group_by,
                title="表合并依据的列",
            ),
            "main_table": RangeModel(
                vtype="str",
                choices=main_table_choices,
                default=self.main_table,
                title="主表名",
            ),
        }
        return values_range

    def execute(self) -> PortTypeHint.GdimJsonTable | None:
        """Convert the input data to a GdimJsonTable and set the output port."""
        data: (
            PortTypeHint.TableData
            | PortTypeHint.TableCollection
            | PortTypeHint.SingleResult
            | None
        ) = self._ports_in["InputData"].data

        if data is None:
            self._ports_out["OutputRenderTable"].data = None
            return None

        # Restrict a TableCollection to the main table plus the configured
        # (or collection-declared) sub tables.
        if self.sub_tables is not None:
            if isinstance(data, PortTypeHint.TableCollection):
                new_data = TableCollection(
                    name=data.name, title=data.title, description=data.description
                )
                new_data.add_table(data.get_table(self.main_table))
                for table_name in self.sub_tables:
                    new_data.add_table(data.get_table(table_name))
                data = new_data
        else:
            if isinstance(data, PortTypeHint.TableCollection):
                if data.sub_tables:
                    new_data = TableCollection(
                        name=data.name, title=data.title, description=data.description
                    )
                    new_data.add_table(data.get_table(data.main_table))
                    for table_name in data.sub_tables:
                        new_data.add_table(data.get_table(table_name))
                    data = new_data

        # Safe defaults. The previous code only assigned these inside
        # `is not None` / TableCollection guards in the "title" branch, which
        # raised UnboundLocalError further down (e.g. TableData with
        # name_type="title" and precision=None).
        columns_name = None
        precision = self.precision
        main_table_name = self.main_table
        group_by_name = self.group_by

        # Convert columns/precision/main_table/group_by from titles to names
        # when the configuration uses titles.
        if self.name_type == "title":
            if isinstance(data, (PortTypeHint.TableData, PortTypeHint.SingleResult)):
                if self.columns is not None:
                    columns_name = [
                        data.title_to_name[title]
                        for title in self.columns
                        if title in data.title_to_name
                    ]
                else:
                    # If columns is None, use all available columns (converted from titles to names)
                    if isinstance(data, PortTypeHint.SingleResult):
                        columns_name = data.names
                    else:
                        columns_name = list(data.columns)

                if self.precision is not None:
                    precision = {
                        data.title_to_name[title]: value
                        for title, value in self.precision.items()
                        if title in data.title_to_name
                    }
            elif isinstance(data, PortTypeHint.TableCollection):
                if self.columns is not None:
                    # Map titles to names by searching all tables; the first
                    # table that declares the title wins.
                    mapped_columns = []
                    for title in self.columns:
                        for table in data.values():
                            if title in table.title_to_name:
                                mapped_columns.append(table.title_to_name[title])
                                break
                    if len(mapped_columns) == 0:
                        mapped_columns = None
                    columns_name = mapped_columns
                else:
                    # If columns is None, use all available columns from all tables
                    all_columns = set()
                    for table in data.values():
                        all_columns.update(table.columns)
                    columns_name = list(all_columns)

                if self.precision is not None:
                    # Map precision titles to names by searching all tables.
                    mapped_precision = {}
                    for title, value in self.precision.items():
                        for table in data.values():
                            if title in table.title_to_name:
                                mapped_precision[table.title_to_name[title]] = value
                                break
                    precision = mapped_precision

                if self.main_table is not None:
                    # Convert main table title to name
                    main_table_name = data.get_table(self.main_table).name
                if self.group_by is not None:
                    # Convert group by title to name
                    # NOTE(review): looks up the table via the group_by string;
                    # assumes get_table resolves it — confirm against callers.
                    group_by_name = data.get_table(self.group_by).title_to_name[
                        self.group_by
                    ]
        else:
            if isinstance(data, (PortTypeHint.TableData, PortTypeHint.SingleResult)):
                if isinstance(data, PortTypeHint.SingleResult):
                    available_columns = data.names
                else:
                    available_columns = data.columns

                if self.columns is not None:
                    columns_name = [
                        col for col in self.columns if col in available_columns
                    ]
                else:
                    # If columns is None, use all available columns
                    columns_name = list(available_columns)
            elif isinstance(data, PortTypeHint.TableCollection):
                if self.columns is not None:
                    columns_name = []
                    for col in self.columns:
                        for table in data.values():
                            if col in table.columns:
                                columns_name.append(col)
                                break
                else:
                    # If columns is None, use all available columns from all tables
                    all_columns = set()
                    for table in data.values():
                        all_columns.update(table.columns)
                    columns_name = list(all_columns)

        # Fall back to the main table and key declared on the collection.
        if isinstance(data, PortTypeHint.TableCollection):
            if main_table_name is None:
                if data.main_table:
                    main_table_name = data.main_table
            if group_by_name is None:
                if data.main_key:
                    group_by_name = data.main_key

        to_gdim_json = ToGdimJsonTable(
            data=data,
            columns=columns_name,
            precision=precision,
            group_by=group_by_name,
            main_table=main_table_name,
        )
        output = to_gdim_json.convert()
        self._ports_out["OutputRenderTable"].data = output
        return output


@module_decorator()
class DocDataWriter(PipeModule):
    """Write all kinds of result to a data format that can be used to generate a .docx document."""

    InputData: PortReference[
        PortTypeHint.SingleResult
        | PortTypeHint.TableData
        | PortTypeHint.TableCollection
        | PortTypeHint.FilePath
        | PortTypeHint.FilesPath
        | PortTypeHint.SingleResultList
        | PortTypeHint.TableDataList
        | PortTypeHint.TableCollectionList
    ]
    OutputDocData: PortReference[PortTypeHint.DocData]

    # Port docs definition (matched by name to annotations)
    _port_docs = {
        "InputData": "The data to be written.",
        "OutputDocData": "The data in a format that can be used to generate a .docx document.",
    }

    def __init__(
        self,
        mname: str | None = None,
        auto_run: bool = True,
        data: (
            PortTypeHint.SingleResult
            | PortTypeHint.TableData
            | PortTypeHint.TableCollection
            | PortTypeHint.FilePath
            | PortTypeHint.FilesPath
            | PortTypeHint.SingleResultList
            | PortTypeHint.TableDataList
            | PortTypeHint.TableCollectionList
            | None
        ) = None,
        precision: dict[str, int | None] | None = None,
        table_doc_key_name: str | None = None,
        table_doc_key_title: str | None = None,
        main_table_name: str | None = None,
        sub_tables_name: str | list[str] | None = None,
        group_by: str | None = None,
        sub_table_doc_key_name: str | None = None,
        sub_table_doc_key_title: str | None = None,
        joiner: str = ",",
        file_path_keys: str | list[str] | None = None,
        files_path_to_list: str | None = None,
        files_path: list[str] | list[Path] | None = None,
    ) -> None:
        """Initialize the DocDataWriter object.

        Parameters
        ----------
        data: SingleResult | TableData | TableCollection | FilePath | FilesPath | SingleResultList | TableDataList | TableCollectionList | None, default: None
            The data to be written to a format that can be used for .docx document.

        precision: dict[str, int | None] | None, default: None
            The precision of numeric values for formatting. Controls the number of decimal places
            for numeric data when exported to document format.

            - Key: Can use either field/column **names** or **titles** for TableData/TableCollection,
                   or either result **names** or **titles** for SingleResult
            - Value: number of decimal places (int) or None to skip formatting

            Examples:
            - For TableData with column names 'depth_m', 'bearing_capacity_kpa' and
              titles 'Depth', 'Bearing Capacity':
              precision={'depth_m': 2, 'Bearing Capacity': 1}  # Mixed names and titles
              Result: depth_m values formatted to 2 decimals (e.g., 15.23),
                      bearing_capacity_kpa to 1 decimal (e.g., 125.5)

            - For SingleResult with result names 'calculated_area', 'total_volume' and
              titles 'Area', 'Volume':
              precision={'Area': 3, 'total_volume': None}  # Mixed titles and names
              Result: calculated_area formatted to 3 decimals (e.g., 45.678),
                      total_volume keeps original formatting

            - Using only titles:
              precision={'Depth': 0, 'Bearing Capacity': 4}
              Result: depth column shows integers (e.g., 123),
                      bearing capacity shows 4 decimals (e.g., 125.5000)

            Flexibility: Keys are automatically converted from titles to names internally.
            If a key matches both a name and a title, the name takes precedence.

            Note: Only applies to numeric columns/values. Non-numeric data is unaffected.
            NaN and None values are converted to empty strings regardless of precision.

        table_doc_key_name: str | None, default: None
            Only available for TableData and TableCollection.
            The doc key name of the table data.
            If None, the table name or tablecollection name will be used.
            Note: if table name or tablecollection name is `None`, this parameter cannot be `None`

        table_doc_key_title: str | None, default: None
            Only available for TableData and TableCollection.
            The doc key title of the table data.
            If None, the table title or tablecollection title will be used.

        main_table_name: str | None, default: None
            Only available for TableCollection.
            The name of the main table.

        sub_tables_name: str | list[str] | None, default: None
            Only available for TableCollection.
            The names of the sub tables.

        group_by: str | None, default: None
            Only available for TableCollection.
            The column name to be grouped by.

        sub_table_doc_key_name: str | None, default: None
            Only available for TableCollection.
            The doc key name of the sub table.
            If None, the sub table name will be used.
            Note: if the name of sub tables in the TableCollection is `None`, this parameter cannot be `None`

        sub_table_doc_key_title: str | None, default: None
            Only available for TableCollection.
            The doc key title of the sub table.
            If None, the sub table title will be used.

        joiner: str, default: ","
            Only available for SingleResult.
            If the value of the result is a list, the value will be joined by the joiner to convert to a string.

        file_path_keys: str | list[str] | None, default: None
            Only available for FilePath and FilesPath.
            The keys of the file path.
            If None, the file name will be used as the key.
            Note: it's better to set the file_path_keys if the file name is not a valid doc key.

        files_path_to_list: str | None, default: None
            Only available for FilesPath.
            If not None, this string will be used as the key and all the values from the FilesPath will be converted to a list.

        files_path: list[str] | list[Path] | None, default: None
            This attribute is used for merging several sub doc files into one.
            If not None, the port 'InputData' will be ignored.
        """
        super().__init__(mname=mname, auto_run=auto_run)

        # Set initial data directly to the input port
        # (InputData is a PortReference descriptor declared on the class).
        if data is not None:
            self.InputData = data

        self.precision = precision
        self.table_doc_key_name = table_doc_key_name
        self.table_doc_key_title = table_doc_key_title
        self.main_table_name = main_table_name
        self.sub_tables_name = sub_tables_name
        self.group_by = group_by
        self.sub_table_doc_key_name = sub_table_doc_key_name
        self.sub_table_doc_key_title = sub_table_doc_key_title
        self.joiner = joiner
        self.file_path_keys = file_path_keys
        self.files_path_to_list = files_path_to_list
        self.files_path = files_path

        # UI metadata shown by the pipeline framework (Chinese labels kept
        # verbatim: "Generate document data" / "Convert data for .docx/.xlsx printing").
        self._title = "生成文档数据"
        self._description = "将数据转为用于打印.docx文档或.xlsx表格的数据"

    def update_ui_schema(self, reset: bool = False) -> dict[str, UIAttributeSchema]:
        """Return the UI schema overrides: a multi-file .docx picker for `files_path`."""
        picker = FileAttributeSchema(
            title="选择文件", extension="docx", max_files=None
        )
        return {"files_path": picker}

    def set_cal_params(self, reset: bool = True) -> dict[str, RangeModel]:
        """Expose `files_path` as the module's only calculable parameter."""
        files_range = RangeModel(
            vtype=("list", "None"),
            title="选择的文件",
            list_type="str",
            default=self.files_path,
        )
        return {"files_path": files_range}

    def _convert_precision_titles_to_names(
        self,
        data: (
            PortTypeHint.SingleResult
            | PortTypeHint.TableData
            | PortTypeHint.TableCollection
            | list
            | str
            | Path
        ),
        precision: dict[str, int | None] | None,
    ) -> dict[str, int | None] | None:
        """Convert precision keys from titles to names if possible.

        Parameters
        ----------
        data: SingleResult | TableData | TableCollection | list
            The data object to get title_to_name mapping from
        precision: dict[str, int | None] | None
            Original precision dictionary with potentially mixed keys (names/titles)

        Returns
        -------
        dict[str, int | None] | None
            Precision dictionary with all keys converted to names
        """
        if precision is None:
            return None

        converted_precision = {}

        # Handle SingleResult and SingleResultList
        if isinstance(data, SingleResult) or (
            isinstance(data, list)
            and len(data) > 0
            and isinstance(data[0], SingleResult)
        ):
            if isinstance(data, list):
                # For list of SingleResult, collect all title_to_name mappings
                all_title_to_name = {}
                for single_result in data:
                    all_title_to_name.update(single_result.title_to_name)
                title_to_name = all_title_to_name
            else:
                title_to_name = data.title_to_name

            for key, value in precision.items():
                # Try to convert title to name, fallback to original key
                converted_key = title_to_name.get(key, key)
                converted_precision[converted_key] = value

        # Handle TableData, TableCollection and their lists
        elif isinstance(data, (TableData, TableCollection)) or (
            isinstance(data, list)
            and len(data) > 0
            and isinstance(data[0], (TableData, TableCollection))
        ):
            if isinstance(data, list):
                # For list of tables, collect all title_to_name mappings
                all_title_to_name = {}
                for table_obj in data:
                    if isinstance(table_obj, TableCollection):
                        # For TableCollection, get mappings from all tables
                        for table in table_obj.values():
                            all_title_to_name.update(table.title_to_name)
                    else:
                        all_title_to_name.update(table_obj.title_to_name)
                title_to_name = all_title_to_name
            else:
                if isinstance(data, TableCollection):
                    # For TableCollection, get mappings from all tables
                    all_title_to_name = {}
                    for table in data.values():
                        all_title_to_name.update(table.title_to_name)
                    title_to_name = all_title_to_name
                else:
                    title_to_name = data.title_to_name

            for key, value in precision.items():
                # Try to convert title to name, fallback to original key
                converted_key = title_to_name.get(key, key)
                converted_precision[converted_key] = value
        else:
            # For other data types (FilePath, FilesPath), return precision as-is
            return precision

        return converted_precision

    def execute(self) -> PortTypeHint.DocData | None:
        """Convert the input port data (or ``self.files_path``) into a DocData.

        Supported inputs: SingleResult (or list of), TableData/TableCollection
        (or list of), and file path(s) as str/Path/list. Any other type raises
        a TypeError (previously this fell through with ``doc_data`` unbound and
        crashed with a confusing NameError).

        Returns
        -------
        DocData | None
            The generated doc data, also written to the 'OutputDocData' port.
            None when neither input data nor ``files_path`` is available.
        """
        data: (
            PortTypeHint.SingleResult
            | PortTypeHint.TableData
            | PortTypeHint.TableCollection
            | PortTypeHint.FilePath
            | PortTypeHint.FilesPath
            | None
        ) = self._ports_in["InputData"].data

        if data is None and self.files_path is None:
            self._ports_out["OutputDocData"].data = None
            return None

        if data is None:
            data = self.files_path

        # Convert precision titles to names if possible
        converted_precision = self._convert_precision_titles_to_names(
            data, self.precision
        )

        # Lists are classified by their first element throughout this module.
        is_single_result = isinstance(data, SingleResult) or (
            isinstance(data, list)
            and len(data) > 0
            and isinstance(data[0], SingleResult)
        )
        is_table = isinstance(data, (TableData, TableCollection)) or (
            isinstance(data, list)
            and len(data) > 0
            and isinstance(data[0], (TableData, TableCollection))
        )

        if is_single_result:
            if isinstance(data, list):
                doc_data = self._merge_single_results(data, converted_precision)
            else:
                context = data.export_doc_context(
                    precision=converted_precision, joiner=self.joiner
                )
                doc_keys_struct = context.pop("doc_keys_struct")
                doc_data = DocData(data=context, doc_keys_struct=doc_keys_struct)
        elif is_table:
            if isinstance(data, list):
                doc_data = self._merge_tables(data, converted_precision)
            else:
                doc_data = self._single_table_doc_data(data, converted_precision)
        elif isinstance(data, (str, Path, list)):
            doc_data = self._files_doc_data(data)
        else:
            raise TypeError(
                f"Unsupported input data type: {type(data).__name__}"
            )

        self._ports_out["OutputDocData"].data = doc_data
        return doc_data

    @staticmethod
    def _unique_key(key: str, existing: dict) -> str:
        """Return ``key``, suffixed with ``_<n>`` until it is absent from ``existing``."""
        original_key = key
        suffix_counter = 1
        while key in existing:
            key = f"{original_key}_{suffix_counter}"
            suffix_counter += 1
        return key

    def _merge_single_results(
        self, results: list, precision: dict[str, int | None] | None
    ) -> DocData:
        """Merge the doc contexts of several SingleResult objects.

        Conflicting keys are deduplicated by appending an index suffix,
        for both the data dict and the keys structure.
        """
        merged_doc_data: dict = {}
        merged_doc_keys_struct: dict = {}
        for single_result in results:
            context = single_result.export_doc_context(
                precision=precision, joiner=self.joiner
            )
            result_doc_keys_struct = context.pop("doc_keys_struct")
            for key, value in context.items():
                merged_doc_data[self._unique_key(key, merged_doc_data)] = value
            for key, value in result_doc_keys_struct.items():
                merged_doc_keys_struct[
                    self._unique_key(key, merged_doc_keys_struct)
                ] = value
        return DocData(data=merged_doc_data, doc_keys_struct=merged_doc_keys_struct)

    def _export_table_context(
        self, table_obj, precision: dict[str, int | None] | None
    ):
        """Export the doc context of a TableData or TableCollection.

        Returns the (possibly unwrapped) table object and its context dict.
        ``precision`` is only forwarded when not None so the callee's own
        default stays in effect otherwise.
        """
        if isinstance(table_obj, TableCollection) and len(table_obj) == 1:
            # A single-table collection degrades to its only TableData
            table_obj = table_obj[0]

        if isinstance(table_obj, TableData):
            if precision is not None:
                context = table_obj.export_doc_context(precision=precision)
            else:
                context = table_obj.export_doc_context()
        else:
            kwargs = {
                "main_table_name": self.main_table_name,
                "sub_tables_name": self.sub_tables_name,
                "group_by": self.group_by,
                "sub_table_doc_key_name": self.sub_table_doc_key_name,
                "sub_table_doc_key_title": self.sub_table_doc_key_title,
            }
            if precision is not None:
                context = table_obj.export_doc_context(
                    precision=precision, **kwargs
                )
            else:
                context = table_obj.export_doc_context(**kwargs)
        return table_obj, context

    def _resolve_table_keys(self, table_obj) -> tuple[str, str | None]:
        """Return the (doc key name, doc key title) for a table object.

        Raises
        ------
        ValueError
            When neither the configured key name nor the table's own name is set.
        """
        table_doc_key_name = self.table_doc_key_name or table_obj.name
        if not table_doc_key_name:
            raise ValueError(
                "Both parameter `table_doc_key_name` and `name` of the `TableData` or `TableCollection` are `None` or empty string which is not allowed."
            )
        table_doc_key_title = self.table_doc_key_title or table_obj.title
        return table_doc_key_name, table_doc_key_title

    def _merge_tables(
        self, tables: list, precision: dict[str, int | None] | None
    ) -> DocData:
        """Merge several TableData/TableCollection objects into one DocData."""
        merged_doc_data: dict = {}
        merged_doc_keys_struct: dict = {}
        for table_obj in tables:
            table_obj, context = self._export_table_context(table_obj, precision)
            key_name, key_title = self._resolve_table_keys(table_obj)
            # Deduplicated name is shared by the data and the keys structure
            key_name = self._unique_key(key_name, merged_doc_data)
            merged_doc_data[key_name] = context["data"]
            merged_doc_keys_struct[key_name] = {
                "title": key_title,
                "column_keys": context["doc_keys_struct"],
            }
        return DocData(data=merged_doc_data, doc_keys_struct=merged_doc_keys_struct)

    def _single_table_doc_data(
        self, data, precision: dict[str, int | None] | None
    ) -> DocData:
        """Build DocData from a single TableData or TableCollection."""
        data, context = self._export_table_context(data, precision)
        key_name, key_title = self._resolve_table_keys(data)
        return DocData(
            data={key_name: context["data"]},
            doc_keys_struct={
                key_name: {
                    "title": key_title,
                    "column_keys": context["doc_keys_struct"],
                }
            },
        )

    def _files_doc_data(self, data) -> DocData:
        """Build DocData from a FilePath or FilesPath input."""
        if self.files_path_to_list and isinstance(data, list):
            # Expose the whole list under a single configured doc key
            doc_key = self.files_path_to_list
            context = {doc_key: data}
            doc_keys_struct = {
                doc_key: {
                    "title": self.files_path_to_list,
                    "unit": Units.UNITLESS,
                    "description": DataTypes.FilesPath,
                    "column_keys": None,
                }
            }
            return DocData(data=context, doc_keys_struct=doc_keys_struct)

        if isinstance(data, (str, Path)):
            data = [data]

        unit_result_list = []
        for file_path in data:
            file_path = Path(file_path)
            # '-' is not valid in a doc key, so normalize the stem for the name
            unit_result_list.append(
                UnitResult(
                    name=file_path.stem.replace("-", "_"),
                    title=file_path.stem,
                    value=str(file_path),
                    description=DataTypes.FilePath,
                )
            )

        single_result = SingleResult(unit_result_list)
        context = single_result.export_doc_context()
        doc_keys_struct = context.pop("doc_keys_struct")
        return DocData(data=context, doc_keys_struct=doc_keys_struct)

    def get_doc_keys(self, file_path: str | None = None) -> dict | None:
        """Export the doc keys to a json file."""
        doc_data: DocData | None = self["OutputDocData"]
        if doc_data is None:
            return None

        doc_keys_struct = doc_data.doc_keys_struct
        if file_path:
            doc_data.export_keys_to_json(file_path)
        return doc_keys_struct

    def get_doc_data(self, file_path: str | None = None) -> dict | None:
        """Export the doc data to a json file."""
        doc_data: DocData | None = self["OutputDocData"]
        if doc_data is None:
            return None

        data = doc_data.data
        if file_path:
            doc_data.export_data_to_json(file_path)
        return data


@module_decorator()
class MergeDocData(PipeModule):
    """Merge the data from multiple DocData objects into a single DocData object."""

    OutputDocData: PortReference[PortTypeHint.DocData]

    _port_docs = {"OutputDocData": "The merged doc data."}

    def __init__(
        self,
        mname: str | None = None,
        auto_run: bool = True,
        all_ports_required: bool = True,
    ) -> None:
        """Initialize the MergeDocData object.

        Parameters
        ----------
        all_ports_required: bool, default: True
            If True, the execution will be blocked until all the dynamic ports are connected with invalid data.
        """
        super().__init__(mname, auto_run)

        # Dynamic input ports all carry DocData payloads
        self.dynamic_ports_in_type = PortType.DocData
        self.all_ports_required = all_ports_required

        self._title = "合并文档数据"
        self._description = "将多个DocData文档数据合并为一个DocData文档数据"

    def execute(self) -> PortTypeHint.DocData | None:
        """Merge every connected dynamic port's DocData into one object."""
        dynamic_ports: dict[str, PortTypeHint.DocData] | None = self.dynamic_ports_in
        if not dynamic_ports:
            self._ports_out["OutputDocData"].data = None
            return None

        # Optionally refuse to run while any connected port is still empty
        if self.all_ports_required and any(
            port.data is None for port in dynamic_ports.values()
        ):
            self._ports_out["OutputDocData"].data = None
            return None

        combined_data: dict = {}
        combined_struct: dict = {}

        for port_name, port in dynamic_ports.items():
            payload = port.data
            if not payload:
                continue

            # Merge the data dict; colliding keys get an index suffix plus a warning
            for key, value in payload.data.items():
                base = key
                counter = 1
                while key in combined_data:
                    warnings.warn(
                        f"Duplicate key '{base}' found in port '{port_name}'. Renaming to '{base}_{counter}'"
                    )
                    key = f"{base}_{counter}"
                    counter += 1
                combined_data[key] = value

            # Merge the keys structure the same way (warning was already emitted above)
            for key, value in payload.doc_keys_struct.items():
                base = key
                counter = 1
                while key in combined_struct:
                    key = f"{base}_{counter}"
                    counter += 1
                combined_struct[key] = value

        merged = DocData(data=combined_data, doc_keys_struct=combined_struct)
        self._ports_out["OutputDocData"].data = merged
        return merged

    def get_doc_keys(self, file_path: str | None = None) -> dict | None:
        """Return the doc keys structure, optionally dumping it to a JSON file."""
        doc_data: DocData | None = self["OutputDocData"]
        if doc_data is None:
            return None

        keys_struct = doc_data.doc_keys_struct
        if file_path:
            doc_data.export_keys_to_json(file_path)
        return keys_struct

    def get_doc_data(self, file_path: str | None = None) -> dict | None:
        """Return the doc data dict, optionally dumping it to a JSON file."""
        doc_data: DocData | None = self["OutputDocData"]
        if doc_data is None:
            return None

        payload = doc_data.data
        if file_path:
            doc_data.export_data_to_json(file_path)
        return payload


@module_decorator()
class DocPrinter(PipeModule):
    """Write the doc data to a .docx document."""

    InputDocData: PortReference[PortTypeHint.DocData]
    InputToken: PortReference[PortTypeHint.Token]
    OutputFile: PortReference[
        PortTypeHint.FilePath
        | PortTypeHint.FilesPath
        | PortTypeHint.GdimFile
        | PortTypeHint.GdimFiles
    ]

    _port_docs = {
        "InputDocData": "The doc data to be printed.",
        "InputToken": "The token of the user.",
        "OutputFile": "The files of the generated document.",
    }

    def __init__(
        self,
        mname: str | None = None,
        auto_run: bool = True,
        doc_data: PortTypeHint.DocData | None = None,
        template: str | Path | dict | list[str | Path | dict] | None = None,
        output_path: str | Path | None = None,
        output_name: str | list[str] | None = None,
        pipeline_workspace_as_output_path: bool = True,
        image_size_type: (
            Literal["width", "height"] | dict[str, Literal["width", "height"]]
        ) = "width",
        image_size: dict[str, float] | dict[str, list[float]] | None = None,
        save_to_gdim: bool = False,
        token: str | None = None,
        proj_id: int | str | None = None,
        host: str | None = None,
    ) -> None:
        """Initialize the DocPrinter object.

        Parameters
        ----------
        doc_data: PortTypeHint.DocData | None
            The doc data to be printed.

        template: str | Path | dict | list[str | Path | dict] | None
            The template file or files used for printing.
            If None, the module will not be executed.
            If str or Path, the template file path should be specified.
            If list[str | Path], the template files should be specified.
            If dict, it will be converted to a GdimMinIOFile object to get the template from minIO server.
            If list[dict], it will be converted to a list of GdimMinIOFile objects to get the template from minIO server.

        output_path: str | Path | None
            Valid only when `save_to_gdim` is False.
            The path to save the generated document.
            If None,
                The 'workspace' attribute of the pipeline will be used.
                If pipeline not found or its 'workspace' attribute is None, the current working directory will be used.

        output_name: str | list[str] | None
            The name of the generated document.
            If list[str], the DocPrinter will try to print all the doc templates with corresponding output names.
            If None, the name of the 'template' with a suffix '_printed' will be used.

        pipeline_workspace_as_output_path: bool, default: True
            Valid only when `save_to_gdim` is False.
            If True, the 'workspace' attribute of the pipeline will be used as the output_path first.
            If False, the module's attribute 'output_path' will be used as the output_path first.

        image_size_type: Literal["width", "height"] | dict[str, Literal["width", "height"]], default: "width"
            The type of the image size.
            If "width", the image size will be set to the width of the image.
            If "height", the image size will be set to the height of the image.
            If dict, different keys can use different types of image size. If the key is not found, `width` will be used as default.

        image_size: dict[str, float] | dict[str, list[float]] | None, default: None
            The size of the image.
            If dict[str, float], `key` is the key in the `doc_data` and the `value` is the size of the image.
            If dict[str, list[float]], `key` is the key in the `doc_data` and the `value` is the size for each image in the list.
            If None, the image size will be as same as its original size.

        save_to_gdim: bool, default: False
            If True, the generated document will be saved to the gdim file server.
                A local document will be generated at the pipeline workspace or current working directory at first.
            If False, the generated document will be saved to the local file system.

        token: str | None
            The token of the user.
            Cannot be None when `save_to_gdim` is True.

        proj_id: int | str | None
            The id of the gdim project.
            Cannot be None when `save_to_gdim` is True.

        host: str | None
            The host of the gdim platform.
        """
        super().__init__(mname, auto_run)

        if doc_data is not None:
            self.InputDocData = doc_data
        if token is not None:
            self.InputToken = (token, proj_id, host)

        self.template = template
        self.output_path = output_path
        self.output_name = output_name
        self.pipeline_workspace_as_output_path = pipeline_workspace_as_output_path
        self.save_to_gdim = save_to_gdim
        self.image_size_type = image_size_type
        self.image_size = image_size

        self._title = "打印文档"
        self._description = "将DocData数据打印为.docx格式文档"

    def update_ui_schema(self, reset: bool = False) -> dict[str, UIAttributeSchema]:
        template_schema = FileAttributeSchema(
            title="模板文件", extension="docx", visible=False
        )
        return {
            "template": template_schema,
        }

    def _resolve_templates(self, final_output_path: Path) -> list[Path]:
        """Normalize ``self.template`` into a list of local template file paths.

        Dict entries are treated as GdimMinIOFile descriptors and downloaded
        into ``final_output_path`` first.
        """
        if isinstance(self.template, (str, Path, dict)):
            templates = [self.template]
        else:
            templates = self.template

        template_files: list[Path] = []
        for tmpl in templates:  # fixed typo: was `teample`
            if isinstance(tmpl, (str, Path)):
                template_files.append(Path(tmpl))
            elif isinstance(tmpl, dict):
                minio_file = GdimMinIOFile(**tmpl)
                template_files.append(
                    download_gdim_file(
                        minio_file.downloadUrl, file_path=final_output_path
                    )
                )
        return template_files

    def _normalize_output_names(self) -> list[str] | None:
        """Return configured output names as a list, or None when unset."""
        if isinstance(self.output_name, str):
            return [self.output_name]
        return self.output_name

    def _resolve_image_size(self, k: str):
        """Return the configured size entry for doc key ``k``, or None.

        Bug fix: the original tested ``not self.image_size and k in
        self.image_size.keys()``, which raised AttributeError when
        ``image_size`` was None and silently skipped the lookup when
        sizes were actually configured.
        """
        if self.image_size and k in self.image_size:
            return self.image_size[k]
        return None

    def _image_size_type_for(self, k: str) -> str:
        """Return the size type ('width'/'height') for doc key ``k``."""
        if isinstance(self.image_size_type, dict):
            return self.image_size_type.get(k, "width")
        return self.image_size_type

    def _make_inline_image(self, tpl, image_path, k: str, size) -> InlineImage:
        """Create an InlineImage, applying width/height sizing when ``size`` is given."""
        if not size:
            return InlineImage(tpl, image_path)
        size_type = self._image_size_type_for(k)
        if size_type == "width":
            return InlineImage(tpl, image_path, width=size)
        if size_type == "height":
            return InlineImage(tpl, image_path, height=size)
        raise ValueError(
            f"image_size_type for key '{k}' must be 'width' or 'height'."
        )

    @staticmethod
    def _check_exists(file_path, k: str, kind: str) -> None:
        """Raise FileNotFoundError with a user-facing message when the path is missing."""
        if not Path(file_path).exists():
            raise FileNotFoundError(
                f"用于key '{k}' 的{kind} '{Path(file_path).name}' 未找到！"
            )

    def _inject_rich_content(self, tpl: DocxTemplate, doc_data) -> None:
        """Replace file/image path values in ``doc_data`` with subdocs/InlineImages.

        Mutates ``doc_data.data`` in place; callers must pass a deep copy of
        the original DocData.
        """
        for k, v in doc_data.doc_keys_struct.items():
            description = v.get("description")
            if description == DataTypes.FilePath:
                file_path = doc_data.data[k]
                self._check_exists(file_path, k, "文件")
                doc_data.data[k] = tpl.new_subdoc(file_path)
            elif description == DataTypes.FilesPath:
                file_paths = doc_data.data[k]
                for file_path in file_paths:
                    self._check_exists(file_path, k, "文件")
                doc_data.data[k] = [tpl.new_subdoc(p) for p in file_paths]
            elif description == DataTypes.ImagePath:
                image_path = doc_data.data[k]
                self._check_exists(image_path, k, "图片")
                size = self._resolve_image_size(k)
                if size is not None and not isinstance(size, (float, int)):
                    raise ValueError(
                        f"image_size for key '{k}' must be a single number."
                    )
                doc_data.data[k] = self._make_inline_image(tpl, image_path, k, size)
            elif description == DataTypes.ImagesPath:
                image_paths = doc_data.data[k]
                sizes = self._resolve_image_size(k)
                if sizes is not None:
                    if isinstance(sizes, list):
                        if len(sizes) != len(image_paths):
                            raise ValueError(
                                f"image_size for key '{k}' must be a list of the same length as image_paths."
                            )
                    else:
                        # A scalar size applies to every image in the list
                        sizes = [sizes] * len(image_paths)
                images = []
                # Note: a dedicated index avoids shadowing the caller's loop variable
                for img_idx, image_path in enumerate(image_paths):
                    self._check_exists(image_path, k, "图片")
                    size = sizes[img_idx] if sizes else None
                    images.append(self._make_inline_image(tpl, image_path, k, size))
                doc_data.data[k] = images

    def _publish(self, output_file, token, proj_id, host):
        """Upload ``output_file`` to GDIM when configured, else return the local path."""
        if not self.save_to_gdim:
            return output_file
        return upload_file_to_gdim(
            user_token=token,
            pid=proj_id,  # type: ignore
            file_path=output_file,
            upload_type="app_auto_report",
            host=host,
        )

    def execute(
        self,
    ) -> (
        PortTypeHint.FilePath
        | PortTypeHint.FilesPath
        | PortTypeHint.GdimFile
        | PortTypeHint.GdimFiles
        | None
    ):
        """Render the doc data through the docx template(s) and publish the result.

        Returns
        -------
        A single file (path or gdim file) when one document is generated,
        a list when several are, or None when inputs are missing.
        """
        doc_data: PortTypeHint.DocData | None = self._ports_in["InputDocData"].data
        input_token: PortTypeHint.Token | None = self._ports_in["InputToken"].data

        if doc_data is None or self.template is None:
            self._ports_out["OutputFile"].data = None
            return None

        token = proj_id = host = None
        if self.save_to_gdim:
            input_token = self.get_token(input_token)
            if input_token is None:
                self._ports_out["OutputFile"].data = None
                return None
            token, proj_id, host = input_token
            # Download the template file to pipeline workspace or current working directory
            # Save the generated document at workspace before uploading to server
            output_path = None
            pipeline_workspace_as_output_path = True
        else:
            output_path = self.output_path
            pipeline_workspace_as_output_path = self.pipeline_workspace_as_output_path

        # Get the output path
        final_output_path = Path(
            self.get_workspace(output_path, pipeline_workspace_as_output_path)
        )

        template_files = self._resolve_templates(final_output_path)
        output_names = self._normalize_output_names()

        output_files = []
        # In case the input doc_data is modified when the value is a path, we need to make a deep copy of the doc_data
        doc_data = doc_data.model_copy(deep=True)
        for idx, template_file in enumerate(template_files):
            if not os.path.exists(template_file):
                raise FileNotFoundError(f"Template file '{template_file}' not found.")

            # Bug fix: `output_names` may be None (its default) or shorter than
            # the template list; the original indexed it unconditionally and
            # raised TypeError/IndexError. Fall back to '<stem>_printed.docx'.
            output_name = (
                output_names[idx]
                if output_names and idx < len(output_names)
                else None
            )
            if output_name is None:
                output_name = template_file.stem + "_printed.docx"
            output_file = final_output_path / output_name

            tpl = DocxTemplate(template_file)
            # Check if there are subdocs/images in the doc_data and materialize them
            self._inject_rich_content(tpl, doc_data)

            try:
                tpl.render(doc_data.data)
                # Check if output file exists and remove it to handle case sensitivity issues on Windows
                if os.path.exists(output_file):
                    os.remove(output_file)
                tpl.save(output_file)
                output_files.append(output_file)
            except Exception as e:
                raise Exception(
                    f"Failed to print the document '{template_file.name}'.\n{e}"
                )

        if len(output_files) == 0:
            self._ports_out["OutputFile"].data = None
            return None

        if len(output_files) == 1:
            single = output_files[0]
            if not os.path.exists(single):
                self._ports_out["OutputFile"].data = None
                return None
            result = self._publish(single, token, proj_id, host)
            self._ports_out["OutputFile"].data = result
            return result

        final_output_files = [
            self._publish(f, token, proj_id, host)
            for f in output_files
            if os.path.exists(f)
        ]
        self._ports_out["OutputFile"].data = final_output_files
        return final_output_files


@module_decorator()
class ExcelPrinter(PipeModule):
    """Write the doc data to a .xlsx document."""

    InputDocData: PortReference[PortTypeHint.DocData]
    InputToken: PortReference[PortTypeHint.Token]
    OutputFile: PortReference[
        PortTypeHint.FilePath
        | PortTypeHint.FilesPath
        | PortTypeHint.GdimFile
        | PortTypeHint.GdimFiles
    ]

    _port_docs = {
        "InputDocData": "The doc data to be printed.",
        "InputToken": "The token of the user.",
        "OutputFile": "The files of the generated Excel document.",
    }

    def __init__(
        self,
        mname: str = "ExcelPrinter",
        auto_run: bool = True,
        doc_data: PortTypeHint.DocData | None = None,
        template: str | Path | dict | list[str | Path | dict] | None = None,
        output_path: str | Path | None = None,
        output_name: str | list[str] | None = None,
        pipeline_workspace_as_output_path: bool = True,
        include_table_titles: bool = False,
        apply_table_borders: bool = False,
        border_style: str = "thin",
        image_size_type: (
            Literal["width", "height"] | dict[str, Literal["width", "height"]]
        ) = "width",
        image_size: dict[str, float] | dict[str, list[float]] | None = None,
        save_to_gdim: bool = False,
        token: str | None = None,
        proj_id: int | str | None = None,
        host: str | None = None,
    ) -> None:
        """Initialize the ExcelPrinter object.

        Parameters
        ----------
        doc_data: PortTypeHint.DocData | None
            The doc data to be printed.

        template: str | Path | dict | list[str | Path | dict] | None
            The Excel template(s) used for printing.
            If str or Path, it is used as the path to the template file.
            If dict, it will be converted to a GdimMinIOFile object to download
            the template from the minIO server.
            If list, the ExcelPrinter will try to print with all the Excel templates.

        output_path: str | Path | None
            It's valid only when `save_to_gdim` is False.
            The path to save the generated document.
            If None, the 'workspace' attribute of the pipeline will be used.
            If pipeline not found or its 'workspace' attribute is None, the current working directory will be used.

        output_name: str | list[str] | None
            The name of the generated document.
            If list[str], each name is matched with the template at the same index.
            If None (or the list is shorter than the template list), the name of
            the template with a suffix '_printed' will be used.

        pipeline_workspace_as_output_path: bool, default: True
            It's valid only when `save_to_gdim` is False.
            If True, the 'workspace' attribute of the pipeline will be used to find the output_path first.
            If False, the module's attributes 'output_path' will be used to find the output_path first.

        include_table_titles: bool, default: False
            If True, table headers will be automatically generated from doc_keys_struct.
            If False, headers are assumed to be already in the template.

        apply_table_borders: bool, default: False
            If True, apply outer borders to all table regions.

        border_style: str, default: "thin"
            Border style for tables - "thin", "medium", "thick", "double", "dashed", "dashDot",
                "dashDotDot", "hair", "mediumDashed", "mediumDashDot", "mediumDashDotDot", "slantDashDot" etc.

        image_size_type: str | dict[str, str], default: "width"
            The type of the image size.
            If "width", the image size will be set to the width of the image (height calculated to maintain aspect ratio).
            If "height", the image size will be set to the height of the image (width calculated to maintain aspect ratio).
            If dict, different key can use different type of image size. If the key is not found, "width" will be used as default.

        image_size: dict[str, float] | dict[str, list[float]] | None, default: None
            The size of the image in meters.
            If dict[str, float], key is the key in the doc_data and the value is the size of the image in meters.
            If dict[str, list[float]], key is the key in the doc_data and the value is the size for each image in the list in meters.
            If None, the image size will be as same as its original size.
            Note: Aspect ratio is always preserved when resizing.

        save_to_gdim: bool, default: False
            If True, the generated document will be saved to the gdim file server.
            If False, the generated document will be saved to the local file system.

        token: str | None
            The token of the user.
            It's valid only when `save_to_gdim` is True.

        proj_id: int | str | None
            The id of the gdim project.
            It's valid only when `save_to_gdim` is True.

        host: str | None
            The host of the gdim platform.
        """
        super().__init__(mname, auto_run)

        if doc_data is not None:
            self.InputDocData = doc_data
        if token is not None:
            self.InputToken = (token, proj_id, host)

        self.template = template
        self.output_path = output_path
        self.output_name = output_name
        self.pipeline_workspace_as_output_path = pipeline_workspace_as_output_path
        self.save_to_gdim = save_to_gdim
        self.include_table_titles = include_table_titles
        self.apply_table_borders = apply_table_borders
        self.border_style = border_style
        self.image_size_type = image_size_type
        self.image_size = image_size

        self._title = "打印表格"
        self._description = "将DocData文档数据打印为.xlsx格式表格"

    def update_ui_schema(self, reset: bool = False) -> dict[str, UIAttributeSchema]:
        # Expose only the template file picker; the widget itself is hidden by default.
        template_schema = FileAttributeSchema(
            title="模板文件", extension="xlsx", visible=False
        )
        return {
            "template": template_schema,
        }

    def execute(
        self,
    ) -> (
        PortTypeHint.FilePath
        | PortTypeHint.FilesPath
        | PortTypeHint.GdimFile
        | PortTypeHint.GdimFiles
        | None
    ):
        """Render the Excel template(s) with the doc data, then save locally or upload to GDIM.

        Returns a single file (path or GdimFile) when one template is given,
        a list when several are given, and None when nothing could be produced.
        """
        doc_data: PortTypeHint.DocData | None = self._ports_in["InputDocData"].data
        input_token: PortTypeHint.Token | None = self._ports_in["InputToken"].data

        if doc_data is None or self.template is None:
            self._ports_out["OutputFile"].data = None
            return None

        if self.save_to_gdim:
            input_token = self.get_token(input_token)
            if input_token is None:
                self._ports_out["OutputFile"].data = None
                return None
            token, proj_id, host = input_token
            # Download templates to and render in the pipeline workspace (or the
            # current working directory) before uploading to the server.
            output_path = None
            pipeline_workspace_as_output_path = True
        else:
            output_path = self.output_path
            pipeline_workspace_as_output_path = self.pipeline_workspace_as_output_path

        # Resolve the directory that receives downloaded templates and outputs.
        final_output_path = Path(
            self.get_workspace(output_path, pipeline_workspace_as_output_path)
        )

        # Normalize the template attribute to a list of local template paths.
        if isinstance(self.template, (str, Path, dict)):
            templates = [self.template]
        else:
            templates = self.template

        template_files: list[Path] = []
        for template_item in templates:
            if isinstance(template_item, str):
                template_files.append(Path(template_item))
            elif isinstance(template_item, Path):
                template_files.append(template_item)
            elif isinstance(template_item, dict):
                # Remote template: fetch it from the minIO server first.
                minio_file = GdimMinIOFile(**template_item)
                template_files.append(
                    download_gdim_file(
                        minio_file.downloadUrl, file_path=final_output_path
                    )
                )

        # Normalize output_name to a (possibly empty) list so a None value or a
        # list shorter than the template list no longer crashes below.
        if isinstance(self.output_name, str):
            output_names = [self.output_name]
        else:
            output_names = self.output_name or []

        output_files: list[Path] = []
        for idx, template_file in enumerate(template_files):
            if not os.path.exists(template_file):
                raise FileNotFoundError(f"Template file '{template_file}' not found.")
            # Fall back to '<template stem>_printed.xlsx' when no name is given.
            output_name = output_names[idx] if idx < len(output_names) else None
            if output_name is None:
                output_name = template_file.stem + "_printed.xlsx"
            output_file = final_output_path / output_name

            try:
                excel_template = ExcelTemplate(template_file)

                # Render the template with doc_data
                success = excel_template.render(
                    doc_data,
                    include_table_titles=self.include_table_titles,
                    apply_table_borders=self.apply_table_borders,
                    border_style=self.border_style,
                    image_size_type=self.image_size_type,  # type: ignore
                    image_size=self.image_size,
                )
                if not success:
                    raise Exception(
                        f"Failed to render Excel template '{template_file}'"
                    )

                # Check if output file exists and remove it to handle case sensitivity issues on Windows
                if os.path.exists(output_file):
                    os.remove(output_file)

                # Save the rendered Excel file
                excel_template.save(output_file)
                output_files.append(output_file)

            except Exception as e:
                raise Exception(
                    f"Failed to print the Excel document '{template_file.name}'.\n{e}"
                )

        if len(output_files) == 0:
            self._ports_out["OutputFile"].data = None
            return None

        # Upload (or keep locally) every rendered file that actually exists.
        results = []
        for output_file in output_files:
            if not os.path.exists(output_file):
                continue
            if self.save_to_gdim:
                results.append(
                    upload_file_to_gdim(
                        user_token=token,
                        pid=proj_id,  # type: ignore
                        file_path=output_file,
                        upload_type="app_auto_report",
                        host=host,  # previously omitted in the single-file path
                    )
                )
            else:
                results.append(output_file)

        if len(output_files) == 1:
            # Single template: return a scalar (None if the file vanished on disk).
            single = results[0] if results else None
            self._ports_out["OutputFile"].data = single
            return single

        self._ports_out["OutputFile"].data = results
        return results


@module_decorator()
class GdimTableWriter(PipeModule):
    """Write the data of several tables in a Gdim project.
    Can only be used for GDIM"""

    InputToken: PortReference[PortTypeHint.Token]
    InputData: PortReference[
        PortTypeHint.SingleResult
        | PortTypeHint.TableData
        | PortTypeHint.TableCollection
        | PortTypeHint.GeneralTable
        | PortTypeHint.NumberTable
    ]

    _port_docs = {
        "InputToken": "The token of the user.",
        "InputData": "The data to be written to the gdim tables.",
    }

    def __init__(
        self,
        mname: str | None = "GdimTableWriter",
        auto_run: bool = True,
        data: (
            PortTypeHint.SingleResult
            | PortTypeHint.TableData
            | PortTypeHint.TableCollection
            | PortTypeHint.GeneralTable
            | PortTypeHint.NumberTable
            | None
        ) = None,
        table_names: str | list[str] | None = None,
        fields_mapping: dict[str, str] | dict[str, dict[str, str]] | None = None,
        raise_error: bool = True,
        strict_datetime_validation: bool = True,
        validation_level: Literal["fast", "full"] = "full",
        token: str | None = None,
        proj_id: int | str | None = None,
        host: str | None = None,
    ) -> None:
        """Initialize the GdimTableWriter object.

        Parameters
        ----------
        data: PortTypeHint.SingleResult | PortTypeHint.TableData | PortTypeHint.TableCollection | PortTypeHint.GeneralTable | PortTypeHint.NumberTable | None
            The data to be written to the gdim tables.

        table_names: str | list[str] | None, default: None
            The name or title of the table to be written to. table name will be checked at first, if not found, the title will be checked.
            If None, for `TableData` and `TableCollection`, the name of the table in the table data will used. Otherwise, the table name is neccessary.
            If not None, for `TableData` and `TableCollection`, the table name in the data will be ignored.
            If `list`, the data should be `TableCollection` and has the same length as the data.

        fields_mapping: dict[str, str] | dict[str, dict[str, str]] | None, default: None
            The mapping of the fields in the data to the fields in the gdim tables.
            If dict[str, str], the key can be either the column name or field title in the source data,
            and the value can be either the field name or field title in the GDIM tables.
            If dict[str, dict[str, str]], the key is the table name or title, the value is the corresponding
            field mapping where keys can be source column names or field titles, and values can be
            GDIM field names or field titles.
            If the mapping for a field in the data is not defined, the original field name or title will be used.
            If None, the fields in the data will be written to the gdim tables without any mapping using the original field names or titles.

            Examples:
            - {"source_col_name": "target_field_name"}  # Column name to field name
            - {"Source Title": "Target Title"}          # Field title to field title
            - {"source_col": "Target Title"}            # Column name to field title
            - {"Source Title": "target_field"}          # Field title to field name

        raise_error: bool, default: True
            If True, the module will raise an error if the write operation is not successful when writing a table.
                The module will be regarded as executed only if all the write operations are successful.
            If False, the module will return the error message as a warning and continue to write the next table.
                The module will be regarded as executed as long as at least one of the write operations is successful.

        strict_datetime_validation: bool, default: True
            If True, use regex + datetime parsing validation (slower, more accurate).
            If False, use regex-only validation (faster, slightly less accurate).
            Default is True for data quality.

        validation_level: Literal["fast", "full"], default: "full"
            Validation complexity level:
            - "fast": Only vectorized operations + basic type checks (fastest)
            - "full": Complete validation including complex structure checks (slower, most accurate)
            Default is "full" for maximum data quality.

        token: str | None
            The token of the user.

        proj_id: int | str | None
            The id of the gdim project.

        host: str | None
            The host of the gdim platform.

        Ports
        -----
        InputToken: PortTypeHint.Token
            The token of the user.

        InputData: PortTypeHint.SingleResult | PortTypeHint.TableData | PortTypeHint.TableCollection | PortTypeHint.GeneralTable | PortTypeHint.NumberTable
            The data to be written to the gdim tables.
        """
        super().__init__(mname=mname, auto_run=auto_run)

        if data is not None:
            self.InputData = data
        if token is not None or proj_id is not None:
            self.InputToken = (token, proj_id, host)

        self.table_names = table_names
        self.fields_mapping = fields_mapping
        self.raise_error = raise_error
        self.strict_datetime_validation = strict_datetime_validation
        self.validation_level = validation_level

        self._title = "写入GDIM表格"
        self._description = "将数据写入GDIM表格数据库中"

    def execute(self) -> None | bool:
        """Write the input data to the GDIM tables.

        Returns True when at least some data was saved, None otherwise.
        """
        data: (
            PortTypeHint.SingleResult
            | PortTypeHint.TableData
            | PortTypeHint.TableCollection
            | PortTypeHint.GeneralTable
            | PortTypeHint.NumberTable
            | None
        ) = self._ports_in["InputData"].data
        input_token = self._ports_in["InputToken"].data

        input_token = self.get_token(input_token)
        if input_token is None or data is None:
            return None

        token, proj_id, host = input_token

        # Mapping of table title -> error message for every table that failed.
        errors = write_table_data(
            user_token=token,
            proj_id=proj_id,
            data=data,
            raise_error=self.raise_error,
            table_names=self.table_names,
            fields_mapping=self.fields_mapping,
            strict_datetime_validation=self.strict_datetime_validation,
            validation_level=self.validation_level,  # type: ignore
            host=host,
        )
        if len(errors) == 0:
            return True

        data_saved = False
        # NOTE: do not reuse the dict name as the loop variable here — the old
        # code shadowed it, so the length check below compared against the last
        # message string instead of the number of failed tables.
        for table_title, error_msg in errors.items():
            if "没有可写入GDIM数据库的数据" in error_msg:
                # Nothing at all was written for this table.
                warnings.warn(
                    f"表 '{table_title}' 写入GDIM数据库失败: {error_msg}",
                    GDIDataQualityWarning,
                )
            else:
                # Partial failure: some rows of this table were still saved.
                warnings.warn(
                    f"表 '{table_title}' 部分数据写入GDIM数据库失败: {error_msg}",
                    GDIDataQualityWarning,
                )
                data_saved = True

        if isinstance(data, TableCollection):
            # Fewer failed tables than tables in the collection means at least
            # one table was written completely.
            if len(errors) < len(data):
                data_saved = True

        if data_saved:
            return True
        else:
            return None


@module_decorator()
class MdbWriter(PipeModule):
    """Write the data into a .mdb file."""

    InputToken: PortReference[PortTypeHint.Token]
    InputTables: PortReference[PortTypeHint.TableCollection]
    OutputGdimFile: PortReference[PortTypeHint.GdimFile]

    _port_docs = {
        "InputToken": "The token of the user.",
        "InputTables": "The data to be written to the mdb.",
        "OutputGdimFile": "The mdb file download url.",
    }

    def __init__(
        self,
        mname: str | None = "MdbWriter",
        auto_run: bool = True,
        tables: PortTypeHint.TableCollection | None = None,
        mdb_template_file: str | dict | None = None,
        token: str | None = None,
        proj_id: int | str | None = None,
        host: str | None = None,
    ) -> None:
        """Initialize the MdbWriter object.

        Parameters
        ----------
        tables: PortTypeHint.TableCollection | None
            The data to be written to the mdb.

        mdb_template_file: str | dict | None
            The mdb template file.
            If it is a dict, it will be converted to a GdimMinIOFile object.
            If it is a str, it will be used as the path to the mdb template file.

        token: str | None
            The token of the user.

        proj_id: int | str | None
            The id of the gdim project.

        host: str | None
            The host of the gdim platform.
        """
        super().__init__(mname=mname, auto_run=auto_run)

        if tables is not None:
            self.InputTables = tables
        if token is not None:
            self.InputToken = (token, proj_id, host)

        self.mdb_template_file = mdb_template_file

        self._title = "生成mdb文件"
        self._description = "将数据写入mdb文件中，并生成mdb文件"

    def update_ui_schema(self, reset: bool = False) -> dict[str, UIAttributeSchema]:
        return {
            "mdb_template_file": FileAttributeSchema(
                title="mdb文件", extension="mdb", upload=False
            )
        }

    def execute(self) -> None | PortTypeHint.GdimFile:
        """Upload the table data and the mdb template, then ask the server to fill the mdb.

        Returns the generated GdimFile on success, None otherwise.
        """
        tables_gdim: PortTypeHint.TableCollection | None = self._ports_in[
            "InputTables"
        ].data
        input_token: PortTypeHint.Token | None = self._ports_in["InputToken"].data

        input_token = self.get_token(input_token)
        if input_token is None or tables_gdim is None or self.mdb_template_file is None:
            self._ports_out["OutputGdimFile"].data = None
            return None

        token, proj_id, host = input_token

        # Upload the tables to the server as a JSON payload.
        tables_dic = tables_gdim.to_dict(serializable=True)
        json_bytes = json.dumps(tables_dic, ensure_ascii=False).encode("utf-8")
        tables_response = upload_file_to_gdim(
            user_token=token,
            pid=proj_id,
            data=json_bytes,
            filename="mdb_data.json",
            host=host,  # previously omitted: uploads went to the default host
        )
        json_fileId = tables_response.fileId  # fileId of the uploaded JSON

        # The template is either already on the server (dict) or a local file (str).
        if isinstance(self.mdb_template_file, dict):
            mdb_template_file = GdimMinIOFile.model_validate(self.mdb_template_file)
            mdb_fileId = mdb_template_file.fileId
        elif isinstance(self.mdb_template_file, str):
            # Local mdb template: upload it first.
            mdb_template_file = Path(self.mdb_template_file)
            if not mdb_template_file.exists():
                raise FileNotFoundError(f"mdb模板文件 '{mdb_template_file}' 未找到！")
            mdb_response = upload_file_to_gdim(
                user_token=token,
                pid=proj_id,
                file_path=mdb_template_file,
                host=host,  # previously omitted: uploads went to the default host
            )
            mdb_fileId = mdb_response.fileId  # fileId of the uploaded mdb template
        else:
            raise ValueError("mdb_template_file must be a dict or a str.")

        # Ask the server to write the JSON data into the mdb template.
        new_mdb_gdimFile = write_mdb_from_minIO_file(
            user_token=token,
            pid=proj_id,
            mdbFileId=mdb_fileId,
            jsonFileId=json_fileId,
            host=host,
        )

        if not new_mdb_gdimFile.success:
            warnings.warn("写入mdb文件失败！", GDIDataQualityWarning)
            self._ports_out["OutputGdimFile"].data = None
            return None
        else:
            self._ports_out["OutputGdimFile"].data = new_mdb_gdimFile
            return new_mdb_gdimFile


@module_decorator()
class ExportGdimTables(PipeModule):
    """Export all tables' data of a gdim project in specific format."""

    InputTables: PortReference[PortTypeHint.TableCollection | PortTypeHint.TableData]
    InputTemplateId: PortReference[PortTypeHint.SingleResult]
    InputToken: PortReference[PortTypeHint.Token]
    OutputFile: PortReference[PortTypeHint.FilePath | PortTypeHint.GdimFile]

    _port_docs = {
        "InputTables": "The tables to be saved.",
        "InputTemplateId": "The template id stored in the project information.",
        "InputToken": "The token of the user.",
        "OutputFile": "The saved file.",
    }

    def __init__(
        self,
        mname: str = "ExportGdimTables",
        auto_run: bool = True,
        tables: PortTypeHint.TableCollection | PortTypeHint.TableData | None = None,
        template_id: PortTypeHint.SingleResult | None = None,
        format: Literal["gtb"] = "gtb",
        output_path: str | None = None,
        output_name: str = "gdim_tables",
        pipeline_workspace_as_output_path: bool = True,
        save_to_gdim: bool = False,
        token: str | None = None,
        proj_id: int | str | None = None,
        host: str | None = None,
    ) -> None:
        """Initialize the ExportGdimTables object.

        Parameters
        ----------
        tables: PortTypeHint.TableCollection | PortTypeHint.TableData | None
            The tables to be saved.

        template_id: PortTypeHint.SingleResult | None
            The template id stored in the project information.

        format: Literal["gtb"], default: "gtb"
            The format of the saved file.
            If "gtb", the saved file will be saved as a gtb file.

        output_path: str | None
            The path to save the saved file.
            If None, the 'workspace' attribute of the pipeline will be used.
            If None and if pipeline not found or its 'workspace' attribute is None, the current working directory will be used.

        output_name: str, default: "gdim_tables"
            The name of the saved file.

        pipeline_workspace_as_output_path: bool, default: True
            If True, the 'workspace' attribute of the pipeline will be used to find the output_path first.
            If False, the module's attributes 'output_path' will be used to find the output_path first.

        save_to_gdim: bool, default: False
            If True, the saved file will be saved to the gdim file server.
            If False, the saved file will be saved to the local file system.

        token: str | None
            The token of the user.
            It's valid only when `save_to_gdim` is True.

        proj_id: int | str | None
            The id of the gdim project.
            It's valid only when `save_to_gdim` is True.

        host: str | None
            The host of the gdim platform.

        Ports
        -----
        InputTables: PortTypeHint.TableCollection | PortTypeHint.TableData
            The tables to be saved.

        InputTemplateId: PortTypeHint.SingleResult
            The template id stored in the project information.

        InputToken: PortTypeHint.Token
            The token of the user.

        OutputFile: PortTypeHint.FilePath | PortTypeHint.GdimFile
            The saved file.
        """
        super().__init__(mname=mname, auto_run=auto_run)

        if tables is not None:
            self.InputTables = tables
        if template_id is not None:
            self.InputTemplateId = template_id
        if token is not None or proj_id is not None:
            # The token port carries a 3-tuple everywhere else in this module;
            # dropping `host` here silently discarded a custom platform host.
            self.InputToken = (token, proj_id, host)

        self.format = format
        self.output_path = output_path
        self.output_name = output_name
        self.pipeline_workspace_as_output_path = pipeline_workspace_as_output_path
        self.save_to_gdim = save_to_gdim

        self._title = "导出GDIM表格"
        self._description = "将GDIM表格数据导出为指定格式"

    def execute(self) -> PortTypeHint.FilePath | PortTypeHint.GdimFile | None:
        """Bundle the tables (as CSVs) and the template id into a .gtb zip archive.

        Returns the local file path, the uploaded GdimFile, or None when the
        required inputs are missing.
        """
        tables: PortTypeHint.TableCollection | PortTypeHint.TableData | None = (
            self._ports_in["InputTables"].data
        )
        template_id_result: PortTypeHint.SingleResult | None = self._ports_in[
            "InputTemplateId"
        ].data
        input_token: PortTypeHint.Token | None = self._ports_in["InputToken"].data

        # Check if we have required data
        if tables is None or template_id_result is None:
            self._ports_out["OutputFile"].data = None
            return None

        # Handle save_to_gdim logic similar to DocPrinter
        if self.save_to_gdim:
            input_token = self.get_token(input_token)
            if input_token is None:
                self._ports_out["OutputFile"].data = None
                return None
            token, proj_id, host = input_token
            # Save to local first, then upload to gdim
            output_path = None
            pipeline_workspace_as_output_path = True
        else:
            output_path = self.output_path
            pipeline_workspace_as_output_path = self.pipeline_workspace_as_output_path

        # Determine final output path (similar to DocPrinter logic)
        final_output_path = Path(
            self.get_workspace(output_path, pipeline_workspace_as_output_path)
        )

        # Create the final GTB file path
        gtb_file_path = final_output_path / f"{self.output_name}.gtb"

        # Create a temporary directory for building the GTB content
        temp_dir = final_output_path / f"{self.output_name}_temp"
        temp_dir.mkdir(exist_ok=True)

        try:
            # Handle different table input types
            if isinstance(tables, TableCollection):
                table_list = tables
            else:
                table_list = [tables]

            # Save each table as CSV
            for table in table_list:
                if table.name:
                    csv_filename = f"{table.name}.csv"
                else:
                    raise ValueError("'name' is required for each table.")

                csv_file_path = temp_dir / csv_filename

                # Save table to CSV with headers
                table.to_csv(csv_file_path, index=False, encoding="utf-8")

            # Save the template ID alongside the CSVs
            template_id = template_id_result["dataTemplateId"].value
            template_id_file = temp_dir / "templateId"
            with open(template_id_file, "w", encoding="utf-8") as f:
                f.write(str(template_id))

            # Create GTB zip file
            with zipfile.ZipFile(gtb_file_path, "w", zipfile.ZIP_DEFLATED) as zipf:
                for file_path in temp_dir.iterdir():
                    if file_path.is_file():
                        zipf.write(file_path, file_path.name)

            # Clean up temporary directory
            shutil.rmtree(temp_dir)

            # Handle saving/uploading
            if self.save_to_gdim:
                gdim_file = upload_file_to_gdim(
                    user_token=token,
                    pid=proj_id,
                    file_path=gtb_file_path,
                    upload_type="temp",
                    host=host,
                )
                self._ports_out["OutputFile"].data = gdim_file
                return gdim_file
            else:
                self._ports_out["OutputFile"].data = gtb_file_path
                return gtb_file_path

        except Exception as e:
            # Clean up temporary directory on error
            if temp_dir.exists():
                shutil.rmtree(temp_dir)
            raise Exception(f"Failed to save GDIM tables as .{self.format} file: {e}")


#####################
# === GDIM APP ===
#####################
@module_decorator()
class UpdateGdimAppProjectInfo(PipeModule):
    """Update the data of the Project Information APP."""

    InputToken: PortReference[PortTypeHint.Token]
    InputSingleResult: PortReference[PortTypeHint.SingleResult]
    OutputSingleResult: PortReference[PortTypeHint.SingleResult]

    _port_docs = {
        "InputSingleResult": "The data of the project information.",
        "OutputSingleResult": "The data of the project information.",
    }

    # Reserved fields (project name + coordinate system) that map one-to-one
    # onto attributes of the project info object.
    _DIRECT_FIELDS = frozenset(
        {
            "projectName",
            "coordinateSystem",
            "zoneMethod",
            "zoneNumber",
            "centralMeridian",
            "refPointLongitude",
            "refPointLatitude",
            "refPointX",
            "refPointY",
            "elevationDatum",
            "yAxisDirection",
        }
    )
    # Flattened address keys mapped to attribute names on `projectAddress`.
    _ADDRESS_FIELDS = {
        "projectAddressProvince": "province",
        "projectAddressCity": "city",
        "projectAddressDistrict": "district",
        "projectAddressDetail": "address",
    }

    def __init__(
        self,
        mname: str | None = "UpdateGdimAppProjectInfo",
        auto_run: bool = True,
        update_data: PortTypeHint.SingleResult | None = None,
        token: str | None = None,
        proj_id: int | str | None = None,
        host: str | None = None,
    ) -> None:
        """Initialize the UpdateGdimAppProjectInfo object.

        Parameters
        ----------
        update_data : PortTypeHint.SingleResult | None
            The data of the project information to update.
            Coordinate system information can also be included in the `update_data`.
            If the key of a field is not in the data, the value of the field will be kept unchanged.
            For projectAddress, here are the keys:
                - projectAddressProvince
                - projectAddressCity
                - projectAddressDistrict
                - projectAddressDetail

        token : str | None
            The token of the user.

        proj_id : int | str | None
            The id of the project.

        host : str | None
            The host of the gdim platform.

        Note
        ----
        For customer fields in the project information, both the field name and the field title are accepted.
        """
        super().__init__(mname=mname, auto_run=auto_run)

        if update_data is not None:
            # Wire the data onto the declared port. (It must be assigned to
            # 'InputSingleResult'; assigning any other attribute would leave
            # the port that execute() reads empty.)
            self.InputSingleResult = update_data

        if token is not None:
            self.InputToken = (token, proj_id, host)  # type: ignore

        self._title = "更新GDIM项目信息"
        self._description = "更新GDIM项目信息，包括项目信息和坐标系统"

    def execute(self) -> PortTypeHint.SingleResult | None:
        """Fetch the project info, apply the requested updates, and push them back.

        Returns
        -------
        PortTypeHint.SingleResult | None
            The updated project information as a single result, or None when
            the token or the update data is missing.
        """
        input_token: PortTypeHint.Token | None = self._ports_in["InputToken"].data
        update_data: PortTypeHint.SingleResult | None = self._ports_in[
            "InputSingleResult"
        ].data

        input_token = self.get_token(input_token)
        if input_token is None or update_data is None:
            self._ports_out["OutputSingleResult"].data = None
            return None

        token, proj_id, host = input_token

        project_info = get_project_info(pid=proj_id, token=token, host=host)
        project_info_structure = get_project_info_structure(
            pid=proj_id, token=token, host=host
        )
        fields_title_to_name = project_info_structure.title_to_name

        for name, value in update_data.name_values.items():
            if name in self._DIRECT_FIELDS:
                # Reserved project / coordinate-system fields.
                setattr(project_info, name, value)
            elif name in self._ADDRESS_FIELDS:
                # Flattened project address fields.
                setattr(project_info.projectAddress, self._ADDRESS_FIELDS[name], value)
            elif name in fields_title_to_name.values():
                # Customer field addressed by its field name.
                project_info.customFields[name] = value
            else:
                # Customer field addressed by its field title.
                title = update_data[name].title
                if title in fields_title_to_name:
                    project_info.customFields[fields_title_to_name[title]] = value
                else:
                    warnings.warn(
                        f"Field '{name}' is not in the project information structure, skipped."
                    )

        update_project_info(token=token, data=project_info, host=host)

        single_result = project_info.to_single_result()
        self._ports_out["OutputSingleResult"].data = single_result
        return single_result


@module_decorator()
class BorePlanDrawWriter(PipeModule):
    """Write the input table collection to a .gsc file which can be used for bores plan drawing in KCAD."""

    InputTables: PortReference[PortTypeHint.TableCollection]
    InputProjectInfo: PortReference[PortTypeHint.SingleResult]
    InputCoordinateSystem: PortReference[PortTypeHint.CoordinateSystem]
    InputToken: PortReference[PortTypeHint.Token]
    OutputFile: PortReference[PortTypeHint.FilePath | PortTypeHint.GdimFile]

    _port_docs = {
        "InputTables": "The input table collection containing bore and section line data.",
        "InputProjectInfo": "The input project info.",
        "InputCoordinateSystem": "The input coordinate system.",
        "InputToken": "The input token.",
        "OutputFile": "The output gsc file.",
    }

    def __init__(
        self,
        mname: str | None = "ConvertToBorePlanForCad",
        auto_run: bool = True,
        tables: PortTypeHint.TableCollection | None = None,
        proj_info: PortTypeHint.SingleResult | None = None,
        coordinate_system: PortTypeHint.CoordinateSystem | None = None,
        name_maps: dict[str, dict[str, str]] | None = None,
        bore_types_map: dict[str, BoreTypes] | None = None,
        proj_info_name_map: dict[str, str] | None = None,
        output_path: str | Path | None = None,
        gsc_file_name: str = "bore_plan_for_cad_draw.gsc",
        pipeline_workspace_as_output_path: bool = True,
        save_to_gdim: bool = False,
        token: str | None = None,
        proj_id: int | str | None = None,
        host: str | None = None,
    ) -> None:
        """Initialize a BorePlanDrawWriter object.

        Parameters
        ----------
        tables: PortTypeHint.TableCollection | None, default: None
            The input table collection containing bore and section line data.
            Here are the table names:
            bore_table: 钻孔一览表 (bore summary table)
            section_line_table: 剖面线表 (section line table)

        proj_info: PortTypeHint.SingleResult | None, default: None
            The input single result containing project info data.

        coordinate_system: PortTypeHint.CoordinateSystem | None, default: None
            The input coordinate system; its `yAxisDirection` determines the
            y-axis direction written into the .gsc file.

        name_maps: dict[str, dict[str, str]] | None, default: None
            Mapping of table names and field names between gsc file and the input tables.
            Key is the field_name in the gsc file and value is the table_name or table_title and field_name or field_title in the input tables.
            Example:
            {
                "table_names": {
                    "bore_table": "actual_bore_table_name",
                    "layer_table": "actual_layer_table_name",
                    "section_line_table": "actual_section_line_table_name",
                    ...
                },
                "field_names": {
                    "bore_table": {
                        "bore_num": "actual_bore_num_field",
                        "x": "actual_x_field",
                        "y": "actual_y_field",
                        "bore_type": "actual_bore_type_field",
                        "top": "actual_top_field",
                        "steady_water_depth": "actual_steady_water_depth_field",
                    },
                    "layer_table": {
                        "bore_num": "actual_bore_num_field",
                        "depths": "actual_depths_field"
                    },
                    "section_line_table": {
                        "name": "actual_name_field",
                        "bores": "actual_bores_field",
                    }
                }
            }

        bore_types_map: dict[str, BoreTypes] | None, default: None
            Mapping from string bore type names to BoreTypes enum.
            For example: {"鉴别孔": BoreTypes.IdentificationBore, "取土试样钻孔": BoreTypes.SoilSamplingBore}
            If the map is not specified, will try to compare to the names and titles of BoreTypes automatically.

        proj_info_name_map: dict[str, str] | None, default: None
            The mapping of the field_name of the `geoProfiles.ProjectInfo` and field_name or field_title of the single result.
            The key is the field_name in `geoProfiles.ProjectInfo`.
            The value is the field_name or field_title from the single result.
            If the mapping of a key is not provided,
            the function will try to find the field_name of single result which is same as the `geoProfiles.ProjectInfo` field_name automatically.

        output_path: str | Path | None, default: None
            The path to save the output gsc file.
            If None, the module will try to use the workspace from the pipeline, else use the current working directory.

        gsc_file_name: str, default: "bore_plan_for_cad_draw.gsc"
            The name of the gsc file.

        pipeline_workspace_as_output_path: bool, default: True
            If True, the 'workspace' attribute of the pipeline will be used to find the output_path at first.
            If False, the module's attributes 'output_path' will be used to find the output_path at first.

        save_to_gdim: bool, default: False
            If True, the generated .gsc file will be saved to the gdim file server and `token`, `proj_id` should be input.
            If False, the generated .gsc file will be saved to the local file system.

        token: str | None
            The token of the user.
            It's valid only when `save_to_gdim` is True.

        proj_id: int | str | None
            The id of the gdim project.
            It's valid only when `save_to_gdim` is True.

        host: str | None
            The host of the gdim platform.
        """
        super().__init__(mname=mname, auto_run=auto_run)

        if tables is not None:
            self.InputTables = tables
        if proj_info is not None:
            self.InputProjectInfo = proj_info
        if coordinate_system is not None:
            self.InputCoordinateSystem = coordinate_system
        if token is not None or proj_id is not None:
            self.InputToken = (token, proj_id, host)

        self.name_maps = name_maps or {}
        self.bore_types_map = bore_types_map
        self.proj_info_name_map = proj_info_name_map
        self.output_path = output_path
        self.gsc_file_name = gsc_file_name
        self.pipeline_workspace_as_output_path = pipeline_workspace_as_output_path
        self.save_to_gdim = save_to_gdim

        self._title = "钻孔平面图数据"
        self._description = "将输入的钻孔和剖面线数据写入到 .gsc 文件中，可以用于在 KCAD 中进行钻孔平面图的绘制。"

    def update_ui_schema(self, reset: bool = False) -> dict[str, UIAttributeSchema]:
        """Expose the gsc file name as an editable UI attribute."""
        return {
            "gsc_file_name": StringAttributeSchema(
                title="GSC文件名",
                # Keep the UI default in sync with the constructor default.
                default="bore_plan_for_cad_draw.gsc",
            ),
        }

    def execute(self) -> PortTypeHint.FilePath | PortTypeHint.GdimFile | None:
        """Build the bore-plan .gsc archive and save it locally or upload it to GDIM.

        Returns
        -------
        PortTypeHint.FilePath | PortTypeHint.GdimFile | None
            The uploaded GDIM file (when `save_to_gdim`), the local file path
            otherwise, or None when required inputs are missing or the file
            could not be written.
        """
        tables: PortTypeHint.TableCollection | None = self._ports_in["InputTables"].data
        proj_info_data: PortTypeHint.SingleResult | None = self._ports_in[
            "InputProjectInfo"
        ].data
        coordinate_system_data: PortTypeHint.CoordinateSystem | None = self._ports_in[
            "InputCoordinateSystem"
        ].data
        input_token: PortTypeHint.Token | None = self._ports_in["InputToken"].data

        if not tables or proj_info_data is None or coordinate_system_data is None:
            self._ports_out["OutputFile"].data = None
            return None

        if self.save_to_gdim:
            input_token = self.get_token(input_token)
            if input_token is None:
                self._ports_out["OutputFile"].data = None
                return None
            token, proj_id, host = input_token

        # Convert project info
        project_info = convert_to_project_info(proj_info_data, self.proj_info_name_map)

        # Convert to section bores and section lines
        section_bores, section_lines = convert_to_plan_bores(
            tables=tables, name_maps=self.name_maps, bore_types_map=self.bore_types_map
        )

        # Map the coordinate system's y-axis direction to the short code used
        # in the .gsc file.
        y_direction = coordinate_system_data.yAxisDirection
        if y_direction == "north":
            y_direction = "N"
        elif y_direction == "east":
            y_direction = "E"
        # NOTE: 'scection_lines' (sic) matches the BoreForPlanDraw field name.
        bore_for_plan_draw = BoreForPlanDraw(
            project_infos=project_info,
            bores=section_bores,
            scection_lines=section_lines,
            y_direction=y_direction,
        )

        # Uploads are always staged in the pipeline workspace; local saves
        # honour the configured output path.
        if self.save_to_gdim:
            output_path = None
            pipeline_workspace_as_output_path = True
        else:
            output_path = self.output_path
            pipeline_workspace_as_output_path = self.pipeline_workspace_as_output_path

        final_output_path = self.get_workspace(
            output_path, pipeline_workspace_as_output_path
        )
        final_output_path = Path(final_output_path)

        # A .gsc file is a zip archive containing a single JSON payload.
        output_gsc_file = final_output_path / self.gsc_file_name
        with zipfile.ZipFile(output_gsc_file, "w", zipfile.ZIP_DEFLATED) as zipf:
            zipf.writestr("bore_plan.json", bore_for_plan_draw.model_dump_json())

        if output_gsc_file.exists():
            if self.save_to_gdim:
                gdim_file = upload_file_to_gdim(
                    user_token=token,
                    pid=proj_id,
                    file_path=output_gsc_file,
                    upload_type="temp",
                    host=host,
                )
                self._ports_out["OutputFile"].data = gdim_file
                return gdim_file
            else:
                # NOTE(review): the port carries a str while the return value
                # is a Path — callers appear to rely on this; confirm before
                # unifying.
                self._ports_out["OutputFile"].data = str(output_gsc_file)
                return output_gsc_file
        else:
            self._ports_out["OutputFile"].data = None
            return None

@module_decorator()
class BoreLogDrawWriter(PipeModule):
    """Write the input table collection to a .gsc file which can be used for bores log drawing in KCAD."""

    InputTables: PortReference[PortTypeHint.TableCollection]
    InputGeoParamsTable: PortReference[PortTypeHint.TableData]
    InputProjectInfo: PortReference[PortTypeHint.SingleResult]
    InputToken: PortReference[PortTypeHint.Token]
    OutputFile: PortReference[PortTypeHint.FilePath | PortTypeHint.GdimFile]

    _port_docs = {
        "InputTables": "The input table collection containing bore, layer, and lab test data etc.",
        "InputGeoParamsTable": "The input geo params table.",
        "InputProjectInfo": "The input project info.",
        "InputToken": "The input token.",
        "OutputFile": "The output gsc file.",
    }

    def __init__(
        self,
        mname: str | None = "ConvertToBoreForCadDraw",
        auto_run: bool = True,
        tables: PortTypeHint.TableCollection | None = None,
        geo_params_table: PortTypeHint.TableData | None = None,
        proj_info: PortTypeHint.SingleResult | None = None,
        name_maps: dict[str, dict[str, str]] | None = None,
        selected_bores: list[str] | None = None,
        drawing_scales: dict[str, int] | list[dict[str, str | int]] | int | None = None,
        proj_info_name_map: dict[str, str] | None = None,
        sample_types_map: dict[str, int] | None = None,
        output_path: str | Path | None = None,
        gsc_file_name: str = "bore_for_cad_draw.gsc",
        pipeline_workspace_as_output_path: bool = True,
        save_to_gdim: bool = False,
        token: str | None = None,
        proj_id: int | str | None = None,
        host: str | None = None,
    ) -> None:
        """Initialize a BoreLogDrawWriter object.

        Parameters
        ----------
        tables: PortTypeHint.TableCollection | None, default: None
            The input table collection containing bore, layer, and test data etc.
            Here are the table names:
            bore_table: 钻孔一览表 (bore summary table)
            layer_table: 地层表 (layer table)
            materials_table: 标准地层表 (standard layer table)
            spt_table: 标贯表 (SPT table)
            cpt_table: 双桥静探表 (CPT table)
            dpt_table: 动探表 (DPT table)
            wave_table: 波速表 (wave velocity table)
            samples_table: 取样表 (sampling table)
            soils_test_table: 常规试验表 (routine soil test table)

        geo_params_table: PortTypeHint.TableData | None, default: None
            The input table data containing geo parameters data from Gdim App '岩土参数建议值表'.
            geo_parameters_table: 岩土参数建议值表 (recommended geo parameters table)

        proj_info: PortTypeHint.SingleResult | None, default: None
            The input single result containing project info data.

        name_maps: dict[str, dict[str, str]] | None, default: None
            Mapping of table names and field names between gsc file and the input tables.
            Key is the field_name in the gsc file and value is the table_name or table_title and field_name or field_title in the input tables.
            Example:
            {
                "table_names": {
                    "bore_table": "actual_bore_table_name",
                    "layer_table": "actual_layer_table_name",
                    ...
                },
                "field_names": {
                    "bore_table": {
                        "bore_num": "actual_bore_num_field",
                        "x": "actual_x_field",
                        ...
                    },
                    ...
                }
            }

        selected_bores: list[str] | None, default: None
            List of bores (input bore numbers) to convert. If None, all bores will be converted.

        drawing_scales: dict[str, int] | list[dict[str, str | int]] | int | None, default: None
            Drawing scales for bores.
            If int, it will be used for all bores.
            If dict, the key is bore number, the value is drawing scale.
            If list, use the following format -
                `[{"bore_num": "num_1", "drawing_scales": 200}, {"bore_num": "num_2", "drawing_scales": 300}]`

        proj_info_name_map: dict[str, str] | None, default: None
            The mapping of the field_name of the `geoProfiles.ProjectInfo` and field_name or field_title of the single result.
            The key is the field_name in `geoProfiles.ProjectInfo`.
            The value is the field_name or field_title from the single result.
            If the mapping of a key is not provided,
            the function will try to find the field_name of single result which is same as the `geoProfiles.ProjectInfo` field_name automatically.

        sample_types_map: dict[str, int] | None, default: None
            Mapping from string sample type names to integer codes.
            For example: {"厚壁原状": 0, "薄壁原状": 0, "扰动样": 1, "岩石样": 2, "水样": 3}

        output_path: str | Path | None, default: None
            The path to save the output gsc file.
            If None, the module will try to use the workspace from the pipeline, else use the current working directory.

        gsc_file_name: str, default: "bore_for_cad_draw.gsc"
            The name of the gsc file.

        pipeline_workspace_as_output_path: bool, default: True
            If True, the 'workspace' attribute of the pipeline will be used to find the output_path at first.
            If False, the module's attributes 'output_path' will be used to find the output_path at first.

        save_to_gdim: bool, default: False
            If True, the generated .gsc file will be saved to the gdim file server.
            If False, the generated .gsc file will be saved to the local file system.

        token: str | None
            The token of the user.
            It's valid only when `save_to_gdim` is True.

        proj_id: int | str | None
            The id of the gdim project.
            It's valid only when `save_to_gdim` is True.

        host: str | None
            The host of the gdim platform.
        """
        super().__init__(mname=mname, auto_run=auto_run)

        if tables is not None:
            self.InputTables = tables
        if geo_params_table is not None:
            self.InputGeoParamsTable = geo_params_table
        if proj_info is not None:
            self.InputProjectInfo = proj_info
        if token is not None or proj_id is not None:
            self.InputToken = (token, proj_id, host)

        self.name_maps = name_maps or {}
        self.selected_bores = selected_bores
        self.drawing_scales = drawing_scales
        self.proj_info_name_map = proj_info_name_map
        self.sample_types_map = sample_types_map
        self.output_path = output_path
        self.gsc_file_name = gsc_file_name
        self.pipeline_workspace_as_output_path = pipeline_workspace_as_output_path
        self.save_to_gdim = save_to_gdim

        self._title = "钻孔柱状图数据"
        self._description = "将输入的钻孔和柱状图数据写入到 .gsc 文件中，可以用于在 KCAD 中进行钻孔柱状图的绘制。"

    def _collect_bore_nums(
        self, tables: PortTypeHint.TableCollection | None
    ) -> list | None:
        """Return the unique bore numbers from the bore table, or None if unavailable.

        Shared by `update_ui_schema` and `set_cal_params`, which both need
        the list of bore numbers as selection choices.
        """
        if tables is None:
            return None
        bore_table_name = self.name_maps.get("table_names", {}).get(
            "bore_table", "bore_table"
        )
        if bore_table_name not in tables:
            return None
        bore_table = tables.get_table(bore_table_name)
        bore_num_field = (
            self.name_maps.get("field_names", {})
            .get("bore_table", {})
            .get("bore_num", "bore_num")
        )
        if (
            bore_num_field in bore_table.columns
            or bore_num_field in bore_table.field_titles
        ):
            return bore_table[bore_num_field].unique().tolist()
        return None

    def update_ui_schema(self, reset: bool = False) -> dict[str, UIAttributeSchema]:
        """Build the UI schema for bore selection, drawing scales, and file name."""
        tables: PortTypeHint.TableCollection | None = self._ports_in["InputTables"].data

        # Offer the available bore numbers (if any) as selection choices.
        bore_num_selections = self._collect_bore_nums(tables)

        selected_bores_schema = ArrayAttributeSchema(
            title="选择钻孔",
            selections=bore_num_selections,
            items=StringAttributeSchema(),
            depends_on="InputTables",
        )
        selected_bores_schema.widget = "select"

        bore_num_column = StringAttributeSchema(title="钻孔编号", readonly=True)
        drawing_scales_column = IntegerAttributeSchema(
            title="柱状图比例", default=200, minimum=1, maximum=1000000
        )
        if self.selected_bores is None:
            min_rows = 0
            max_rows = 0
        else:
            # Exactly one fixed row per selected bore.
            bore_num_column.default = list(self.selected_bores)
            min_rows = len(self.selected_bores)
            max_rows = min_rows

        widget_attributes = WidgetAttribute(table_enable_set_all_rows=True)
        drawing_scales_schema = TableAttributeSchema(
            title="设置柱状图比例",
            columns=[bore_num_column, drawing_scales_column],
            columns_name=["bore_num", "drawing_scales"],
            min_rows=min_rows,
            max_rows=max_rows,
            widget_attributes=widget_attributes,
            depends_on="selected_bores",
        )

        gsc_file_name_schema = StringAttributeSchema(
            title="GSC文件名",
            default="bore_for_cad_draw.gsc",
        )

        return {
            "selected_bores": selected_bores_schema,
            "drawing_scales": drawing_scales_schema,
            "gsc_file_name": gsc_file_name_schema,
        }

    def set_cal_params(self, reset: bool = True) -> dict[str, RangeModel]:
        """Declare the calculation-parameter ranges for bores and drawing scales."""
        tables: PortTypeHint.TableCollection | None = self._ports_in["InputTables"].data
        bore_num_choices = self._collect_bore_nums(tables)

        values_range = {
            "selected_bores": RangeModel(
                vtype="list",
                title="选择钻孔",
                list_type="str",
                choices=bore_num_choices,
                default=self.selected_bores,
            ),
            "drawing_scales": RangeModel(
                vtype="dict",
                title="柱状图比例",
                dict_key_choices=bore_num_choices,
                dict_value_type="int",
                minmax=(0, None),
                include_min=False,
                default=self.drawing_scales,
            ),
        }
        return values_range

    def execute(self) -> PortTypeHint.FilePath | PortTypeHint.GdimFile | None:
        """Build the bore-log .gsc archive and save it locally or upload it to GDIM.

        Returns
        -------
        PortTypeHint.FilePath | PortTypeHint.GdimFile | None
            The uploaded GDIM file (when `save_to_gdim`), the local file path
            otherwise, or None when required inputs are missing or the file
            could not be written.
        """
        tables: PortTypeHint.TableCollection | None = self._ports_in["InputTables"].data
        geo_params_table: PortTypeHint.TableData | None = self._ports_in[
            "InputGeoParamsTable"
        ].data
        proj_info_data: PortTypeHint.SingleResult | None = self._ports_in[
            "InputProjectInfo"
        ].data
        input_token: PortTypeHint.Token | None = self._ports_in["InputToken"].data

        if not tables or proj_info_data is None:
            self._ports_out["OutputFile"].data = None
            return None

        if self.save_to_gdim:
            input_token = self.get_token(input_token)
            if input_token is None:
                self._ports_out["OutputFile"].data = None
                return None
            token, proj_id, host = input_token

        # Convert project info
        project_info = convert_to_project_info(proj_info_data, self.proj_info_name_map)

        # Work on a copy so the upstream table collection stays untouched.
        tables = tables.copy()
        # Add geo parameters table to tables
        if geo_params_table is not None:
            tables.add_table(geo_params_table)
        updated_tables = tables

        # Attach drawing_scales to the bore table if provided
        if self.drawing_scales is not None:
            # Normalize the list form [{"bore_num": ..., "drawing_scales": ...}]
            # into a plain {bore_num: scale} dict so every mapping form is
            # handled by the dict branch below.
            if isinstance(self.drawing_scales, list):
                drawing_scales = {
                    row["bore_num"]: row["drawing_scales"]
                    for row in self.drawing_scales
                }
            else:
                drawing_scales = self.drawing_scales

            bore_table_name = self.name_maps.get("table_names", {}).get(
                "bore_table", "bore_table"
            )
            if bore_table_name in tables:
                bore_table = tables.get_table(bore_table_name).copy()
                bore_num_field = (
                    self.name_maps.get("field_names", {})
                    .get("bore_table", {})
                    .get("bore_num", "bore_num")
                )

                if isinstance(drawing_scales, int):
                    # Single scale for all bores
                    bore_table["drawing_scales"] = drawing_scales
                elif isinstance(drawing_scales, dict):
                    # Different scales for different bores; bores without an
                    # entry fall back to the 'default' key (or 1 if absent).
                    bore_table["drawing_scales"] = (
                        bore_table[bore_num_field]
                        .map(drawing_scales)
                        .fillna(drawing_scales.get("default", 1))
                    )

                # Rebuild the collection with the updated bore table.
                updated_tables = TableCollection(
                    name=tables.name, title=tables.title, description=tables.description
                )
                for table in tables:
                    if table.name == bore_table_name or table.title == bore_table_name:
                        updated_tables.add_table(bore_table)
                    else:
                        updated_tables.add_table(table)

        # Convert to section bores and materials
        section_bores, materials = convert_to_section_bores_with_matrials(
            tables=updated_tables,
            name_maps=self.name_maps,
            selected_bore_nums=self.selected_bores,
            sample_types_map=self.sample_types_map,
        )

        # Create BoreForCadDraw object
        bore_for_cad_draw = BoreForCadDraw(
            project_infos=project_info, bores=section_bores, materials=materials
        )

        # Uploads are always staged in the pipeline workspace; local saves
        # honour the configured output path.
        if self.save_to_gdim:
            output_path = None
            pipeline_workspace_as_output_path = True
        else:
            output_path = self.output_path
            pipeline_workspace_as_output_path = self.pipeline_workspace_as_output_path

        # Same resolution order as BorePlanDrawWriter: pipeline workspace /
        # explicit path / cwd, depending on the flag.
        final_output_path = Path(
            self.get_workspace(output_path, pipeline_workspace_as_output_path)
        )

        # A .gsc file is a zip archive containing a single JSON payload.
        output_gsc_file = final_output_path / self.gsc_file_name
        with zipfile.ZipFile(output_gsc_file, "w", zipfile.ZIP_DEFLATED) as zipf:
            zipf.writestr("bores_log.json", bore_for_cad_draw.model_dump_json())

        if output_gsc_file.exists():
            if self.save_to_gdim:
                gdim_file = upload_file_to_gdim(
                    user_token=token,
                    pid=proj_id,
                    file_path=output_gsc_file,
                    upload_type="temp",
                    host=host,
                )
                self._ports_out["OutputFile"].data = gdim_file
                return gdim_file
            else:
                # NOTE(review): the port carries a str while the return value
                # is a Path — callers appear to rely on this; confirm before
                # unifying.
                self._ports_out["OutputFile"].data = str(output_gsc_file)
                return output_gsc_file
        else:
            self._ports_out["OutputFile"].data = None
            return None
