#  Copyright (c) ZenML GmbH 2023. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at:
#
#       https://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
#  or implied. See the License for the specific language governing
#  permissions and limitations under the License.
"""Implementation of the Databricks orchestrator."""

import itertools
import os
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Type, cast
from uuid import UUID

from databricks.sdk import WorkspaceClient as DatabricksClient
from databricks.sdk.service.compute import (
    AutoScale,
    ClientsTypes,
    ClusterSpec,
    WorkloadType,
)
from databricks.sdk.service.jobs import CronSchedule, JobCluster
from databricks.sdk.service.jobs import Task as DatabricksTask
from databricks.sdk.service.workspace import ImportFormat

from zenml.client import Client
from zenml.constants import (
    ENV_ZENML_CUSTOM_SOURCE_ROOT,
    METADATA_ORCHESTRATOR_URL,
)
from zenml.integrations.databricks.flavors.databricks_orchestrator_flavor import (
    DatabricksOrchestratorConfig,
    DatabricksOrchestratorSettings,
)
from zenml.integrations.databricks.orchestrators.databricks_orchestrator_entrypoint_config import (
    ENV_ZENML_DATABRICKS_ORCHESTRATOR_RUN_ID,
    DatabricksEntrypointConfiguration,
)
from zenml.integrations.databricks.utils.databricks_utils import (
    convert_step_to_task,
)
from zenml.io import fileio
from zenml.logger import get_logger
from zenml.metadata.metadata_types import MetadataType, Uri
from zenml.models.v2.core.schedule import ScheduleResponse
from zenml.orchestrators import (
    SubmissionResult,
    WheeledOrchestrator,
)
from zenml.orchestrators.utils import get_orchestrator_run_name
from zenml.stack import StackValidator
from zenml.utils.package_utils import clean_requirements
from zenml.utils.pipeline_docker_image_builder import (
    PipelineDockerImageBuilder,
)

if TYPE_CHECKING:
    from zenml.models import PipelineRunResponse, PipelineSnapshotResponse
    from zenml.stack import Stack


logger = get_logger(__name__)

# Dotted path of the entrypoint module executed by each Databricks task.
ZENML_STEP_DEFAULT_ENTRYPOINT_COMMAND = "entrypoint.main"
# Workspace-files directory where the ZenML project wheel is uploaded
# (Workspace files are accessible from DBR 15+ job clusters).
DATABRICKS_WHEELS_DIRECTORY_PREFIX = "/Workspace/Shared/.zenml"
# Prefix for local-filesystem paths as understood by Databricks.
DATABRICKS_LOCAL_FILESYSTEM_PREFIX = "file:/"
# Fallback Databricks runtime version used when none is configured.
DATABRICKS_SPARK_DEFAULT_VERSION = "16.4.x-scala2.12"
# Databricks job-parameter reference, resolved to the job id at runtime.
DATABRICKS_JOB_ID_PARAMETER_REFERENCE = "{{job.id}}"
# Source root used inside the Databricks environment (the wheel is
# installed, so the current working directory is the code root).
DATABRICKS_ZENML_DEFAULT_CUSTOM_REPOSITORY_PATH = "."


class DatabricksOrchestrator(WheeledOrchestrator):
    """Databricks orchestrator.

    Runs ZenML pipelines as Databricks jobs: the repository is packaged as a
    wheel, uploaded to the Databricks workspace and each step becomes a task
    of a Databricks job running on a dedicated job cluster.
    """

    @property
    def validator(self) -> Optional[StackValidator]:
        """Validates the stack.

        In the remote case, checks that the stack contains a container registry,
        image builder and only remote components.

        Returns:
            A `StackValidator` instance.
        """

        def _validate_remote_components(
            stack: "Stack",
        ) -> Tuple[bool, str]:
            # Reject any local stack component: the pipeline runs remotely on
            # Databricks, where local components would not be reachable.
            for component in stack.components.values():
                if not component.config.is_local:
                    continue

                return False, (
                    f"The Databricks orchestrator runs pipelines remotely, "
                    f"but the '{component.name}' {component.type.value} is "
                    "a local stack component and will not be available in "
                    "the Databricks step.\nPlease ensure that you always "
                    "use non-local stack components with the Databricks "
                    "orchestrator."
                )

            return True, ""

        return StackValidator(
            custom_validation_function=_validate_remote_components,
        )

    def _get_databricks_client(
        self,
    ) -> DatabricksClient:
        """Creates a Databricks client.

        Returns:
            The Databricks client, authenticated with the OAuth client
            credentials from the orchestrator config.
        """
        return DatabricksClient(
            host=self.config.host,
            client_id=self.config.client_id,
            client_secret=self.config.client_secret,
        )

    @property
    def config(self) -> DatabricksOrchestratorConfig:
        """Returns the `DatabricksOrchestratorConfig` config.

        Returns:
            The configuration.
        """
        return cast(DatabricksOrchestratorConfig, self._config)

    @property
    def settings_class(self) -> Type[DatabricksOrchestratorSettings]:
        """Settings class for the Databricks orchestrator.

        Returns:
            The settings class.
        """
        return DatabricksOrchestratorSettings

    def get_orchestrator_run_id(self) -> str:
        """Returns the active orchestrator run id.

        Returns:
            The orchestrator run id.

        Raises:
            RuntimeError: If the run id cannot be read from the environment.
                This happens when this method gets called while the
                orchestrator is not running a pipeline.
        """
        try:
            return os.environ[ENV_ZENML_DATABRICKS_ORCHESTRATOR_RUN_ID]
        except KeyError as e:
            raise RuntimeError(
                "Unable to read run id from environment variable "
                f"{ENV_ZENML_DATABRICKS_ORCHESTRATOR_RUN_ID}."
            ) from e

    def setup_credentials(self) -> None:
        """Set up credentials for the orchestrator."""
        connector = self.get_connector()
        assert connector is not None
        connector.configure_local_client()

    def submit_pipeline(
        self,
        snapshot: "PipelineSnapshotResponse",
        stack: "Stack",
        base_environment: Dict[str, str],
        step_environments: Dict[str, Dict[str, str]],
        placeholder_run: Optional["PipelineRunResponse"] = None,
    ) -> Optional[SubmissionResult]:
        """Submits a pipeline to the orchestrator.

        This method should only submit the pipeline and not wait for it to
        complete. If the orchestrator is configured to wait for the pipeline run
        to complete, a function that waits for the pipeline run to complete can
        be passed as part of the submission result.

        Args:
            snapshot: The pipeline snapshot to submit.
            stack: The stack the pipeline will run on.
            base_environment: Base environment shared by all steps. This should
                be set if your orchestrator for example runs one container that
                is responsible for starting all the steps.
            step_environments: Environment variables to set when executing
                specific steps.
            placeholder_run: An optional placeholder run for the snapshot.

        Raises:
            ValueError: If the schedule is not set or if the cron expression
                is not set.
            RuntimeError: If the wheel file cannot be uploaded to the Databricks workspace.

        Returns:
            Optional submission result.
        """
        settings = cast(
            DatabricksOrchestratorSettings, self.get_settings(snapshot)
        )
        if snapshot.schedule:
            if snapshot.schedule.catchup or snapshot.schedule.interval_second:
                logger.warning(
                    "Databricks orchestrator only uses schedules with the "
                    "`cron_expression` property, with optional `start_time` and/or `end_time`. "
                    "All other properties are ignored."
                )
            if snapshot.schedule.cron_expression is None:
                raise ValueError(
                    "Property `cron_expression` must be set when passing "
                    "schedule to a Databricks orchestrator."
                )
            if (
                snapshot.schedule.cron_expression
                and settings.schedule_timezone is None
            ):
                raise ValueError(
                    "Property `schedule_timezone` must be set when passing "
                    "`cron_expression` to a Databricks orchestrator."
                    "Databricks orchestrator requires a Java Timezone ID to run the pipeline on schedule."
                    "Please refer to https://docs.oracle.com/middleware/1221/wcs/tag-ref/MISC/TimeZones.html for more information."
                )

        # Create a callable for future compilation into a dsl.Pipeline.
        def _construct_databricks_pipeline(
            zenml_project_wheel: str, job_cluster_key: str
        ) -> List[DatabricksTask]:
            """Create a Databricks task for each step.

            This should contain the name of the step or task and configures the
            entrypoint of the task to run the step.

            Additionally, this gives each task information about its
            direct downstream steps.

            Args:
                zenml_project_wheel: The wheel package containing the ZenML
                    project.
                job_cluster_key: The ID of the Databricks job cluster.

            Returns:
                A list of Databricks tasks.
            """
            tasks = []
            for step_name, step in snapshot.step_configurations.items():
                # The arguments are passed to configure the entrypoint of the
                # docker container when the step is called.
                arguments = DatabricksEntrypointConfiguration.get_entrypoint_arguments(
                    step_name=step_name,
                    snapshot_id=snapshot.id,
                    wheel_package=self.package_name,
                    databricks_job_id=DATABRICKS_JOB_ID_PARAMETER_REFERENCE,
                )

                # Find the upstream container ops of the current step and
                # configure the current container op to run after them
                upstream_steps = [
                    f"{snapshot.id}_{upstream_step_name}"
                    for upstream_step_name in step.spec.upstream_steps
                ]

                docker_settings = step.config.docker_settings
                docker_image_builder = PipelineDockerImageBuilder()
                # Gather the requirements files (step-specific, since Docker
                # settings can differ per step).
                requirements_files = (
                    docker_image_builder.gather_requirements_files(
                        docker_settings=docker_settings,
                        stack=Client().active_stack,
                        log=False,
                    )
                )

                # Extract and clean the requirements
                requirements = list(
                    itertools.chain.from_iterable(
                        r[1].strip().split("\n") for r in requirements_files
                    )
                )

                # Remove empty items and duplicates
                requirements = sorted(set(filter(None, requirements)))

                task = convert_step_to_task(
                    f"{snapshot.id}_{step_name}",
                    ZENML_STEP_DEFAULT_ENTRYPOINT_COMMAND,
                    arguments,
                    clean_requirements(requirements),
                    depends_on=upstream_steps,
                    zenml_project_wheel=zenml_project_wheel,
                    job_cluster_key=job_cluster_key,
                )
                tasks.append(task)
            return tasks

        # Get the orchestrator run name
        orchestrator_run_name = get_orchestrator_run_name(
            pipeline_name=snapshot.pipeline_configuration.name
        )

        # Copy the repository to a temporary directory and add a setup.py file
        repository_temp_dir = (
            self.copy_repository_to_temp_dir_and_add_setup_py()
        )

        # The wheel lives inside the temporary directory, so keep the
        # directory alive until the upload has finished and clean it up even
        # when the upload fails.
        try:
            # Create a wheel for the package in the temporary directory
            wheel_path = self.create_wheel(temp_dir=repository_temp_dir)

            databricks_client = self._get_databricks_client()

            # Upload wheel to Workspace files (DBR 15+ compatible)
            snapshot_name = snapshot.pipeline.name
            databricks_directory = f"{DATABRICKS_WHEELS_DIRECTORY_PREFIX}/{snapshot_name}/{orchestrator_run_name}"
            wheel_filename = wheel_path.rsplit("/", 1)[-1]
            databricks_wheel_path = f"{databricks_directory}/{wheel_filename}"

            # Create directory and upload wheel file using Workspace API
            # (works with DBR 15+)
            try:
                databricks_client.workspace.mkdirs(path=databricks_directory)
                with open(wheel_path, "rb") as f:
                    databricks_client.workspace.upload(
                        path=databricks_wheel_path,
                        content=f.read(),
                        format=ImportFormat.AUTO,
                        overwrite=True,
                    )
                logger.info(
                    f"Successfully uploaded wheel to Databricks workspace: "
                    f"{databricks_wheel_path}"
                )
            except Exception as e:
                raise RuntimeError(
                    f"Failed to upload wheel file to Databricks workspace at "
                    f"{databricks_wheel_path}. Ensure your Databricks workspace has "
                    f"the necessary permissions and the path is accessible. "
                    f"Original error: {e}"
                ) from e
        finally:
            fileio.rmtree(repository_temp_dir)

        # Construct the env variables for the pipeline
        env_vars = base_environment.copy()
        if settings.spark_env_vars:
            env_vars.update(settings.spark_env_vars)
        env_vars[ENV_ZENML_CUSTOM_SOURCE_ROOT] = (
            DATABRICKS_ZENML_DEFAULT_CUSTOM_REPOSITORY_PATH
        )

        # using the databricks client uploads the pipeline to databricks
        job_cluster_key = self.sanitize_name(str(snapshot.id))
        self._upload_and_run_pipeline(
            pipeline_name=orchestrator_run_name,
            settings=settings,
            tasks=_construct_databricks_pipeline(
                databricks_wheel_path, job_cluster_key
            ),
            env_vars=env_vars,
            job_cluster_key=job_cluster_key,
            schedule=snapshot.schedule,
        )
        return None

    def _upload_and_run_pipeline(
        self,
        pipeline_name: str,
        settings: DatabricksOrchestratorSettings,
        tasks: List[DatabricksTask],
        env_vars: Dict[str, str],
        job_cluster_key: str,
        schedule: Optional["ScheduleResponse"] = None,
    ) -> None:
        """Uploads and run the pipeline on the Databricks jobs.

        Args:
            pipeline_name: The name of the pipeline.
            settings: The settings for the Databricks orchestrator.
            tasks: The list of tasks to run.
            env_vars: The environment variables.
            job_cluster_key: The ID of the Databricks job cluster.
            schedule: The schedule to run the pipeline.

        Raises:
            ValueError: If no cluster policy can be resolved, if the
                `schedule_timezone` is not set when passing a cron
                expression, or if the created job has no job id.
        """
        databricks_client = self._get_databricks_client()
        spark_conf = settings.spark_conf or {}

        # Respect an explicitly configured policy id; only fall back to the
        # workspace's built-in `Job Compute` policy when none is set.
        policy_id = settings.policy_id
        if policy_id is None:
            for policy in databricks_client.cluster_policies.list():
                if policy.name == "Job Compute":
                    policy_id = policy.policy_id
        if policy_id is None:
            raise ValueError(
                "Could not find the `Job Compute` policy in Databricks."
            )
        job_cluster = JobCluster(
            job_cluster_key=job_cluster_key,
            new_cluster=ClusterSpec(
                spark_version=settings.spark_version
                or DATABRICKS_SPARK_DEFAULT_VERSION,
                num_workers=settings.num_workers,
                node_type_id=settings.node_type_id or "Standard_D4s_v5",
                policy_id=policy_id,
                autoscale=AutoScale(
                    min_workers=settings.autoscale[0],
                    max_workers=settings.autoscale[1],
                ),
                single_user_name=settings.single_user_name,
                spark_env_vars=env_vars,
                spark_conf=spark_conf,
                # Job-only cluster: disable notebook workloads.
                workload_type=WorkloadType(
                    clients=ClientsTypes(jobs=True, notebooks=False)
                ),
            ),
        )
        if schedule and schedule.cron_expression:
            schedule_timezone = settings.schedule_timezone
            if schedule_timezone:
                databricks_schedule = CronSchedule(
                    quartz_cron_expression=schedule.cron_expression,
                    timezone_id=schedule_timezone,
                )
            else:
                raise ValueError(
                    "Property `schedule_timezone` must be set when passing "
                    "`cron_expression` to a Databricks orchestrator. "
                    "Databricks orchestrator requires a Java Timezone ID to run the pipeline on schedule. "
                    "Please refer to https://docs.oracle.com/middleware/1221/wcs/tag-ref/MISC/TimeZones.html for more information."
                )
        else:
            databricks_schedule = None

        job = databricks_client.jobs.create(
            name=pipeline_name,
            tasks=tasks,
            job_clusters=[job_cluster],
            schedule=databricks_schedule,
        )
        if job.job_id:
            databricks_client.jobs.run_now(job_id=job.job_id)
        else:
            raise ValueError("An error occurred while getting the job id.")

    def get_pipeline_run_metadata(
        self, run_id: UUID
    ) -> Dict[str, "MetadataType"]:
        """Get general component-specific metadata for a pipeline run.

        Args:
            run_id: The ID of the pipeline run.

        Returns:
            A dictionary of metadata, containing the URL of the Databricks
            job backing the run.
        """
        run_url = f"{self.config.host}/jobs/{self.get_orchestrator_run_id()}"
        return {
            METADATA_ORCHESTRATOR_URL: Uri(run_url),
        }
