# Copyright 2023 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import asyncio
import collections
import functools
import hashlib
import inspect
import pathlib
import re
import typing
import urllib.parse
from copy import deepcopy
from datetime import UTC, datetime, timedelta
from typing import Any, Optional, Union

import fastapi.concurrency
import mergedeep
import pytz
import sqlalchemy
from sqlalchemy import (
    Column,
    and_,
    case,
    delete,
    distinct,
    exists,
    func,
    or_,
    select,
    text,
    tuple_,
    types,
)
from sqlalchemy.exc import IntegrityError, SQLAlchemyError
from sqlalchemy.inspection import inspect as sqlalchemy_inspect
from sqlalchemy.orm import Query, Session, aliased, load_only, selectinload
from sqlalchemy.orm.attributes import flag_modified
from sqlalchemy.sql.compiler import IdentifierPreparer
from sqlalchemy.sql.elements import BinaryExpression

import mlrun
import mlrun.common.constants as mlrun_constants
import mlrun.common.formatters
import mlrun.common.model_monitoring
import mlrun.common.runtimes.constants
import mlrun.common.schemas
import mlrun.common.types
import mlrun.errors
import mlrun.k8s_utils
import mlrun.model
from mlrun.artifacts.base import fill_artifact_object_hash
from mlrun.common.db.dialects import Dialects
from mlrun.common.schemas.feature_store import (
    FeatureSetDigestOutputV2,
    FeatureSetDigestSpecV2,
)
from mlrun.common.schemas.model_monitoring import (
    EndpointMode,
    EndpointType,
    ModelEndpointSchema,
    ModelMonitoringAppLabel,
)
from mlrun.config import config
from mlrun.errors import err_to_str
from mlrun.lists import ArtifactList, RunList
from mlrun.model import RunObject
from mlrun.utils import (
    fill_function_hash,
    fill_object_hash,
    generate_artifact_uri,
    generate_object_uri,
    get_in,
    logger,
    parse_artifact_uri,
    update_in,
    validate_artifact_key_name,
    validate_tag_name,
)

import framework.constants
import framework.db.session
import framework.db.sqldb.base
import framework.utils.helpers
from framework.db.base import DBInterface
from framework.db.sqldb.helpers import (
    MemoizationCache,
    generate_query_for_name_with_wildcard,
    generate_query_predicate_for_name,
    generate_time_range_query,
    label_set,
    run_end_time,
    run_labels,
    run_start_time,
    run_state,
    update_labels,
)
from framework.db.sqldb.models import (
    AlertActivation,
    AlertConfig,
    AlertState,
    AlertTemplate,
    ArtifactV2,
    BackgroundTask,
    BackgroundTaskLabel,
    Base,
    DatastoreProfile,
    DataVersion,
    Entity,
    Feature,
    FeatureSet,
    FeatureVector,
    Function,
    HubSource,
    ModelEndpoint,
    PaginationCache,
    Project,
    ProjectSummary,
    Run,
    Schedule,
    SystemMetadata,
    TimeWindowTracker,
    _labeled,
    _tagged,
    _with_notifications,
)

# Sentinel alias for None used in SQLAlchemy filter comparisons (e.g.
# `column == NULL`); aliasing avoids flake8 E711 warnings on such comparisons.
NULL = None  # Avoid flake8 issuing warnings when comparing in filter
# Prefix used for uids of tagged objects that are stored unversioned
unversioned_tagged_object_uid_prefix = "unversioned-"

# Max values for 32-bit and 64-bit signed integers
MAX_INT_32 = 2_147_483_647  # For Integer (4-byte)
MAX_INT_64 = 9_223_372_036_854_775_807  # For BigInteger (8-byte)

# Driver-specific substrings identifying unique-constraint violations
# (sqlite3 / pymysql); matched against the exception chain by
# `retry_on_conflict` to decide whether an error is a retriable conflict.
conflict_messages = [
    "(sqlite3.IntegrityError) UNIQUE constraint failed",
    "(pymysql.err.IntegrityError) (1062",
    "(pymysql.err.IntegrityError) (1586",
]


def retry_on_conflict(function):
    """
    Decorator retrying the wrapped function on DB conflict (unique-constraint)
    errors.

    Most of our store_x functions first do a get; when nothing is found they
    create the object, otherwise they update the existing one — i.e. an INSERT
    or an UPDATE on the SQL level. Two concurrent requests may both find
    nothing and both INSERT, so one of them hits a conflict. A retry at the
    lowest DB layer cannot help there: the failing request holds no reference
    to the now-existing row and would keep INSERTing. The whole store_x call
    must be re-executed so the get runs again — hence this "around" decorator.
    """

    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        retry_timeout = config.httpdb.db.conflict_retry_timeout
        if not retry_timeout:
            # Retries disabled by configuration - single attempt
            return function(*args, **kwargs)

        def _attempt():
            try:
                return function(*args, **kwargs)
            except Exception as exc:
                is_conflict = (
                    mlrun.utils.helpers.are_strings_in_exception_chain_messages(
                        exc, conflict_messages
                    )
                )
                if not is_conflict:
                    # Non-conflict errors must abort the retry loop immediately
                    raise mlrun.errors.MLRunFatalFailureError(original_exception=exc)
                logger.warning(
                    "Got conflict error from DB. Retrying", err=err_to_str(exc)
                )
                raise mlrun.errors.MLRunRuntimeError(
                    "Got conflict error from DB"
                ) from exc

        interval = config.httpdb.db.conflict_retry_interval
        if interval is None:
            interval = mlrun.utils.create_step_backoff([[0.0001, 1], [3, None]])
        return mlrun.utils.helpers.retry_until_successful(
            interval,
            retry_timeout,
            logger,
            False,
            _attempt,
        )

    return wrapper


class SQLDB(DBInterface):
    def __new__(cls, dsn: str = ""):
        """Dispatch instantiation to a dialect-specific subclass by DSN scheme."""
        if cls is not SQLDB or not dsn:
            # Subclasses (or missing DSN) instantiate as requested
            return super().__new__(cls)
        scheme = urllib.parse.urlparse(dsn).scheme.lower()
        if scheme.startswith(Dialects.MYSQL):
            return super().__new__(MySQLDB)
        if scheme.startswith(Dialects.POSTGRESQL):
            return super().__new__(PostgreSQLDB)
        return super().__new__(cls)

    def __init__(self, dsn=""):
        """Keep the DSN and pre-compile the "<iteration>-<name>" key matcher."""
        self._name_with_iter_regex = re.compile("^[0-9]+-.+$")
        self.dsn = dsn

    def initialize(self, session):
        """For sqlite DSNs, make sure the DB file's parent directory exists."""
        dsn = self.dsn
        if not dsn or not dsn.startswith("sqlite:///"):
            return
        logger.info("Creating sqlite db file", dsn=dsn)
        parsed = urllib.parse.urlparse(dsn)
        # parsed.path starts with a leading "/" - strip it to get the file path
        pathlib.Path(parsed.path[1:]).parent.mkdir(parents=True, exist_ok=True)

    # ---- Logs ----
    def store_log(self, session, uid, project="", body=b"", append=False):
        """Unsupported: log payloads are not persisted in the SQL DB."""
        raise NotImplementedError("DB should not be used for logs storage")

    def get_log(self, session, uid, project="", offset=0, size=0):
        """Unsupported: log payloads are not persisted in the SQL DB."""
        raise NotImplementedError("DB should not be used for logs storage")

    # ---- Runs ----
    @retry_on_conflict
    def store_run(
        self,
        session,
        run_data,
        uid,
        project="",
        iter=0,
    ):
        """
        Create the run row if it does not exist yet, then sync it from
        ``run_data`` and upsert it. Wrapped with ``retry_on_conflict`` to
        survive concurrent-insert races.
        """
        run_name = run_data["metadata"]["name"]
        logger.debug(
            "Storing run to db",
            project=project,
            uid=uid,
            iter=iter,
            run_name=run_name,
        )
        now = datetime.now(UTC)
        # Intentionally fetched without FOR UPDATE - locking here may deadlock
        existing = self._get_run(session, uid, project, iter)
        run = existing or Run(
            name=run_name,
            uid=uid,
            project=project,
            iteration=iter,
            state=run_state(run_data),
            start_time=run_start_time(run_data) or now,
            requested_logs=False,
        )
        self._enrich_run_model(now, run, run_data)
        self._upsert(session, [run], ignore=True)

    def create_or_get_run(
        self,
        session: Session,
        run_data: dict,
        uid: str,
        project: str = "",
        iter: int = 0,
    ):
        """
        Ensure the given run exists in the DB.

        Under some isolation levels a session may hold a snapshot that hides a
        run another transaction already inserted. We therefore always try to
        insert; on a conflict the session has been rolled back (giving us a
        fresh snapshot), so we simply read the existing run instead.
        """
        run_name = run_data["metadata"]["name"]
        logger.debug(
            "Creating or getting run in DB",
            project=project,
            uid=uid,
            iter=iter,
            run_name=run_name,
        )
        now = datetime.now(UTC)
        run = Run(
            name=run_name,
            uid=uid,
            project=project,
            iteration=iter,
            state=run_state(run_data),
            start_time=run_start_time(run_data) or now,
            requested_logs=False,
        )
        self._enrich_run_model(now, run, run_data)
        try:
            self._upsert(session, [run], silent=True)
        except mlrun.errors.MLRunConflictError:
            # The insert lost the race - read the run with the fresh snapshot
            return self.read_run(session, uid=uid, project=project, iter=iter)
        return run_data

    def update_run(self, session, updates: dict, uid: str, project: str, iter: int = 0):
        """
        Apply ``updates`` (key paths -> values) onto the stored run struct and
        persist it; the row is locked FOR UPDATE while being modified.

        :raises MLRunMissingProjectError: when ``project`` is empty
        :raises MLRunNotFoundError: when the run does not exist
        """
        if not project:
            raise mlrun.errors.MLRunMissingProjectError()
        run = self._get_run(session, uid, project, iter, with_for_update=True)
        if run is None:
            run_uri = RunObject.create_uri(project, uid, iter)
            raise mlrun.errors.MLRunNotFoundError(f"Run {run_uri} not found")

        struct = run.struct
        for key, value in updates.items():
            update_in(struct, key, value)

        self._ensure_run_name_on_update(run, struct)
        self._update_run_state(run, struct)
        if start_time := run_start_time(struct):
            run.start_time = start_time
        self._update_run_end_time(run, struct)
        # Replace labels only when the update payload itself carries labels
        if run_labels(updates):
            update_labels(run, run_labels(struct))
        self._update_run_updated_time(run, struct)
        run.struct = struct
        self._upsert(session, [run])
        self._delete_empty_labels(session, Run.Label)
        return run.struct

    def set_run_retrying_status(
        self,
        session: Session,
        project: str,
        uid: str,
        retrying: bool,
    ) -> dict:
        """
        Atomically acquire a FOR UPDATE lock on the specified run row, then add or remove
        the `retrying` label and update the `rerun_counter`.

        :param session:  SQLAlchemy session to use for the transaction.
        :param project:  Name of the project containing the run.
        :param uid:      UID of the workflow-runner run to lock and update.
        :param retrying: Whether to mark the run as retrying (True) or clear that flag (False).
                         - When setting to True, this will:
                           1. lock the row
                           2. verify no existing `retrying` label (else MLRunConflictError)
                           3. add `retrying="true"` and bump `rerun_counter`
                         - When setting to False, it will remove the `retrying` label.
        :returns:           The updated struct of the run.
        :raises MLRunNotFoundError:   If the run does not exist.
        :raises MLRunConflictError:   If attempting to set `retrying=True` when already marked.
        """
        try:
            run = self._get_run(
                session, uid, project, iteration=0, with_for_update=True
            )
            if not run:
                raise mlrun.errors.MLRunNotFoundError(f"Run {project}/{uid} not found")

            struct = run.struct
            # NOTE(review): assumes run_labels returns the struct's own label
            # mapping, so mutating `labels` also updates `struct` - verify
            labels = run_labels(struct)

            if not retrying:
                # Use the shared constant (previously the string literal
                # "retrying") so the key stays consistent with the checks below
                labels.pop(mlrun_constants.MLRunInternalLabels.retrying, None)
            elif mlrun_constants.MLRunInternalLabels.retrying in labels:
                raise mlrun.errors.MLRunConflictError
            else:
                labels[mlrun_constants.MLRunInternalLabels.retrying] = "true"
                labels[mlrun_constants.MLRunInternalLabels.rerun_counter] = str(
                    int(
                        labels.get(mlrun_constants.MLRunInternalLabels.rerun_counter, 0)
                    )
                    + 1
                )
            update_labels(run, labels)
            run.struct = struct
            self._upsert(session, [run])

            return struct
        finally:
            # ALWAYS commit so the FOR UPDATE lock is released
            session.commit()

    def list_distinct_runs_uids(
        self,
        session,
        project: typing.Optional[str] = None,
        requested_logs_modes: typing.Optional[list[bool]] = None,
        only_uids=True,
        last_update_time_from: typing.Optional[datetime] = None,
        states: typing.Optional[list[str]] = None,
        specific_uids: typing.Optional[list[str]] = None,
    ) -> typing.Union[list[str], RunList]:
        """
        List run uids (or whole runs) stored in the DB.

        :param session: DB session
        :param project: Project name; `*` or `None` list across all projects
        :param requested_logs_modes: When given, keep only runs whose
            requested-logs flag is one of these values
        :param only_uids: True - return a plain list of uid strings;
            False - return the full run objects as a RunList
        :param last_update_time_from: When given, keep only runs updated at or
            after this time
        :param states: When given, keep only runs in one of these states
        :param specific_uids: When given, keep only runs with these uids
        :return: List of run uids or RunList
        """
        # distinct() collapses duplicate uids - one row exists per iteration
        query = (
            self._query(session, distinct(Run.uid))
            if only_uids
            else self._query(session, Run)
        )

        if project and project != "*":
            query = query.filter(Run.project == project)
        if states:
            query = query.filter(Run.state.in_(states))
        if last_update_time_from is not None:
            query = query.filter(
                Run.updated >= self._ensure_datetime_obj(last_update_time_from)
            )
        if requested_logs_modes is not None:
            query = query.filter(Run.requested_logs.in_(requested_logs_modes))
        if specific_uids:
            query = query.filter(Run.uid.in_(specific_uids))

        if only_uids:
            # each result row is a (uid,) tuple - flatten it
            return [row[0] for row in query.all()]

        # group_by yields one full row per uid (unlike distinct); note there is
        # no guarantee which iteration's row represents each uid
        query = query.group_by(Run.uid)
        runs = RunList()
        for run in query:
            runs.append(run.struct)
        return runs

    def update_runs_requested_logs(
        self, session, uids: list[str], requested_logs: bool = True
    ):
        """
        Bulk-set the requested-logs flag (and bump ``updated``) for the given
        run uids.

        ``synchronize_session=False`` skips in-session state synchronization,
        so the commit must follow immediately:
        https://stackoverflow.com/questions/70350298/what-does-synchronize-session-false-do-exactly-in-update-functions-for-sqlalch
        """
        new_values = {
            Run.requested_logs: requested_logs,
            Run.updated: datetime.now(UTC),
        }
        self._query(session, Run).filter(Run.uid.in_(uids)).update(
            new_values,
            synchronize_session=False,
        )
        session.commit()

    def read_run(
        self,
        session: Session,
        uid: str,
        project: str,
        iter: int = 0,
        with_notifications: bool = False,
        populate_existing: bool = False,
    ):
        """
        Return the run's struct, enriched with the authoritative table-column
        timestamps (and notifications when requested).

        :raises MLRunMissingProjectError: when ``project`` is empty
        :raises MLRunNotFoundError: when no such run exists
        """
        if not project:
            raise mlrun.errors.MLRunMissingProjectError()
        run = self._get_run(
            session,
            uid,
            project,
            iter,
            with_notifications=with_notifications,
            populate_existing=populate_existing,
        )
        if run is None:
            raise mlrun.errors.MLRunNotFoundError(
                f"Run uid {uid} of project {project} not found"
            )
        run_struct = run.struct
        self._enrich_run_struct_from_model(run, run_struct, with_notifications)
        return run_struct

    def list_runs(
        self,
        session,
        project: typing.Union[str, list[str]],
        name: typing.Optional[str] = None,
        uid: typing.Optional[typing.Union[str, list[str]]] = None,
        labels: typing.Optional[typing.Union[str, list[str]]] = None,
        states: typing.Optional[list[mlrun.common.runtimes.constants.RunStates]] = None,
        sort: bool = True,
        iter: bool = False,
        start_time_from: typing.Optional[datetime] = None,
        start_time_to: typing.Optional[datetime] = None,
        last_update_time_from: typing.Optional[datetime] = None,
        last_update_time_to: typing.Optional[datetime] = None,
        end_time_from: typing.Optional[datetime] = None,
        end_time_to: typing.Optional[datetime] = None,
        partition_by: mlrun.common.schemas.RunPartitionByField = None,
        rows_per_partition: int = 1,
        partition_sort_by: mlrun.common.schemas.SortField = None,
        partition_order: mlrun.common.schemas.OrderType = mlrun.common.schemas.OrderType.desc,
        max_partitions: int = 0,
        requested_logs: typing.Optional[bool] = None,
        return_as_run_structs: bool = True,
        with_notifications: bool = False,
        offset: typing.Optional[int] = None,
        limit: typing.Optional[int] = None,
    ) -> RunList:
        """
        List runs of the given project(s), applying every provided filter:
        name (exact or "~"-prefixed fuzzy), uid(s), labels, states, column
        time ranges and the requested-logs flag. Optional partitioning keeps
        up to ``rows_per_partition`` rows per partition value. Results are
        run structs (RunList) unless ``return_as_run_structs`` is False, in
        which case the ORM rows are returned.

        :raises MLRunMissingProjectError: when ``project`` is empty
        """
        if not project:
            raise mlrun.errors.MLRunMissingProjectError()
        query = self._find_runs(session, uid, project, labels)
        if name is not None:
            query = self._add_run_name_query(query, name)
        if states is not None:
            query = query.filter(Run.state.in_(states))

        # time-range filters on the table columns: (value, column, lower bound?)
        time_bounds = (
            (start_time_from, Run.start_time, True),
            (start_time_to, Run.start_time, False),
            (last_update_time_from, Run.updated, True),
            (last_update_time_to, Run.updated, False),
            (end_time_from, Run.end_time, True),
            (end_time_to, Run.end_time, False),
        )
        for bound, column, is_lower in time_bounds:
            if bound is None:
                continue
            bound = self._ensure_datetime_obj(bound)
            query = query.filter(column >= bound if is_lower else column <= bound)

        if sort:
            # Secondary sort on id keeps the ordering stable when start_time ties
            query = query.order_by(Run.start_time.desc(), Run.id.desc())
        if not iter:
            query = query.filter(Run.iteration == 0)
        if requested_logs is not None:
            query = query.filter(Run.requested_logs == requested_logs)
        if with_notifications:
            # Inner join on purpose: runs without notifications are dropped
            query = query.join(Run.Notification)
        if partition_by:
            self._assert_partition_by_parameters(
                mlrun.common.schemas.RunPartitionByField,
                partition_by,
                partition_sort_by,
            )
            query = self._create_partitioned_query(
                session,
                query,
                Run,
                partition_by,
                rows_per_partition,
                partition_sort_by,
                partition_order,
                max_partitions,
            )

        query = self._paginate_query(query, offset, limit)

        if not return_as_run_structs:
            return query.all()

        runs = RunList()
        for run in query:
            struct = run.struct
            self._enrich_run_struct_from_model(run, struct, with_notifications)
            runs.append(struct)
        return runs

    def del_run(self, session, uid: str, project: str, iter: int = 0):
        """Delete a run by uid; all iterations of the run are removed."""
        if not project:
            raise mlrun.errors.MLRunMissingProjectError()
        # iteration is intentionally ignored - every iteration row is deleted
        self._delete(session, Run, uid=uid, project=project)

    def del_runs(
        self,
        session,
        project: str,
        name=None,
        labels=None,
        state=None,
        days_ago=0,
        uids=None,
    ):
        """
        Delete every run in ``project`` matching all of the given filters.

        :param days_ago: when non-zero, only runs started within the last
            ``days_ago`` days are considered
        :raises MLRunMissingProjectError: when ``project`` is empty
        """
        if not project:
            raise mlrun.errors.MLRunMissingProjectError()
        query = self._find_runs(session, None, project, labels)
        if days_ago:
            cutoff = datetime.now(UTC) - timedelta(days=days_ago)
            query = query.filter(Run.start_time >= cutoff)
        if name:
            query = self._add_run_name_query(query, name)
        if state:
            query = query.filter(Run.state == state)
        if uids:
            query = query.filter(Run.uid.in_(uids))
        # query.delete() cannot be used on a query with joins - delete per row
        for run in query:
            session.delete(run)
        session.commit()

    def _enrich_run_struct_from_model(
        self, run: Run, run_struct: dict, with_notifications: bool
    ):
        """
        Overwrite the struct's status timestamps with the table-column values
        (and optionally attach notifications).

        The columns are authoritative for ordering: sorting happens on table
        columns saved with fsp=3, while the struct fields use fsp=6 (in SQLite
        the start_time/updated columns return fsp=6 timestamps as well).
        """
        status = run_struct.setdefault("status", {})
        column_by_status_field = {
            "end_time": "end_time",
            "start_time": "start_time",
            "last_update": "updated",
        }
        for status_field, column_name in column_by_status_field.items():
            column_value = getattr(run, column_name, None)
            if not column_value:
                continue
            # timespec keeps micro/milliseconds in the string even when zero
            # (isoformat drops them by default)
            status[status_field] = self._add_utc_timezone(column_value).isoformat(
                timespec="microseconds"
            )

        if with_notifications:
            self._fill_run_struct_with_notifications(run.notifications, run_struct)

    def _delete_project_runs(self, session: Session, project: str):
        """Delete all run rows belonging to ``project``."""
        logger.debug("Removing project runs from db", project=project)
        self._delete_multi_objects(session=session, main_table=Run, project=project)

    def _fill_run_struct_with_notifications(self, notifications, run_struct):
        """Inject notification spec/status records into the run struct."""
        if not notifications:
            return
        spec_notifications = []
        status_notifications = {}
        run_struct.setdefault("spec", {})["notifications"] = spec_notifications
        run_struct.setdefault("status", {})["notifications"] = status_notifications
        for notification in notifications:
            spec, status = self._transform_notification_record_to_spec_and_status(
                notification
            )
            spec_notifications.append(spec)
            status_notifications[notification.name] = status

    def _enrich_run_model(self, now: datetime, run: Run, run_data: dict):
        """
        Sync the ORM record's columns (name check, state, labels, timestamps)
        with ``run_data``, then store the dict as the record's struct.
        """
        self._ensure_run_name_on_update(run, run_data)
        labels = run_labels(run_data)
        self._update_run_state(run, run_data)
        update_labels(run, labels)
        # The start time from the body deliberately wins over the stored one:
        # context initialization overrides it with the moment the user's code
        # actually started, not when the record was created on job trigger.
        # In the future we might want to limit who can actually do that.
        effective_start = run_start_time(run_data) or SQLDB._add_utc_timezone(
            run.start_time
        )
        run.start_time = effective_start
        run_data.setdefault("status", {})["start_time"] = effective_start.isoformat()
        self._update_run_updated_time(run, run_data, now=now)
        self._update_run_end_time(run, run_data, end_time=run_end_time(run_data))

        run.struct = run_data

    def _add_run_name_query(self, query, name):
        """
        Filter the runs query by name: a "~" prefix means a case-insensitive
        substring match, otherwise an exact match is used.

        Note: LIKE escaping is applied only on the fuzzy branch (the previous
        version computed it unconditionally, including on the exact-match
        branch where it was unused, and escaped before stripping the "~").
        """
        if name.startswith("~"):
            # Escape LIKE wildcards in the user-supplied pattern (sans "~")
            pattern = self._escape_characters_for_like_query(name[1:])
            query = query.filter(Run.name.ilike(f"%{pattern}%", escape="\\"))
        else:
            query = query.filter(Run.name == name)
        return query

    @staticmethod
    def _ensure_run_name_on_update(run_record: Run, run_dict: dict):
        """Reject updates that attempt to rename an existing run."""
        requested_name = run_dict["metadata"]["name"]
        if requested_name == run_record.name:
            return
        raise mlrun.errors.MLRunInvalidArgumentError(
            "Changing name for an existing run is invalid"
        )

    @staticmethod
    def _update_run_end_time(run: Run, run_dict: dict, end_time: Optional[str] = None):
        """
        Maintain the run's end time according to its state:

        * endable state (terminal or pending_retry) without an end time -
          stamp one (the given value, or a DB-side NOW()).
        * endable state with an end time already set - leave it untouched.
        * any other state - clear the end time from both record and dict.

        :param run: the run ORM record
        :param run_dict: the run body dict, kept in sync with the record
        :param end_time: explicit end time - used by the 'store' flow
        """
        run_states = mlrun.common.runtimes.constants.RunStates
        endable_states = run_states.terminal_states() + [run_states.pending_retry]
        if run.state not in endable_states:
            # A non-endable run must not carry an end time
            run.end_time = None
            run_dict.setdefault("status", {}).pop("end_time", None)
        elif not run.end_time:
            # fsp=6 so MySQL NOW() keeps microseconds
            run.end_time = end_time if end_time is not None else func.now(6)

    @staticmethod
    def _update_run_updated_time(
        run_record: Run, run_dict: dict, now: typing.Optional[datetime] = None
    ):
        """Stamp the updated column and status.last_update (defaulting to now)."""
        update_time = now if now is not None else datetime.now(UTC)
        run_record.updated = update_time
        run_dict.setdefault("status", {})["last_update"] = update_time.isoformat()

    @staticmethod
    def _update_run_state(run_record: Run, run_dict: dict):
        """Mirror the state derived from the dict onto both record and dict."""
        derived_state = run_state(run_dict)
        run_dict.setdefault("status", {})["state"] = derived_state
        run_record.state = derived_state

    # ---- Artifacts ----
    @retry_on_conflict
    def store_artifact(
        self,
        session,
        key,
        artifact,
        project,
        uid=None,
        iter=None,
        tag="",
        producer_id="",
        best_iteration=False,
        always_overwrite=False,
    ) -> str:
        """
        Store an artifact: update an existing row when one matches the uid
        calculated from the body, otherwise create a new one.

        :param session: DB session
        :param key: artifact key
        :param artifact: artifact body (dict, or an object exposing ``to_dict``)
        :param project: project name (required)
        :param uid: expected artifact uid; when given it must equal the uid
            calculated from the body (objects referenced by uid are immutable)
        :param iter: iteration number; when None or 0 the record is marked as
            best iteration
        :param tag: tag to apply; defaults to the reserved "latest" tag
        :param producer_id: id of the producer that generated the artifact
        :param best_iteration: mark this iteration as the best one
        :param always_overwrite: update the matching existing record even when
            a new record would otherwise be created
        :return: the (calculated) artifact uid
        :raises MLRunMissingProjectError: when ``project`` is empty
        :raises MLRunInvalidArgumentError: when ``uid`` conflicts with the
            calculated hash, or the tag/key names are invalid
        """
        if not project:
            raise mlrun.errors.MLRunMissingProjectError()
        tag = tag or mlrun.common.constants.RESERVED_TAG_NAME_LATEST

        # handle link artifacts separately
        if artifact.get("kind") == mlrun.common.schemas.ArtifactCategories.link.value:
            return self._mark_best_iteration_artifact(
                session,
                project,
                key,
                artifact,
                uid,
            )

        if tag:
            # fail early if tag is invalid
            validate_tag_name(tag, "artifact.metadata.tag")

        # keep the caller-provided uid so it can be compared with the hash
        original_uid = uid

        if isinstance(artifact, dict):
            artifact_dict = artifact
        else:
            artifact_dict = artifact.to_dict()

        # backfill key/project into the body when missing
        if not artifact_dict.get("metadata", {}).get("key"):
            artifact_dict.setdefault("metadata", {})["key"] = key
        if not artifact_dict.get("metadata", {}).get("project"):
            artifact_dict.setdefault("metadata", {})["project"] = project

        # calculate uid
        uid = fill_artifact_object_hash(artifact_dict, iter, producer_id)

        # If object was referenced by UID, the request cannot modify it
        if original_uid and uid != original_uid:
            raise mlrun.errors.MLRunInvalidArgumentError(
                "Changing uid for an object referenced by its uid"
            )

        # for easier querying, we mark artifacts without iteration as best iteration
        if not best_iteration and (iter is None or iter == 0):
            best_iteration = True

        # try to get an existing artifact with the same calculated uid
        existing_artifact = self._get_existing_artifact(
            session, project, key, uid, producer_id=producer_id, iteration=iter
        )

        # if the object is not new, we need to check if we need to update it or create a new one
        if existing_artifact:
            if (
                self._should_update_artifact(existing_artifact, uid, iter)
                or always_overwrite
            ):
                logger.debug(
                    "Updating an existing artifact",
                    project=project,
                    key=key,
                    iteration=iter,
                    uid=uid,
                )
                db_artifact = existing_artifact
                self._update_artifact_record_from_dict(
                    session,
                    db_artifact,
                    artifact_dict,
                    project,
                    key,
                    uid,
                    iter,
                    best_iteration,
                    producer_id,
                )
                self._upsert(session, [db_artifact])
                if tag:
                    self.tag_artifacts(session, tag, [db_artifact], project)
                return uid
            logger.debug(
                "A similar artifact exists, but some values have changed - creating a new artifact",
                project=project,
                key=key,
                iteration=iter,
                producer_id=producer_id,
            )

        # no reusable record - insert a fresh one
        return self.create_artifact(
            session,
            project,
            artifact_dict,
            key,
            tag,
            uid,
            iter,
            producer_id,
            best_iteration,
        )

    def create_artifact(
        self,
        session,
        project,
        artifact,
        key,
        tag="",
        uid=None,
        iteration=None,
        producer_id="",
        best_iteration=False,
    ):
        """
        Insert a brand-new artifact row and tag it (always also as "latest").

        :return: the artifact uid (calculated from the body when not given)
        :raises MLRunMissingProjectError: when ``project`` is empty
        :raises MLRunConflictError: a record with this key/uid already exists
        """
        if not project:
            raise mlrun.errors.MLRunMissingProjectError()
        uid = uid or fill_artifact_object_hash(artifact, iteration, producer_id)

        # fail on duplicates before writing anything
        existing_object = self._query(
            session, ArtifactV2, key=key, project=project, uid=uid
        ).one_or_none()
        if existing_object is not None:
            object_uri = generate_object_uri(project, key, tag)
            raise mlrun.errors.MLRunConflictError(
                f"Adding an already-existing {ArtifactV2.__name__} - {object_uri}"
            )

        validate_artifact_key_name(key, "artifact.key")

        db_artifact = ArtifactV2(project=project, key=key)
        self._update_artifact_record_from_dict(
            session,
            db_artifact,
            artifact,
            project,
            key,
            uid,
            iteration,
            best_iteration,
            producer_id,
        )
        self._upsert(session, [db_artifact])

        if tag:
            validate_tag_name(tag, "artifact.metadata.tag")
            self.tag_artifacts(session, tag, [db_artifact], project)

        # a newly created artifact is always tagged "latest" as well
        if tag != mlrun.common.constants.RESERVED_TAG_NAME_LATEST:
            self.tag_artifacts(
                session,
                mlrun.common.constants.RESERVED_TAG_NAME_LATEST,
                [db_artifact],
                project,
            )

        return uid

    def _get_parent_artifact_id(self, session, parent_uri: str) -> int:
        if parent_uri:
            _, uri = mlrun.datastore.parse_store_uri(parent_uri)
            project, key, iteration, tag, tree, uid = parse_artifact_uri(uri)
            parent_db_artifact = self.read_artifact(
                session=session,
                key=key,
                tag=tag,
                iter=iteration,
                producer_id=tree,
                uid=uid,
                project=project,
                raise_on_not_found=False,
                as_record=True,
            )
            if not parent_db_artifact:
                raise mlrun.errors.MLRunConflictError(
                    "Referenced artifact not found for URI: {references_uri}"
                )
            return parent_db_artifact.id

    def list_artifacts(
        self,
        session,
        project,
        name=None,
        tag=None,
        labels=None,
        since: typing.Optional[datetime] = None,
        until: typing.Optional[datetime] = None,
        kind=None,
        category: mlrun.common.schemas.ArtifactCategories = None,
        iter: typing.Optional[int] = None,
        best_iteration: bool = False,
        as_records: bool = False,
        uid: typing.Optional[str] = None,
        producer_id: typing.Optional[str] = None,
        producer_uri: typing.Optional[str] = None,
        most_recent: bool = False,
        parent_uri: typing.Optional[str] = None,
        format_: mlrun.common.formatters.ArtifactFormat = mlrun.common.formatters.ArtifactFormat.full,
        offset: typing.Optional[int] = None,
        limit: typing.Optional[int] = None,
        partition_by: typing.Optional[
            mlrun.common.schemas.ArtifactPartitionByField
        ] = None,
        rows_per_partition: typing.Optional[int] = 1,
        partition_sort_by: typing.Optional[
            mlrun.common.schemas.SortField
        ] = mlrun.common.schemas.SortField.updated,
        partition_order: typing.Optional[
            mlrun.common.schemas.OrderType
        ] = mlrun.common.schemas.OrderType.desc,
    ) -> typing.Union[list, ArtifactList]:
        """
        List artifacts of a project, filtered by the given criteria.

        When `as_records` is True the raw DB records are returned; otherwise the
        artifact structs are enriched (tag, parent URI, has_children) and formatted
        according to `format_`.

        :raises MLRunMissingProjectError:   If no project was given.
        :raises MLRunInvalidArgumentError:  If both `best_iteration` and `iter` were given.
        """
        if not project:
            raise mlrun.errors.MLRunMissingProjectError()

        # the two filters are mutually exclusive - "best" already picks an iteration
        if best_iteration and iter is not None:
            raise mlrun.errors.MLRunInvalidArgumentError(
                "Best iteration cannot be used when iter is specified"
            )

        records = self._find_artifacts(
            session,
            project,
            tag=tag,
            labels=labels,
            since=since,
            until=until,
            name=name,
            kind=kind,
            category=category,
            iter=iter,
            uid=uid,
            producer_id=producer_id,
            producer_uri=producer_uri,
            best_iteration=best_iteration,
            most_recent=most_recent,
            parent_uri=parent_uri,
            attach_tags=not as_records,
            offset=offset,
            limit=limit,
            partition_by=partition_by,
            rows_per_partition=rows_per_partition,
            partition_sort_by=partition_sort_by,
            partition_order=partition_order,
        )
        if as_records:
            return records

        artifacts = ArtifactList()
        for record, record_tag in records:
            struct = record.full_object
            self._set_tag_in_artifact_struct(struct, record_tag)
            self._set_parent_uri(struct, record.parent, parent_uri)
            struct["spec"]["has_children"] = bool(record.child_artifacts)
            artifacts.append(
                mlrun.common.formatters.ArtifactFormat.format_obj(struct, format_)
            )
        return artifacts

    def list_artifacts_for_producer_id(
        self,
        session,
        project: str,
        producer_id: str,
        artifact_identifiers: typing.Optional[list[tuple]] = None,
    ) -> ArtifactList:
        """
        List artifacts produced by the given producer id, enriched with tag,
        parent URI and has_children information.

        :param session:              DB session.
        :param project:              Project name (required).
        :param producer_id:          Producer id (run/workflow tree) to filter by.
        :param artifact_identifiers: Optional identifier tuples to narrow the results.
                                     None/empty means no narrowing.
                                     NOTE(review): the exact tuple layout is defined by
                                     _find_artifacts_for_producer_id - confirm there.

        :return: ArtifactList of artifact structs.
        :raises MLRunMissingProjectError: If no project was given.
        """
        if not project:
            raise mlrun.errors.MLRunMissingProjectError()
        # the previous default was the string "" which contradicted the list[tuple]
        # annotation; None keeps the same falsy semantics while matching the type
        artifact_records = self._find_artifacts_for_producer_id(
            session,
            producer_id=producer_id,
            project=project,
            artifact_identifiers=artifact_identifiers or [],
        )

        artifacts = ArtifactList()
        for artifact, artifact_tag in artifact_records:
            artifact_struct = artifact.full_object
            self._set_tag_in_artifact_struct(artifact_struct, artifact_tag)
            self._set_parent_uri(artifact_struct, artifact.parent)
            artifact_struct["spec"]["has_children"] = bool(artifact.child_artifacts)
            artifacts.append(artifact_struct)

        return artifacts

    def read_artifact(
        self,
        session,
        key: str,
        project: str,
        tag: typing.Optional[str] = None,
        iter: typing.Optional[int] = None,
        producer_id: typing.Optional[str] = None,
        uid: typing.Optional[str] = None,
        raise_on_not_found: bool = True,
        format_: mlrun.common.formatters.ArtifactFormat = mlrun.common.formatters.ArtifactFormat.full,
        as_record: bool = False,
    ):
        """
        Read a single artifact matching the given identifiers.

        :param session:            DB session.
        :param key:                Artifact key.
        :param project:            Project name (required).
        :param tag:                Tag to match; "latest" with a uid is treated as best-effort
                                   enrichment rather than a hard filter.
        :param iter:               Iteration to match; iter==0 falls back to the best-iteration
                                   record (link-artifact lookup) when no exact match is found.
        :param producer_id:        Producer id to match.
        :param uid:                Artifact uid to match.
        :param raise_on_not_found: Raise MLRunNotFoundError instead of returning None.
        :param format_:            Output format for the artifact struct.
        :param as_record:          Return the raw DB record instead of the formatted struct.

        :return: The formatted artifact struct, the DB record (as_record=True), or None when
                 not found and raise_on_not_found is False.
        :raises MLRunMissingProjectError: If no project was given.
        :raises MLRunNotFoundError:       If no artifact matched and raise_on_not_found is True.
        """
        if not project:
            raise mlrun.errors.MLRunMissingProjectError()
        query = self._query(session, ArtifactV2, key=key, project=project)
        # whether to copy the resolved tag into the returned struct's metadata
        enrich_tag = False

        if uid:
            query = query.filter(ArtifactV2.uid == uid)
        if producer_id:
            query = query.filter(ArtifactV2.producer_id == producer_id)

        if tag == mlrun.common.constants.RESERVED_TAG_NAME_LATEST and uid:
            # Make a best-effort attempt to find the "latest" tag. It will be present in the response if the
            # latest tag exists, otherwise, it will not be included.
            # This is due to 'latest' being a special case and is enriched in the client side
            latest_query = query.join(
                ArtifactV2.Tag, ArtifactV2.Tag.obj_id == ArtifactV2.id
            ).filter(
                ArtifactV2.Tag.name == mlrun.common.constants.RESERVED_TAG_NAME_LATEST
            )
            if latest_query.one_or_none():
                enrich_tag = True
        elif tag:
            # If a specific tag is provided, handle all cases where UID may or may not be included.
            # The case for UID with the "latest" tag is already covered above.
            # Here, we join with the tags table to check for a match with the specified tag.
            enrich_tag = True
            query = query.join(
                ArtifactV2.Tag, ArtifactV2.Tag.obj_id == ArtifactV2.id
            ).filter(ArtifactV2.Tag.name == tag)

        # keep the query without the iteration filter for later error handling
        query_without_iter = query
        if iter is not None:
            query = query.filter(ArtifactV2.iteration == iter)

        db_artifact = query.one_or_none()

        if not db_artifact:
            # if the artifact was not found and iter==0, we might be looking for a link artifact
            # in this case, we need to look for the artifact with the best iteration
            fail = True
            if iter == 0:
                query_without_iter = query_without_iter.filter(
                    ArtifactV2.best_iteration
                )
                db_artifact = query_without_iter.one_or_none()
                if db_artifact is not None:
                    # we found something, so we can continue
                    fail = False

            if fail:
                if raise_on_not_found:
                    artifact_uri = generate_artifact_uri(project, key, tag, iter, uid)
                    raise mlrun.errors.MLRunNotFoundError(
                        f"Artifact {artifact_uri} not found"
                    )
                return None

        if as_record:
            return db_artifact

        artifact = db_artifact.full_object
        artifact["spec"]["has_children"] = bool(db_artifact.child_artifacts)
        artifact["metadata"]["iter"] = db_artifact.iteration
        self._set_parent_uri(artifact, db_artifact.parent)

        # If connected to a tag add it to metadata
        if enrich_tag:
            self._set_tag_in_artifact_struct(artifact, tag)

        return mlrun.common.formatters.ArtifactFormat.format_obj(artifact, format_)

    def del_artifact(
        self,
        session,
        key,
        project,
        tag="",
        uid=None,
        producer_id=None,
        iter=None,
    ):
        """
        Delete a single artifact matching the given identifiers.

        :raises MLRunMissingProjectError: If no project was given.
        """
        if not project:
            raise mlrun.errors.MLRunMissingProjectError()
        # delegate the actual deletion (record + tags) to the shared helper
        deletion_filters = dict(
            project=project,
            tag=tag,
            uid=uid,
            key=key,
            producer_id=producer_id,
            iteration=iter,
        )
        self._delete_tagged_object(session, ArtifactV2, **deletion_filters)

    def del_artifacts(
        self,
        session,
        project,
        name="",
        tag="*",
        labels=None,
        ids=None,
        producer_id=None,
    ):
        """
        Delete all artifacts in a project matching the given filters.

        Deletions are attempted one by one; failures are accumulated and reported
        at the end so a single bad record does not abort the whole batch.

        :param session:     DB session.
        :param project:     Project name (required).
        :param name:        Artifact name filter.
        :param tag:         Tag filter ("*" matches any tag).
        :param labels:      Labels filter.
        :param ids:         Specific artifact ids to delete.
        :param producer_id: Producer id filter.

        :raises MLRunMissingProjectError:  If no project was given.
        :raises MLRunInternalServerError:  If the filter matches more artifacts than the
                                           configured deletion limit, or some deletions failed.
        :raises MLRunConflictError:        If some deletions failed on DB integrity constraints.
        """
        if not project:
            raise mlrun.errors.MLRunMissingProjectError()
        distinct_keys_and_uids = self._find_artifacts(
            session=session,
            project=project,
            name=name,
            ids=ids,
            tag=tag,
            labels=labels,
            producer_id=producer_id,
            with_entities=[ArtifactV2.key, ArtifactV2.uid],
        )
        total_artifacts = len(distinct_keys_and_uids)
        max_deletions = config.artifacts.limits.max_deletions

        if total_artifacts > max_deletions:
            raise mlrun.errors.MLRunInternalServerError(
                f"Cannot delete {total_artifacts} artifacts. The maximum allowed artifacts deletions "
                f"is {max_deletions}. Refine the filter and try again with a smaller batch."
            )

        logger.info("Deleting artifacts", total_artifacts=total_artifacts)

        failed_deletions_count = 0
        failed_deletions_count_integrity = 0

        for key, uid in distinct_keys_and_uids:
            try:
                self._delete_tagged_object(
                    session,
                    ArtifactV2,
                    project=project,
                    uid=uid,
                    key=key,
                    producer_id=producer_id,
                )
            except IntegrityError as exc:
                # previously logged as "model artifact" although this path deletes
                # artifacts of any kind
                logger.error(
                    "Failed to delete artifact due to db integrity",
                    project=project,
                    key=key,
                    uid=uid,
                    err=err_to_str(exc),
                )
                failed_deletions_count_integrity += 1
            except Exception as exc:
                logger.error(
                    "Failed to delete artifact",
                    project=project,
                    key=key,
                    uid=uid,
                    err=err_to_str(exc),
                )
                failed_deletions_count += 1

        # integrity failures take precedence - they indicate the artifact is still referenced
        if failed_deletions_count_integrity:
            raise mlrun.errors.MLRunConflictError(
                f"Failed to delete {failed_deletions_count + failed_deletions_count_integrity} artifacts, "
                f"while {failed_deletions_count_integrity} of them failed due to integrity constraints "
            )
        if failed_deletions_count:
            raise mlrun.errors.MLRunInternalServerError(
                f"Failed to delete {failed_deletions_count} artifacts."
            )
        logger.info("Successfully deleted artifacts", total_artifacts=total_artifacts)

    def list_artifact_tags(
        self, session, project, category: mlrun.common.schemas.ArtifactCategories = None
    ) -> list[str]:
        """
        List all distinct tags of artifacts in the given project.

        :param session:  DB session
        :param project:  Project name
        :param category: Optional artifact category to filter by

        :return: A list of distinct tag names
        """
        tags_query = (
            self._query(session, ArtifactV2.Tag.name)
            .select_from(ArtifactV2)
            .join(ArtifactV2.Tag, ArtifactV2.Tag.obj_id == ArtifactV2.id)
            .filter(ArtifactV2.project == project)
            .group_by(ArtifactV2.Tag.name)
        )
        if category:
            # hint the DB to use the (project, kind) index for the category filter
            tags_query = self._add_artifact_category_query(
                category, tags_query
            ).with_hint(ArtifactV2, "USE INDEX (idx_project_kind)")

        # each result row is a one-element tuple holding the tag name
        return [row[0] for row in tags_query]

    def validate_artifact_removal_preconditions(
        self,
        session,
        key: str,
        project: str,
        tag: str = "",
        # iter is forwarded to read_artifact's iteration filter, hence an int
        iter: Optional[int] = None,
        producer_id: Optional[str] = None,
        uid: Optional[str] = None,
    ) -> Optional[dict[str, Any]]:
        """
        Validate whether an artifact can be safely removed from the system.

        This method checks if the specified artifact is currently in use by other resources,
        such as model endpoints. If it is, the deletion will be blocked, and an appropriate
        exception should be raised (MLRunConflictError).

        :param session:     Active SQLAlchemy DB session for querying.
        :param key:         Artifact key.
        :param tag:         Specific tag for the artifact.
        :param iter:        Artifact iteration number, if applicable.
        :param project:     Project to which the artifact belongs.
        :param producer_id: Identifier of the artifact's producer.
        :param uid:         UID of the artifact object.

        :return: An artifact dictionary (minimal format), or None when the artifact
                 does not exist (nothing to delete, nothing to validate).
        :raises MLRunConflictError:   If the artifact is in use and cannot be deleted.
        :raises MLRunBadRequestError: If more than one artifact matched the identifiers.
        """
        try:
            db_artifact = self.read_artifact(
                session=session,
                key=key,
                tag=tag,
                iter=iter,
                project=project,
                producer_id=producer_id,
                uid=uid,
                as_record=True,
            )
        except mlrun.errors.MLRunNotFoundError:
            # a missing artifact is not a precondition failure - there is nothing to block
            return None
        except sqlalchemy.exc.MultipleResultsFound as exc:
            logger.error(
                "Failed to delete artifact because multiple artifacts were found",
                key=key,
                project=project,
                tag=tag,
                iter=iter,
                producer_id=producer_id,
                uid=uid,
                err=err_to_str(exc),
            )

            error_message = (
                "Failed to delete artifact, multiple artifacts matching the search criteria were found. "
                "Refine your request to specify a single artifact or use another endpoint to delete "
                "multiple artifacts instead."
            )
            raise mlrun.errors.MLRunBadRequestError(error_message) from exc

        # block deletion while any model endpoint still references this artifact as its model
        dependent_endpoints_count = (
            session.query(ModelEndpoint)
            .filter(ModelEndpoint.model_id == db_artifact.id)
            .count()
        )
        if dependent_endpoints_count:
            raise mlrun.errors.MLRunConflictError(
                f"Failed deleting artifact {db_artifact.key} in project {db_artifact.project}, iteration "
                f"{db_artifact.iteration}, producer_id {db_artifact.producer_id} and {db_artifact.uid} uid. "
                f"The artifact is used by {dependent_endpoints_count} endpoints"
            )
        # block deletion while child artifacts still reference this one as their parent
        if db_artifact.child_artifacts:
            raise mlrun.errors.MLRunConflictError(
                f"Failed deleting artifact {db_artifact.key} in project {db_artifact.project}, iteration "
                f"{db_artifact.iteration}, producer_id {db_artifact.producer_id} and {db_artifact.uid} uid. "
                f"The artifact has {len(db_artifact.child_artifacts)} child artifacts. Delete them before proceeding."
            )
        return mlrun.common.formatters.ArtifactFormat.format_obj(
            db_artifact.full_object, mlrun.common.formatters.ArtifactFormat.minimal
        )

    @retry_on_conflict
    def overwrite_artifacts_with_tag(
        self,
        session: Session,
        project: str,
        tag: str,
        identifiers: list[mlrun.common.schemas.ArtifactIdentifier],
    ):
        """
        Replace the tags of the artifacts matching the identifiers with the given tag.

        All existing (non-"latest") tags are removed from the matched artifacts and
        the new tag is applied, atomically within the session.

        :param session:     DB session.
        :param project:     Project name.
        :param tag:         Tag to apply.
        :param identifiers: Artifact identifiers to match.
        """
        # query all artifacts which match the identifiers, de-duplicating records
        # matched by more than one identifier (dict keeps first occurrence and
        # preserves insertion order)
        artifacts_by_id = {}
        for identifier in identifiers:
            for artifact in self._list_artifacts_for_tagging(
                session,
                project_name=project,
                identifier=identifier,
            ):
                artifacts_by_id.setdefault(artifact.id, artifact)
        artifacts = list(artifacts_by_id.values())

        # delete related tags from artifacts identifiers
        # not committing the session here because we want to do it atomic with the next query
        self._delete_artifacts_tags(session, project, artifacts, commit=False)

        # tag artifacts with tag
        self.tag_artifacts(session, tag, artifacts, project)

    @retry_on_conflict
    def append_tag_to_artifacts(
        self,
        session: Session,
        project: str,
        tag: str,
        identifiers: list[mlrun.common.schemas.ArtifactIdentifier],
    ):
        """
        Add the given tag to every artifact matching the identifiers, keeping
        any existing tags in place.
        """
        # collect all artifacts matching any of the identifiers
        artifacts = [
            artifact
            for identifier in identifiers
            for artifact in self._list_artifacts_for_tagging(
                session,
                project_name=project,
                identifier=identifier,
            )
        ]
        self.tag_artifacts(session, tag, artifacts, project)

    def delete_tag_from_artifacts(
        self,
        session: Session,
        project: str,
        tag: str,
        identifiers: list[mlrun.common.schemas.ArtifactIdentifier],
    ):
        """
        Remove the given tag from every artifact matching the identifiers.
        """
        # collect all artifacts matching any of the identifiers
        artifacts = [
            artifact
            for identifier in identifiers
            for artifact in self._list_artifacts_for_tagging(
                session,
                project_name=project,
                identifier=identifier,
            )
        ]
        self._delete_artifacts_tags(session, project, artifacts, tags=[tag])

    def tag_artifacts(
        self,
        session: sqlalchemy.orm.Session,
        tag_name: str,
        artifacts: list[ArtifactV2],
        project: str,
    ):
        """
        Tag the given artifacts with `tag_name`, moving the tag from any other
        artifact records with the same key but a different producer.

        The artifacts with the affected keys are row-locked (SELECT ... FOR UPDATE)
        for the duration of the transaction so concurrent runs cannot race on the
        same tag; the commit at the end releases the lock.

        :param session:  DB session (holds the row locks until commit).
        :param tag_name: Tag to apply.
        :param artifacts: Artifact records to tag.
        :param project:  Project the artifacts belong to.
        """
        def delete_previous_tags_and_get_existing(_session, _artifact):
            # we want to get the artifacts that we need to remove its previous latest (or its specific) tag
            # when we have different producers trying to create the same exact tag.
            # we want the latest one, to remove the tag from previous artifacts with the same key
            query = (
                self._query(
                    _session,
                    _artifact.Tag,
                    name=tag_name,
                    project=project,
                    obj_name=_artifact.key,
                )
                .join(
                    ArtifactV2,
                )
                .filter(
                    ArtifactV2.producer_id != _artifact.producer_id,
                )
            )

            # delete the tags
            for old_tag in query:
                logger.debug(
                    "Deleting old tag",
                    old_tag=old_tag,
                    **log_kwargs,
                )
                _session.delete(old_tag)
            _session.commit()

            # search for an existing tag with the same name, and points to artifacts with the same key, producer id,
            # and iteration. this means that the same producer created this artifact,
            # and we can update the existing tag
            tag_query = (
                self._query(
                    _session,
                    _artifact.Tag,
                    name=tag_name,
                    project=project,
                    obj_name=_artifact.key,
                )
                .join(
                    ArtifactV2,
                )
                .filter(
                    ArtifactV2.producer_id == _artifact.producer_id,
                    ArtifactV2.iteration == _artifact.iteration,
                )
            )
            return tag_query.one_or_none()

        log_kwargs = {"project": project, "session": session.hash_key, "tag": tag_name}
        artifacts_keys = [artifact.key for artifact in artifacts]
        if not artifacts_keys:
            logger.debug(
                "No artifacts to tag",
                artifacts=artifacts,
                **log_kwargs,
            )
            return

        logger.debug(
            "Locking artifacts in db before tagging artifacts",
            artifacts_keys=artifacts_keys,
            **log_kwargs,
        )

        # to avoid multiple runs trying to tag the same artifacts simultaneously,
        # lock the artifacts with the same keys for the entire transaction (using with_for_update).
        # ordering by id keeps the lock acquisition order deterministic to avoid deadlocks
        self._query(
            session,
            ArtifactV2,
            project=project,
        ).with_entities(ArtifactV2.id).filter(
            ArtifactV2.key.in_(artifacts_keys),
        ).order_by(ArtifactV2.id.asc()).populate_existing().with_for_update().all()

        logger.debug(
            "Acquired artifacts db lock",
            artifacts_keys=artifacts_keys,
            **log_kwargs,
        )

        objects = []
        for artifact in artifacts:
            # to make sure we can list tags that were created during this session in parallel by different processes,
            # we need to use a new session. if there is an existing tag, we'll definitely get it, so we can update it
            # instead of creating a new tag.
            tag = framework.db.session.run_function_with_new_db_session(
                delete_previous_tags_and_get_existing, artifact
            )
            if not tag:
                # create the new tag
                tag = artifact.Tag(
                    project=project,
                    name=tag_name,
                    obj_name=artifact.key,
                )
            tag.obj_id = artifact.id
            objects.append(tag)
            session.add(tag)

        # commit the changes, unlocking the flow for other processes
        self._commit(session, objects)
        logger.debug(
            "Released artifacts db lock after tagging artifacts",
            artifacts_keys=artifacts_keys,
            **log_kwargs,
        )

    def _delete_project_artifacts(self, session: Session, project: str):
        """Remove every artifact record belonging to the given project."""
        logger.debug("Removing project artifacts from db", project=project)
        self._delete_multi_objects(
            session=session, main_table=ArtifactV2, project=project
        )

    def _mark_best_iteration_artifact(
        self,
        session,
        project,
        key,
        link_artifact,
        uid=None,
    ):
        """
        Mark the artifact a link artifact points at as the best iteration,
        clearing the flag from the previously-marked iteration of the same key.

        :param session:       DB session.
        :param project:       Project name.
        :param key:           Artifact key; overridden by the link artifact's
                              "link_key" when present.
        :param link_artifact: Link artifact dict carrying "spec.link_iteration" and
                              optionally "spec.link_tree"/"metadata.tree" and "spec.link_key".
        :param uid:           Optional uid to further narrow the target artifact.

        :return: The uid of the artifact that was marked as best iteration.
        :raises MLRunNotFoundError: If no artifact matches the link target.
        """
        artifacts_to_commit = []

        # get the artifact record from the db
        link_iteration = link_artifact.get("spec", {}).get("link_iteration")
        link_tree = link_artifact.get("spec", {}).get("link_tree") or link_artifact.get(
            "metadata", {}
        ).get("tree")
        link_key = link_artifact.get("spec", {}).get("link_key")
        if link_key:
            key = link_key

        # Lock the artifacts with the same project and key (and producer_id when available) to avoid unexpected
        # deadlocks and conform to our lock-once-when-starting logic - ML-6869
        lock_query = self._query(
            session,
            ArtifactV2,
            project=project,
            key=key,
        ).with_entities(ArtifactV2.id)
        if link_tree:
            lock_query = lock_query.filter(ArtifactV2.producer_id == link_tree)

        # deterministic lock order (by id) to avoid deadlocks between concurrent callers
        lock_query.order_by(
            ArtifactV2.id.asc()
        ).populate_existing().with_for_update().all()

        # get the best iteration artifact record
        query = self._query(session, ArtifactV2).filter(
            ArtifactV2.project == project,
            ArtifactV2.key == key,
            ArtifactV2.iteration == link_iteration,
        )
        if link_tree:
            query = query.filter(ArtifactV2.producer_id == link_tree)
        if uid:
            query = query.filter(ArtifactV2.uid == uid)

        best_iteration_artifact_record = query.one_or_none()
        if not best_iteration_artifact_record:
            raise mlrun.errors.MLRunNotFoundError(
                f"Best iteration artifact not found - {project}/{key}:{link_iteration}",
            )

        # get the previous best iteration artifact
        query = self._query(session, ArtifactV2).filter(
            ArtifactV2.project == project,
            ArtifactV2.key == key,
            ArtifactV2.best_iteration,
            ArtifactV2.iteration != link_iteration,
        )
        if link_tree:
            query = query.filter(ArtifactV2.producer_id == link_tree)

        previous_best_iteration_artifacts = query.one_or_none()
        if previous_best_iteration_artifacts:
            # remove the previous best iteration flag
            previous_best_iteration_artifacts.best_iteration = False
            artifacts_to_commit.append(previous_best_iteration_artifacts)

        # update the artifact record with best iteration
        best_iteration_artifact_record.best_iteration = True
        artifacts_to_commit.append(best_iteration_artifact_record)

        # persist both the cleared and the newly-set flags in one upsert
        self._upsert(session, artifacts_to_commit)

        return best_iteration_artifact_record.uid

    def _update_artifact_record_from_dict(
        self,
        session: Session,
        artifact_record,
        artifact_dict: dict,
        project: str,
        key: str,
        uid: str,
        iter: typing.Optional[int] = None,
        best_iteration: bool = False,
        producer_id: typing.Optional[str] = None,
    ):
        """
        Populate a DB artifact record from an artifact dict, normalizing both sides.

        Mutates `artifact_record` (columns) and `artifact_dict` (timestamps, kind,
        db_key; tag/labels/parent_uri are popped since they are stored separately).

        :param session:         DB session (used to resolve the parent artifact id).
        :param artifact_record: ArtifactV2 record to populate.
        :param artifact_dict:   Artifact dict to read from (mutated in place).
        :param project:         Project name.
        :param key:             Artifact key (used as db_key when none is set).
        :param uid:             Artifact uid.
        :param iter:            Iteration number; None is treated as a single-iteration
                                artifact (iteration 0).
        :param best_iteration:  Whether this artifact is the best iteration.
        :param producer_id:     Producer id; falls back to the dict's "metadata.tree".
        """
        artifact_record.project = project
        kind = artifact_dict.get("kind") or "artifact"
        artifact_record.kind = kind
        artifact_record.producer_id = producer_id or artifact_dict["metadata"].get(
            "tree"
        )
        artifact_record.producer_uri = (
            artifact_dict.get("spec", {}).get("producer", {}).get("uri", None)
        )
        updated_datetime = datetime.now(UTC)
        artifact_record.updated = updated_datetime
        # prefer the record's existing creation time; fall back to the dict's
        created = (
            str(artifact_record.created)
            if artifact_record.created
            else artifact_dict["metadata"].pop("created", None)
        )
        # make sure we have a datetime object with timezone both in the artifact record and in the artifact dict
        created_datetime = mlrun.utils.enrich_datetime_with_tz_info(
            created
        ) or datetime.now(UTC)
        artifact_record.created = created_datetime

        # if iteration is not given, we assume it is a single iteration artifact, and thus we set the iteration to 0
        artifact_record.iteration = iter or 0
        if best_iteration or iter == 0:
            artifact_record.best_iteration = True

        artifact_record.uid = uid

        # keep the dict's timestamps in sync with the record (stored as strings)
        artifact_dict["metadata"]["updated"] = str(updated_datetime)
        artifact_dict["metadata"]["created"] = str(created_datetime)
        artifact_dict["kind"] = kind

        db_key = artifact_dict.get("spec", {}).get("db_key")
        if not db_key:
            artifact_dict.setdefault("spec", {})["db_key"] = key
        else:
            validate_artifact_key_name(db_key, "artifact.db_key")

        # remove the tag from the metadata, as it is stored in a separate table
        artifact_dict["metadata"].pop("tag", None)

        # add reference id and pop the parent uri before saving to db
        parent_uri = artifact_dict.get("spec", {}).pop("parent_uri", None)
        parent_id = self._get_parent_artifact_id(session, parent_uri)
        artifact_record.parent_id = parent_id

        # store the (now-normalized) dict as the record's full object
        artifact_record.full_object = artifact_dict

        # labels are stored in a separate table
        labels = artifact_dict["metadata"].pop("labels", {}) or {}
        update_labels(artifact_record, labels)

    def _list_artifacts_for_tagging(
        self,
        session: Session,
        project_name: str,
        identifier: mlrun.common.schemas.ArtifactIdentifier,
    ):
        """
        Resolve an artifact identifier into the matching DB records for tagging.

        Falls back to treating the identifier's uid as a producer id, since in
        earlier versions the uid field actually stored the producer id.
        """
        common_filters = dict(
            project=project_name,
            name=identifier.key,
            kind=identifier.kind,
            iter=identifier.iter,
            as_records=True,
        )
        records = self.list_artifacts(
            session,
            uid=identifier.uid,
            producer_id=identifier.producer_id,
            **common_filters,
        )

        # in earlier versions, the uid actually stored the producer id of the artifacts, so in case we didn't find
        # any artifacts we should try to look for artifacts with the given uid as producer id
        if not records and identifier.uid and not identifier.producer_id:
            records = self.list_artifacts(
                session,
                producer_id=identifier.uid,
                **common_filters,
            )

        return records

    @staticmethod
    def _set_tag_in_artifact_struct(artifact, tag):
        artifact["metadata"]["tag"] = tag

    def _set_parent_uri(
        self, artifact: dict, parent: ArtifactV2, parent_uri: Optional[str] = None
    ):
        """
        Populate the artifact struct's "spec.parent_uri" from its parent record.

        :param artifact:   Artifact struct to enrich (mutated in place).
        :param parent:     Parent artifact record, or None for no parent.
        :param parent_uri: Originally-requested parent URI; its tag component is
                           preferred when building the enriched URI.
        """
        # only the tag component (index 3) of the requested parent URI is needed here
        requested_parent_tag = self._get_parent_artifact_params_from_uri(parent_uri)[3]
        spec = artifact.setdefault("spec", {})
        if not parent:
            spec["parent_uri"] = None
            return

        resolved_tag = self._get_obj_tag_prioritizing_user_tag(
            parent.tags or [], requested_parent_tag
        )
        spec["parent_uri"] = mlrun.datastore.get_store_uri(
            kind=f"{parent.kind}s",
            uri=generate_artifact_uri(
                project=parent.project,
                key=parent.key,
                iter=parent.iteration,
                tree=parent.producer_id,
                uid=parent.uid,
                tag=resolved_tag or None,
            ),
        )

    def _delete_artifacts_tags(
        self,
        session,
        project: str,
        artifacts: list[ArtifactV2],
        tags: typing.Optional[list[str]] = None,
        commit: bool = True,
    ):
        """
        Delete tags of the given artifacts, always preserving the reserved
        "latest" tag; when `tags` is given, only those tag names are removed.

        :param commit: Whether to commit immediately; pass False to make the
                       deletion atomic with subsequent work in the same session.
        """
        obj_ids = [artifact.id for artifact in artifacts]
        conditions = [
            ArtifactV2.Tag.project == project,
            ArtifactV2.Tag.obj_id.in_(obj_ids),
            # never drop the reserved "latest" tag
            ArtifactV2.Tag.name != mlrun.common.constants.RESERVED_TAG_NAME_LATEST,
        ]
        if tags:
            conditions.append(ArtifactV2.Tag.name.in_(tags))

        for tag_record in session.query(ArtifactV2.Tag).filter(*conditions):
            session.delete(tag_record)
        if commit:
            session.commit()

    def _find_artifacts(
        self,
        session: Session,
        project: str,
        ids: typing.Optional[typing.Union[list[str], str]] = None,
        tag: typing.Optional[str] = None,
        labels: typing.Optional[typing.Union[list[str], str]] = None,
        since: typing.Optional[datetime] = None,
        until: typing.Optional[datetime] = None,
        name: typing.Optional[str] = None,
        kind: typing.Optional[mlrun.common.schemas.ArtifactCategories] = None,
        category: typing.Optional[mlrun.common.schemas.ArtifactCategories] = None,
        iter: typing.Optional[int] = None,
        uid: typing.Optional[str] = None,
        producer_id: typing.Optional[str] = None,
        producer_uri: typing.Optional[str] = None,
        best_iteration: bool = False,
        most_recent: bool = False,
        attach_tags: bool = False,
        parent_uri: typing.Optional[str] = None,
        offset: typing.Optional[int] = None,
        limit: typing.Optional[int] = None,
        with_entities: typing.Optional[list[Any]] = None,
        partition_by: typing.Optional[
            mlrun.common.schemas.ArtifactPartitionByField
        ] = None,
        rows_per_partition: typing.Optional[int] = 1,
        partition_sort_by: typing.Optional[
            mlrun.common.schemas.SortField
        ] = mlrun.common.schemas.SortField.updated,
        partition_order: typing.Optional[
            mlrun.common.schemas.OrderType
        ] = mlrun.common.schemas.OrderType.desc,
    ) -> list[Any]:
        """
        Find artifacts by the given filters.

        :param session: DB session
        :param project: Project name
        :param ids: Artifact IDs to filter by ("*" disables ID filtering)
        :param tag: Tag to filter by ("*" or None matches artifacts regardless of tag)
        :param labels: Labels to filter by
        :param since: Filter artifacts that were updated after this time
        :param until: Filter artifacts that were updated before this time
        :param name: Artifact name to filter by (a leading "~" requests partial,
            case-insensitive matching)
        :param kind: Artifact kind to filter by
        :param category: Artifact category to filter by (if kind is not given)
        :param iter: Artifact iteration to filter by
        :param uid: Artifact UID to filter by
        :param producer_id: Artifact producer ID to filter by
        :param producer_uri: The producer URI (usually a run URI) to filter artifacts by. The producer URI is
            typically used to filter artifacts produced by a specific run or workflow.
        :param best_iteration: Filter by best iteration artifacts
        :param most_recent: Filter by most recent artifacts
        :param attach_tags: Whether to return a list of tuples of (ArtifactV2, tag_name). If False, only ArtifactV2
        :param parent_uri: Filter by parent artifact reference - a full store URI,
            a "key:tag" pair, or a plain key
        :param with_entities: List of columns to return
        :param partition_by: Field to group results by. When `partition_by` is specified, the `partition_sort_by`
            parameter must be provided as well.
        :param rows_per_partition: How many top rows (per sorting defined by `partition_sort_by` and `partition_order`)
            to return per group. Default value is 1.
        :param partition_sort_by: What field to sort the results by, within each partition defined by `partition_by`.
            Currently the only allowed values are `created` and `updated`. Default is `updated`.
        :param partition_order: Order of sorting within partitions - `asc` or `desc`. Default is `desc`.
        :param offset: SQL query offset.
        :param limit: SQL query limit.

        :return: May return:
            1. a list of tuples of (ArtifactV2, tag_name)
            2. a list of ArtifactV2 - if attach_tags is False
            3. a list of unique columns sets - if with_entities is given
        """
        if category and kind:
            message = "Category and Kind filters can't be given together"
            logger.warning(message, kind=kind, category=category)
            raise ValueError(message)

        tag_id_alias = "tag_id"
        tag_name_alias = "tag_name"

        # Create a subquery that selects only the artifact IDs along with tag metadata.
        # The tag name and tag ID are explicitly aliased as 'name' and 'tag_id' so they can be
        # referenced in window functions, ordering, and outer queries (especially for sorting by tag).
        query = session.query(ArtifactV2).with_entities(
            ArtifactV2.id,
            ArtifactV2.Tag.name.label(tag_name_alias),
            ArtifactV2.Tag.id.label(tag_id_alias),
        )

        # If the query matches the default UI list artifacts request, we bypass the DB optimizer and use the index
        # `idx_project_bi_updated` because we know it provides optimal results for this specific query.
        if self._is_default_list_artifacts_query(
            project,
            ids,
            tag,
            labels,
            since,
            until,
            name,
            kind,
            category,
            iter,
            uid,
            producer_id,
            producer_uri,
            best_iteration,
            most_recent,
            attach_tags,
            offset,
            limit,
            with_entities,
            partition_by,
            rows_per_partition,
            partition_sort_by,
            partition_order,
            parent_uri,
        ):
            query = query.with_hint(
                ArtifactV2,
                "USE INDEX (idx_project_bi_updated)",
                dialect_name="mysql",
            )

        if project:
            query = query.filter(ArtifactV2.project == project)
        if ids and ids != "*":
            query = query.filter(ArtifactV2.id.in_(ids))
        if uid:
            query = query.filter(ArtifactV2.uid == uid)
        if name:
            query = self._add_artifact_name_query(query, name)
        if iter is not None:
            query = query.filter(ArtifactV2.iteration == iter)
        if best_iteration:
            query = query.filter(ArtifactV2.best_iteration == best_iteration)
        if producer_id:
            query = query.filter(ArtifactV2.producer_id == producer_id)
        if producer_uri:
            # We check if the producer uri is a substring of the artifact producer uri because it
            # may contain additional information (like the run iteration) that we don't want to filter by.
            query = query.filter(ArtifactV2.producer_uri.like(f"%{producer_uri}%"))
        if labels:
            labels = label_set(labels)
            query = self._add_labels_filter(session, query, ArtifactV2, labels)
        if since or until:
            query = generate_time_range_query(
                query=query,
                field=ArtifactV2.updated,
                since=since,
                until=until,
            )
        if kind:
            query = query.filter(ArtifactV2.kind == kind)
        elif category:
            query = self._add_artifact_category_query(category, query)
        if most_recent:
            query = self._attach_most_recent_artifact_query(session, query)

        # Order the results before applying the limit to ensure that the limit is applied to the correctly
        # ordered results.
        # If the updated fields are the same, we need a secondary field to sort by.
        # Default sorting criteria is by updated first and ID second
        order_criteria = [ArtifactV2.updated.desc(), ArtifactV2.id.desc()]

        # join on tags
        if tag and tag != "*":
            # If a tag is given, we can just join (faster than outer join) and filter on the tag
            query = query.join(ArtifactV2.Tag, ArtifactV2.Tag.obj_id == ArtifactV2.id)
            query = query.filter(ArtifactV2.Tag.name == tag)
            if project:
                query = query.filter(ArtifactV2.Tag.project == project)
        else:
            # If no tag is given, we need to outer join to get all artifacts, even if they don't have tags
            query = query.outerjoin(
                ArtifactV2.Tag, ArtifactV2.Tag.obj_id == ArtifactV2.id
            )

        if partition_by:
            self._assert_partition_by_parameters(
                mlrun.common.schemas.ArtifactPartitionByField,
                partition_by,
                partition_sort_by,
            )
            query = self._create_partitioned_query(
                session,
                query,
                ArtifactV2,
                partition_by,
                rows_per_partition,
                partition_sort_by,
                partition_order,
                with_tagged=True,
            )
        if parent_uri:
            query = self._add_artifact_parent_query(query=query, parent_uri=parent_uri)

        if limit:
            # When specific tag is not given - we need a consistent way to sort artifacts that have multiple tags.
            # Therefore, we add sorting by latest tag first, then by tag ID as the last criteria.
            if tag == "*" or not tag:
                # Third sort by tag ID to ensure consistent ordering when an artifact has multiple tags.
                # Put "latest" tag first, then others by tag_id desc
                latest_first_case = case(
                    (text(f"{tag_name_alias} = 'latest'"), 0),
                    else_=1,
                )

                order_criteria.append(latest_first_case)
                # Use raw SQL text to refer to the "tag_id" alias we defined earlier.
                # This is necessary because SQLAlchemy does not allow direct reference
                # to aliased columns (like "tag_id") in order_by() using ORM column objects.
                order_criteria.append(text(f"{tag_id_alias} DESC"))

            query = self._paginate_query(
                query.order_by(*order_criteria),
                offset,
                limit,
            )

        # limit operation loads all the results before performing the actual limiting,
        # therefore, we compile the above query as a sub query only for filtering out the relevant ids,
        # then join the outer query on the subquery to select the correct columns of the table.
        subquery = query.subquery()
        outer_query = session.query(ArtifactV2, subquery.c.tag_name)
        if with_entities:
            outer_query = outer_query.with_entities(*with_entities, subquery.c.tag_name)

        outer_query = outer_query.join(subquery, ArtifactV2.id == subquery.c.id)

        # Join may lose order, make sure order is applied on outer as well
        # When specific tag is not given - we need a consistent way to sort artifacts that have multiple tags.
        # Therefore, we add sorting by latest tag first, then by tag ID as the last criteria.
        if tag == "*" or not tag:
            # Put "latest" tag first, then others by tag_id desc
            latest_first_case = case(
                (subquery.c.tag_name == "latest", 0),
                else_=1,
            )

            # Reset order criteria to default values
            if limit:
                order_criteria = [ArtifactV2.updated.desc(), ArtifactV2.id.desc()]

            order_criteria.append(latest_first_case)
            # Safe ordering by tag_id alias
            order_criteria.append(subquery.c[tag_id_alias].desc())

        outer_query = outer_query.order_by(*order_criteria)

        if not limit:
            outer_query = self._paginate_query(outer_query, offset, limit=None)

        if not with_entities:
            # early load the parent artifact
            outer_query = outer_query.options(selectinload(ArtifactV2.parent))

        results = outer_query.all()
        if not attach_tags:
            # we might have duplicate records due to the tagging mechanism, so we need to deduplicate
            artifacts = set()
            for *artifact, _ in results:
                artifacts.add(tuple(artifact) if with_entities else artifact[0])

            return list(artifacts)

        return results

    def _is_default_list_artifacts_query(
        self,
        project: str,
        ids: typing.Optional[typing.Union[list[str], str]] = None,
        tag: typing.Optional[str] = None,
        labels: typing.Optional[typing.Union[list[str], str]] = None,
        since: typing.Optional[datetime] = None,
        until: typing.Optional[datetime] = None,
        name: typing.Optional[str] = None,
        kind: typing.Optional[mlrun.common.schemas.ArtifactCategories] = None,
        category: typing.Optional[mlrun.common.schemas.ArtifactCategories] = None,
        iter: typing.Optional[int] = None,
        uid: typing.Optional[str] = None,
        producer_id: typing.Optional[str] = None,
        producer_uri: typing.Optional[str] = None,
        best_iteration: bool = False,
        most_recent: bool = False,
        attach_tags: bool = False,
        offset: typing.Optional[int] = None,
        limit: typing.Optional[int] = None,
        with_entities: typing.Optional[list[Any]] = None,
        partition_by: typing.Optional[
            mlrun.common.schemas.ArtifactPartitionByField
        ] = None,
        rows_per_partition: typing.Optional[int] = 1,
        partition_sort_by: typing.Optional[
            mlrun.common.schemas.SortField
        ] = mlrun.common.schemas.SortField.updated,
        partition_order: typing.Optional[
            mlrun.common.schemas.OrderType
        ] = mlrun.common.schemas.OrderType.desc,
        parent_uri: typing.Optional[str] = None,
    ) -> bool:
        """
        Check whether the given filter arguments match the "default UI list
        artifacts" query shape: ``tag="latest"``, ``best_iteration=True``,
        ``limit=1001``, and every other filter at its default value. For that exact
        shape the ``idx_project_bi_updated`` MySQL index hint is known to help.

        NOTE(review): ``attach_tags`` and ``parent_uri`` are accepted here but are
        not included in the comparison below, and only ``project``, ``category``
        and ``offset`` are documented as deliberately ignored — confirm whether the
        two former parameters should also be compared against their defaults.

        :return: True if the arguments match the default list-artifacts query
        """
        # Build a name -> default-value map from _find_artifacts' own signature,
        # then overlay the three values that define the "default UI query"
        parameters = inspect.signature(self._find_artifacts).parameters
        default_list_params = {
            name: parameter.default for name, parameter in parameters.items()
        }
        default_list_params.update(
            {
                "limit": 1001,
                "best_iteration": True,
                "tag": "latest",
            }
        )

        # The project and category parameters are ignored since they are variable in the default query.
        # The offset parameter varies with pagination, whereas the limit remains constant, so we only validate
        # the limit and the offset is also ignored here.
        current_params = {
            "ids": ids,
            "tag": tag,
            "labels": labels,
            "since": since,
            "until": until,
            "name": name,
            "kind": kind,
            "iter": iter,
            "uid": uid,
            "producer_id": producer_id,
            "producer_uri": producer_uri,
            "best_iteration": best_iteration,
            "most_recent": most_recent,
            "limit": limit,
            "with_entities": with_entities,
            "partition_by": partition_by,
            "rows_per_partition": rows_per_partition,
            "partition_sort_by": partition_sort_by,
            "partition_order": partition_order,
        }

        # Check if all current parameters match their default values
        # (any empty container is treated as equivalent to a None default)
        return all(
            default_list_params[key] == value
            or (default_list_params[key] is None and value in (None, [], {}, ()))
            for key, value in current_params.items()
        )

    def _find_artifacts_for_producer_id(
        self,
        session: Session,
        producer_id: str,
        project: str,
        artifact_identifiers: typing.Optional[list[tuple]] = None,
    ) -> list[tuple[ArtifactV2, str]]:
        """
        Find a producer's artifacts matching the given (key, tag, iteration, uid) tuples.

        :param session:              DB session
        :param producer_id:          The artifact producer ID to filter by
        :param project:              Project name to filter by
        :param artifact_identifiers: List of tuples of (key, tag, iteration, uid)
        :return: A list of tuples of (ArtifactV2, tag_name)
        """
        # Fix: the default used to be the wrong-typed sentinel "" for a list
        # parameter; None keeps the same behavior (nothing to iterate) safely.
        query = session.query(ArtifactV2, ArtifactV2.Tag.name)
        query = query.options(selectinload(ArtifactV2.parent))
        if project:
            query = query.filter(ArtifactV2.project == project)
        if producer_id:
            query = query.filter(ArtifactV2.producer_id == producer_id)

        # Outer join so artifacts without tags are returned as well (tag name is None)
        query = query.outerjoin(ArtifactV2.Tag, ArtifactV2.Tag.obj_id == ArtifactV2.id)

        tuples_filter = []
        for key, tag, iteration, uid in artifact_identifiers or []:
            base_filter = ArtifactV2.key == key

            # Prioritize filtering by UID if provided; otherwise fall back to
            # iteration (defaulting to 0) and, when given, the tag name
            if uid is not None:
                base_filter = base_filter & (ArtifactV2.uid == uid)
            else:
                iteration = iteration or 0
                base_filter = base_filter & (ArtifactV2.iteration == iteration)
                if tag is not None:
                    base_filter = base_filter & (ArtifactV2.Tag.name == tag)
            tuples_filter.append(base_filter)

        query = query.filter(or_(*tuples_filter))
        return query.all()

    def _add_artifact_name_query(self, query, name=None):
        """
        Filter the query by artifact key: exact match by default, partial
        (case-insensitive substring) match when the name starts with "~".
        Returns the query unchanged when no name is given.
        """
        if name:
            exact_match = not name.startswith("~")
            if exact_match:
                return query.filter(ArtifactV2.key == name)
            return self._partial_querying(
                query=query,
                name=name,
                column=ArtifactV2.key,
            )
        return query

    def _partial_querying(self, query: Query, name: str, column: Any):
        """Apply a case-insensitive substring (ILIKE) filter on the given column."""
        # Escape LIKE wildcards (_, %) in the user-provided value first
        escaped_name = self._escape_characters_for_like_query(name)
        # Drop the leading "~" marker and wrap with wildcards for substring matching
        pattern = f"%{escaped_name[1:]}%"
        return query.filter(column.ilike(pattern, escape="\\"))

    def _add_artifact_parent_query(self, query: Query, parent_uri: str):
        """
        Filter the query to artifacts linked (via ``parent_id``) to a parent artifact
        described by ``parent_uri``.

        The parent reference may be:
          - a full store URI (``store://artifacts/<project>/<key>:<tag>``),
          - a shorthand ``key:tag`` pair,
          - or a plain key.

        Key and tag are matched with a case-insensitive SQL ``ILIKE`` substring
        filter (e.g. ``parent_key="m1"`` also matches ``"m11"`` or ``"M1"``), so the
        caller does not need the exact full name or tag. Project, iteration, tree
        (producer id) and uid, when present in the URI, are matched exactly.

        :param query:      SQLAlchemy query to augment
        :param parent_uri: Parent artifact reference (store URI, ``key:tag``, or key)
        :return: The query with parent-artifact filters applied
        """
        (
            parent_project,
            parent_key,
            parent_iteration,
            parent_tag,
            parent_tree,
            parent_uid,
        ) = self._get_parent_artifact_params_from_uri(parent_uri)

        def ensure_partial_marker(value: str) -> str:
            # _partial_querying expects a leading "~" to signal partial matching
            return value if value.startswith("~") else f"~{value}"

        parent_alias = aliased(ArtifactV2)

        # Join the child's parent_id against the aliased parent table
        query = query.join(parent_alias, ArtifactV2.parent_id == parent_alias.id)

        if parent_project:
            query = query.filter(parent_alias.project == parent_project)
        if parent_key:
            query = self._partial_querying(
                query=query,
                name=ensure_partial_marker(parent_key),
                column=parent_alias.key,
            )
        if parent_iteration:
            query = query.filter(parent_alias.iteration == parent_iteration)
        if parent_tree:
            query = query.filter(parent_alias.producer_id == parent_tree)
        if parent_uid:
            query = query.filter(parent_alias.uid == parent_uid)
        if parent_tag:
            parent_tag_alias = aliased(ArtifactV2.Tag)
            query = query.join(
                parent_tag_alias, parent_tag_alias.obj_id == parent_alias.id
            )
            query = self._partial_querying(
                query=query,
                name=ensure_partial_marker(parent_tag),
                column=parent_tag_alias.name,
            )
        return query

    @staticmethod
    def _get_parent_artifact_params_from_uri(
        parent_uri: str,
    ) -> tuple[
        Optional[str],
        Optional[str],
        Optional[int],
        Optional[str],
        Optional[str],
        Optional[str],
    ]:
        """
        Parse a parent artifact reference into its components.

        Accepts a full store URI, a shorthand ``key:tag`` pair, or a plain key.
        Components that cannot be derived from the given form stay None.

        :param parent_uri: The parent artifact reference string
        :return: A (project, key, iteration, tag, tree, uid) tuple
        """
        parent_project = parent_iteration = parent_tree = parent_uid = None
        parent_key = parent_tag = None

        if mlrun.datastore.is_store_uri(parent_uri):
            # Full store URI - extract all components from the artifact URI
            _, uri = mlrun.datastore.parse_store_uri(parent_uri)
            (
                parent_project,
                parent_key,
                parent_iteration,
                parent_tag,
                parent_tree,
                parent_uid,
            ) = parse_artifact_uri(uri)
        elif parent_uri and ":" in parent_uri:
            # Shorthand "key:tag" - split on the first colon only
            parent_key, parent_tag = parent_uri.split(":", maxsplit=1)
        else:
            # Plain key (possibly None/empty)
            parent_key = parent_uri

        return (
            parent_project,
            parent_key,
            parent_iteration,
            parent_tag,
            parent_tree,
            parent_uid,
        )

    @staticmethod
    def _add_artifact_category_query(category, query):
        """Filter the query by the artifact kinds the given category maps to."""
        kinds, exclude = category.to_kinds_filter()
        # A category maps either to an inclusion or an exclusion list of kinds
        kind_condition = (
            ArtifactV2.kind.notin_(kinds) if exclude else ArtifactV2.kind.in_(kinds)
        )
        return query.filter(kind_condition)

    def _get_existing_artifact(
        self,
        session,
        project: str,
        key: str,
        uid: typing.Optional[str] = None,
        producer_id: typing.Optional[str] = None,
        iteration: typing.Optional[int] = None,
    ):
        """
        Fetch the single artifact record matching the given identifiers, or None.

        :raises sqlalchemy.exc.MultipleResultsFound: If more than one record matches
            (propagated from ``one_or_none``)
        """
        query = self._query(session, ArtifactV2, key=key, project=project)

        optional_filters = []
        if uid:
            optional_filters.append(ArtifactV2.uid == uid)
        if producer_id:
            optional_filters.append(ArtifactV2.producer_id == producer_id)
        # iteration 0 is a valid value, so compare against None explicitly
        if iteration is not None:
            optional_filters.append(ArtifactV2.iteration == iteration)
        if optional_filters:
            query = query.filter(*optional_filters)

        return query.one_or_none()

    def _should_update_artifact(
        self,
        existing_artifact: ArtifactV2,
        uid=None,
        iteration=None,
    ):
        # we should create a new artifact if we got a new iteration or the calculated uid is different.
        # otherwise we should update the existing artifact
        if uid is not None and existing_artifact.uid != uid:
            return False
        if iteration is not None and existing_artifact.iteration != iteration:
            return False
        return True

    def _update_artifact_latest_tag_on_deletion(self, session, object_record):
        """
        Update the 'latest' tag for an ArtifactV2 object, moving it to the most recent
        artifact if necessary.

        Called while ``object_record`` is being deleted: if it holds the 'latest'
        tag, the tag is re-pointed at the most recently updated artifact(s) sharing
        the same key. For hyperparameter runs (iteration != 0) the tag stays put if
        other iterations of the same producer still carry it.

        :param session:       DB session (mutated - tags may be added/re-pointed;
                              no commit is performed here)
        :param object_record: The ArtifactV2 record being deleted
        """

        # Step 1: Find the "latest" tag
        latest_tag = self._find_artifact_latest_tag(session, object_record)

        if not latest_tag:
            # Nothing to move - the deleted artifact was not tagged 'latest'
            logger.debug(
                "No 'latest' tag found for artifact",
                artifact_uid=object_record.uid,
            )
            return

        # Check if we should check for other 'latest' tags (only when iteration != 0) for hyperparameters
        if object_record.iteration != 0:
            # Step 2: Check if there are other "latest" tags in the other iterations
            other_latest = self._has_latest_tag_in_different_iterations(
                session, object_record
            )

            if other_latest:
                logger.debug(
                    "'latest' tag exists in other iterations for the same producer_id. "
                    "Not moving the 'latest' tag",
                    artifact_uid=object_record.uid,
                    producer_id=object_record.producer_id,
                )
                return

        # Step 3: Move the "latest" tag to the most recently updated artifacts with the same key
        most_recent_artifact_ids = self._find_previous_most_recent_artifact_ids(
            session, object_record
        )

        if most_recent_artifact_ids:
            logger.debug(
                "Moving 'latest' tag to the most recent artifacts",
                artifact_id=most_recent_artifact_ids,
            )
            # Reuse the existing tag row for the first target artifact
            latest_tag.obj_id = most_recent_artifact_ids[0]
            session.add(latest_tag)

            # if there are more than one recent artifacts (hyperparam run), we need to add the latest tag to all of them
            for recent_artifact_id in most_recent_artifact_ids[1:]:
                tag = ArtifactV2.Tag(
                    project=object_record.project,
                    name=mlrun.common.constants.RESERVED_TAG_NAME_LATEST,
                    obj_name=object_record.key,
                )
                tag.obj_id = recent_artifact_id
                session.add(tag)
        else:
            logger.warning(
                "No recent artifact found to move 'latest' tag",
                artifact_uid=object_record.uid,
            )

    @staticmethod
    def _find_artifact_latest_tag(session, object_record):
        """Return the artifact's 'latest' tag row, or None if it does not have one."""
        latest_tag_query = session.query(ArtifactV2.Tag).filter(
            ArtifactV2.Tag.obj_id == object_record.id,
            ArtifactV2.Tag.name == mlrun.common.constants.RESERVED_TAG_NAME_LATEST,
            ArtifactV2.Tag.project == object_record.project,
        )
        return latest_tag_query.one_or_none()

    @staticmethod
    def _has_latest_tag_in_different_iterations(session, object_record):
        """Check if other iterations for the same producer_id have the 'latest' tag."""
        # IDs of sibling artifacts: same producer/project/key, different iteration
        sibling_iteration_ids = session.query(ArtifactV2.id).filter(
            ArtifactV2.producer_id == object_record.producer_id,
            ArtifactV2.iteration != object_record.iteration,
            ArtifactV2.project == object_record.project,
            ArtifactV2.key == object_record.key,
        )
        # EXISTS clause: a 'latest' tag on any of those siblings (not on this record)
        latest_on_sibling = exists().where(
            ArtifactV2.Tag.obj_id != object_record.id,
            ArtifactV2.Tag.name == mlrun.common.constants.RESERVED_TAG_NAME_LATEST,
            ArtifactV2.Tag.project == object_record.project,
            ArtifactV2.Tag.obj_name == object_record.key,
            ArtifactV2.Tag.obj_id.in_(sibling_iteration_ids),
        )
        # Returns True if such a tag exists, False otherwise
        return session.query(latest_on_sibling).scalar()

    @staticmethod
    def _find_previous_most_recent_artifact_ids(session, object_record):
        """Find the most recent artifact ids based on the update timestamp, excluding the current artifact."""

        # Select only the columns we need to keep the loaded data small
        candidates = session.query(
            ArtifactV2.id,
            ArtifactV2.iteration,
            ArtifactV2.producer_id,
        ).filter(
            ArtifactV2.id != object_record.id,
            ArtifactV2.project == object_record.project,
            ArtifactV2.key == object_record.key,
        )

        # The single most recently updated artifact with the same key
        newest = candidates.order_by(ArtifactV2.updated.desc()).first()
        if not newest:
            return None

        newest_id, newest_iteration, newest_producer_id = newest

        if newest_iteration == 0:
            # Not part of a hyperparam run - only this artifact gets the tag
            return [newest_id]

        # The newest artifact belongs to a hyperparam run, so the 'latest' tag must
        # cover all artifacts of that run (same producer_id, key and project) that
        # don't already carry it - including ones with no tag at all
        untagged_run_members = (
            session.query(ArtifactV2.id)
            .filter(
                ArtifactV2.producer_id == newest_producer_id,
                ArtifactV2.key == object_record.key,
                ArtifactV2.project == object_record.project,
            )
            .outerjoin(ArtifactV2.Tag, ArtifactV2.Tag.obj_id == ArtifactV2.id)
            .filter(
                or_(
                    ArtifactV2.Tag.name.is_(None),
                    ArtifactV2.Tag.name
                    != mlrun.common.constants.RESERVED_TAG_NAME_LATEST,
                )
            )
        )
        return [row[0] for row in untagged_run_members.all()]

    # ---- Functions ----
    @retry_on_conflict
    def store_function(
        self,
        session,
        function,
        name,
        project: str,
        tag="",
        versioned=False,
    ) -> str:
        """
        Store (create or update) a function object in the DB and tag it.

        :param session:   DB session
        :param function:  The function object (dict) to store; a deep copy is taken
                          so the caller's object is not mutated
        :param name:      Function name; must match ``metadata.name`` in the body
                          when both are set
        :param project:   Project name (required)
        :param tag:       Tag to apply; falls back to the body's ``metadata.tag``
                          and then to "latest"
        :param versioned: When True, the record's uid is the function's hash key so
                          every version is queryable; when False a single record per
                          (project, name, tag) is reused with an "unversioned" uid
        :return: The function's computed hash key
        :raises mlrun.errors.MLRunMissingProjectError: If no project was given
        :raises mlrun.errors.MLRunInvalidArgumentError: If ``name`` conflicts with
            the name inside the function body, or node selectors fail validation
        """
        if not project:
            raise mlrun.errors.MLRunMissingProjectError()
        logger.debug(
            "Storing function to DB",
            name=name,
            project=project,
            tag=tag,
            versioned=versioned,
            metadata=function.get("metadata"),
        )
        function = deepcopy(function)
        tag = (
            tag
            or get_in(function, "metadata.tag")
            or mlrun.common.constants.RESERVED_TAG_NAME_LATEST
        )
        # NOTE: the hash is computed before the tag is cleared below - order matters
        hash_key = fill_function_hash(function, tag)

        # clear tag from object in case another function will "take" that tag
        update_in(function, "metadata.tag", "")

        # versioned means whether we want to version this function object so that it will queryable by its hash key
        # to enable that we set the uid to the hash key so it will have a unique record (Unique constraint of function
        # is the set (project, name, uid))
        # when it's not enabled it means we want to have one unique function object for the set (project, name, tag)
        # that will be reused on every store function (cause we don't want to version each version e.g. create a new
        # record) so we set the uid to be unversioned-{tag}
        if versioned:
            uid = hash_key
        else:
            uid = f"{unversioned_tagged_object_uid_prefix}{tag}"

        updated = datetime.now(UTC)
        update_in(function, "metadata.updated", updated)
        body_name = function.get("metadata", {}).get("name")
        if body_name and body_name != name:
            raise mlrun.errors.MLRunInvalidArgumentError(
                f"Conflict between requested name and name in function body, function name is {name} while body_name is"
                f" {body_name}"
            )
        if not body_name:
            function.setdefault("metadata", {})["name"] = name
        if function_node_selector := get_in(function, "spec.node_selector"):
            mlrun.k8s_utils.validate_node_selectors(function_node_selector)
        # Reuse the existing record for this (name, project, uid) if one exists
        fn = self._get_class_instance_by_uid(session, Function, name, project, uid)
        if not fn:
            fn = Function(
                name=name,
                project=project,
                uid=uid,
            )
        fn.updated = updated
        labels = get_in(function, "metadata.labels", {})
        update_labels(fn, labels)
        # avoiding data duplications as the attributes below are given in the function object
        # and we store them on a specific columns
        fn.kind = function.pop("kind", None)
        fn.state = function.get("status", {}).pop("state", None)
        fn.struct = function
        self._upsert(session, [fn])
        self.tag_objects_v2(session, [fn], project, tag)
        return hash_key

    def list_functions(
        self,
        session: Session,
        project: typing.Union[str, list[str]],
        name: typing.Optional[str] = None,
        tag: typing.Optional[str] = None,
        kind: typing.Optional[str] = None,
        labels: typing.Optional[list[str]] = None,
        hash_key: typing.Optional[str] = None,
        states: typing.Optional[list[mlrun.common.schemas.FunctionState]] = None,
        format_: mlrun.common.formatters.FunctionFormat = mlrun.common.formatters.FunctionFormat.full,
        offset: typing.Optional[int] = None,
        limit: typing.Optional[int] = None,
        since: typing.Optional[datetime] = None,
        until: typing.Optional[datetime] = None,
    ) -> list[dict]:
        """
        List function objects matching the given filters, formatted per ``format_``.

        :raises mlrun.errors.MLRunMissingProjectError: when no project was given.
        """
        if not project:
            raise mlrun.errors.MLRunMissingProjectError()

        matches = self._find_functions(
            session=session,
            name=name,
            project=project,
            labels=labels,
            tag=tag,
            hash_key=hash_key,
            since=since,
            until=until,
            kind=kind,
            states=states,
            offset=offset,
            limit=limit,
        )

        formatted_functions = []
        for function_record, function_tag in matches:
            struct = function_record.struct
            self._enrich_function_struct_from_model(function_record, struct)

            if function_tag:
                struct["metadata"]["tag"] = function_tag
                # the state column is the authoritative status state
                struct.setdefault("status", {})["state"] = function_record.state
            else:
                # function status should be added only to tagged functions
                # TODO: remove explicit cleaning; we also
                #  will need to understand how to display functions in UI, because if we do not remove the status here,
                #  UI shows two function as `ready` which belong to the same Nuclio function
                struct["status"] = None

            formatted_functions.append(
                mlrun.common.formatters.FunctionFormat.format_obj(struct, format_)
            )
        return formatted_functions

    def get_function(
        self,
        session,
        project: str,
        name: typing.Optional[str] = None,
        tag: typing.Optional[str] = None,
        hash_key: typing.Optional[str] = None,
        format_: typing.Optional[str] = None,
    ) -> dict:
        """
        Retrieve a single function from the DB.

        Since version 1.4.0 function names are normalized before storing. To stay
        backwards compatible with older, non-normalized records we first look up
        the normalized name; if that misses and the original name contains
        underscores, we retry with the name as given.
        """
        if not project:
            raise mlrun.errors.MLRunMissingProjectError()
        normalized_name = mlrun.utils.normalize_name(name)
        try:
            return self._get_function(
                session, normalized_name, project, tag, hash_key, format_
            )
        except mlrun.errors.MLRunNotFoundError:
            # only pre-normalization records could be stored under a name with
            # underscores; anything else is a genuine miss
            if "_" not in name:
                raise
            logger.warning(
                "Failed to get underscore-named function, trying without normalization",
                function_name=name,
            )
            return self._get_function(session, name, project, tag, hash_key, format_)

    def delete_function(self, session: Session, project: str, name: str):
        """Delete a function record along with its tags and labels."""
        logger.debug("Removing function from db", project=project, name=name)

        # tags and labels are removed explicitly because sqlite does not
        # guarantee cascading deletes on these relationships
        self._delete_function_tags(session, project, name, commit=False)
        self._delete_class_labels(
            session, Function, project=project, name=name, commit=False
        )
        self._delete(session, Function, project=project, name=name)

    def delete_functions(
        self, session: Session, project: str, names: typing.Union[str, list[str]]
    ) -> None:
        """Delete the given functions (and their related rows) from a project."""
        logger.debug("Removing functions from db", project=project, name=names)
        self._delete_multi_objects(
            session=session,
            main_table=Function,
            project=project,
            main_table_identifier=Function.name,
            main_table_identifier_values=names,
        )

    def update_function(
        self,
        session,
        name,
        updates: dict,
        project: str,
        tag: str = "",
        hash_key: str = "",
    ):
        """Apply partial (dotted-key) updates onto a stored function struct."""
        if not project:
            raise mlrun.errors.MLRunMissingProjectError()
        uid = self._get_function_uid(
            session=session, name=name, tag=tag, hash_key=hash_key, project=project
        )
        query = self._query(session, Function, name=name, project=project)
        if uid:
            query = query.filter(Function.uid == uid)
        function = query.one_or_none()
        if not function:
            return
        struct = function.struct
        for key, value in updates.items():
            update_in(struct, key, value)
        # only take the kind from the struct when the column is still unset
        if not function.kind:
            function.kind = struct.pop("kind", None)
        function.state = struct.get("status", {}).pop("state", None)
        function.struct = struct
        self._upsert(session, [function])

    def update_function_external_invocation_url(
        self,
        session,
        name: str,
        url: str,
        project: str,
        tag: str = "",
        hash_key: str = "",
        operation: mlrun.common.types.Operation = mlrun.common.types.Operation.ADD,
    ):
        """
        This function updates the external invocation URLs of a function within a project.
        It can add or remove URLs based on the specified `operation` which can be
        either ADD or REMOVE of type :py:class:`~mlrun.types.Operation`

        :param session: DB session.
        :param name: Function name (normalized before lookup).
        :param url: The invocation URL; trailing slashes are stripped before use.
        :param project: Project name, required.
        :param tag: Function tag; defaults to the reserved "latest" tag.
        :param hash_key: Function hash key.
        :param operation: Whether to add or remove the URL.
        :raises mlrun.errors.MLRunMissingProjectError: when no project was given.
        """
        if not project:
            raise mlrun.errors.MLRunMissingProjectError()
        normalized_function_name = mlrun.utils.normalize_name(name)
        function, _ = self._get_function_db_object(
            session,
            normalized_function_name,
            project,
            tag=tag or mlrun.common.constants.RESERVED_TAG_NAME_LATEST,
            hash_key=hash_key,
        )
        if not function:
            logger.debug(
                "Function is not found, skipping external invocation urls update",
                project=project,
                name=name,
                url=url,
            )
            return

        # remove trailing slashes from the URL
        url = url.rstrip("/")

        struct = function.struct
        # be robust to function objects that were stored without a status
        # section - the original `struct["status"]` raised KeyError in that case
        status = struct.setdefault("status", {})
        existing_invocation_urls = status.get("external_invocation_urls", [])
        updated = False
        if (
            operation == mlrun.common.types.Operation.ADD
            and url not in existing_invocation_urls
        ):
            logger.debug(
                "Adding new external invocation url to function",
                project=project,
                name=name,
                url=url,
            )
            updated = True
            existing_invocation_urls.append(url)
            status["external_invocation_urls"] = existing_invocation_urls
        elif (
            operation == mlrun.common.types.Operation.REMOVE
            and url in existing_invocation_urls
        ):
            logger.debug(
                "Removing an external invocation url from function",
                project=project,
                name=name,
                url=url,
            )
            updated = True
            status["external_invocation_urls"].remove(url)

        # update the function record only if the external invocation URLs were updated
        if updated:
            function.struct = struct
            self._upsert(session, [function])

    def _get_function(
        self,
        session,
        name: typing.Optional[str] = None,
        project: typing.Optional[str] = None,
        tag: typing.Optional[str] = None,
        hash_key: typing.Optional[str] = None,
        format_: str = mlrun.common.formatters.FunctionFormat.full,
    ):
        """
        Fetch a single function record by tag and/or hash key and return it as a
        formatted dict.

        :raises mlrun.errors.MLRunNotFoundError: when no matching record exists.
        """
        tag, computed_tag = self._compute_function_tag(tag, hash_key)

        obj, uid = self._get_function_db_object(session, name, project, tag, hash_key)
        # when only a hash key was requested (no tag), the uid was not resolved
        # through a tag record, so no tag is reported in the response metadata
        tag_function_uid = None if not tag and hash_key else uid
        if obj:
            function = obj.struct
            self._enrich_function_struct_from_model(obj, function)

            # If connected to a tag add it to metadata
            if tag_function_uid:
                function["metadata"]["tag"] = computed_tag
                function["metadata"]["uid"] = tag_function_uid
            function.setdefault("status", {})
            # the state column overrides whatever state is stored in the struct
            function["status"]["state"] = obj.state

            return mlrun.common.formatters.FunctionFormat.format_obj(function, format_)
        else:
            function_uri = generate_object_uri(project, name, tag, hash_key)
            raise mlrun.errors.MLRunNotFoundError(f"Function not found {function_uri}")

    def _get_function_db_object(
        self,
        session,
        name: typing.Optional[str] = None,
        project: typing.Optional[str] = None,
        tag: typing.Optional[str] = None,
        hash_key: typing.Optional[str] = None,
    ) -> tuple[Function, str]:
        """Return the matching Function row (or None) together with its resolved uid."""
        uid = self._get_function_uid(
            session=session,
            name=name,
            tag=tag,
            hash_key=hash_key,
            project=project,
        )
        query = self._query(session, Function, name=name, project=project)
        if uid:
            query = query.filter(Function.uid == uid)
        return query.one_or_none(), uid

    def _get_function_uid(
        self, session, name: str, tag: str, hash_key: str, project: str
    ):
        """
        Resolve the uid of a function record from a tag and/or hash key.

        :raises mlrun.errors.MLRunNotFoundError: when resolution goes through a
            tag and no matching tag record exists.
        """
        tag, computed_tag = self._compute_function_tag(tag, hash_key)
        # a hash key identifies the record directly (the uid IS the hash key)
        # unless a tag was also given and the hash key is a versioned one - in
        # that case the tag record is the source of truth
        if hash_key and (not tag or unversioned_tagged_object_uid_prefix in hash_key):
            return hash_key
        else:
            tag_function_uid = self._resolve_class_tag_uid(
                session, Function, project, name, computed_tag
            )
            if tag_function_uid is None:
                function_uri = generate_object_uri(project, name, tag)
                raise mlrun.errors.MLRunNotFoundError(
                    f"Function tag not found {function_uri}"
                )
            return tag_function_uid

    @staticmethod
    def _compute_function_tag(tag: str, hash_key: str):
        """
        Return the (tag, computed_tag) pair to use for function resolution.

        An unversioned hash key embeds the tag after its first dash; in that case
        the embedded tag is used when no explicit tag was given. Otherwise the
        computed tag falls back to the reserved "latest" tag.
        """
        if hash_key and unversioned_tagged_object_uid_prefix in hash_key:
            embedded_tag = tag or hash_key.split("-", maxsplit=1)[1]
            return embedded_tag, embedded_tag
        return tag, tag or mlrun.common.constants.RESERVED_TAG_NAME_LATEST

    def _enrich_function_struct_from_model(
        self, function: Function, function_struct: dict
    ):
        """Copy authoritative column values from the DB model onto the function struct."""
        function_struct["kind"] = function.kind
        function_struct["metadata"]["project"] = function.project

        # The struct stores 'updated' with fsp=6 while the DB column uses fsp=3
        # (SQLite returns fsp=6). 'ORDER BY' runs against the column, so the
        # column value is returned to keep ordering and payload consistent.
        updated_value = getattr(function, "updated", None)
        if updated_value:
            # isoformat drops zero micro/milliseconds by default, so force the
            # microseconds timespec for a stable representation
            function_struct["metadata"]["updated"] = self._add_utc_timezone(
                updated_value
            ).isoformat(timespec="microseconds")

    def _delete_project_functions(self, session: Session, project: str):
        """Delete every function record belonging to the project."""
        logger.debug("Removing project functions from db", project=project)
        self._delete_multi_objects(
            session=session, main_table=Function, project=project
        )

    def _list_project_function_names(
        self, session: Session, project: str, limit: Optional[int] = None
    ) -> list[str]:
        """Return the distinct function names in a project, optionally capped."""
        query = self._query(session, distinct(Function.name), project=project)
        if limit:
            query = query.limit(limit)
        return [row[0] for row in query.all()]

    def _delete_resources_tags(self, session: Session, project: str):
        """Delete the project's tag records of every tagged class."""
        for tag_cls in _tagged:
            self._delete(session, tag_cls, project=project)

    def _delete_resources_labels(self, session: Session, project: str):
        """Delete the project's label records of every project-scoped labeled class."""
        for label_cls in _labeled:
            # some label tables carry no project column and are skipped here
            if hasattr(label_cls, "project"):
                self._delete(session, label_cls, project=project)

    def _delete_function_tags(self, session, project, function_name, commit=True):
        """Delete all tag records pointing at the given function."""
        matching_tags = session.query(Function.Tag).filter(
            Function.Tag.project == project,
            Function.Tag.obj_name == function_name,
        )
        for tag_record in matching_tags:
            session.delete(tag_record)
        if commit:
            session.commit()

    def _list_function_tags(self, session, project, function_id):
        """Return the distinct tag names attached to a specific function record."""
        tag_names = (
            session.query(Function.Tag.name)
            .filter(
                Function.Tag.project == project,
                Function.Tag.obj_id == function_id,
            )
            .distinct()
        )
        return [name for (name,) in tag_names]

    # ---- Schedules ----
    @retry_on_conflict
    def store_schedule(
        self,
        session: Session,
        project: str,
        name: str,
        kind: mlrun.common.schemas.ScheduleKinds = None,
        scheduled_object: Any = None,
        cron_trigger: mlrun.common.schemas.ScheduleCronTrigger = None,
        labels: typing.Optional[dict] = None,
        last_run_uri: typing.Optional[str] = None,
        concurrency_limit: typing.Optional[int] = None,
        next_run_time: typing.Optional[datetime] = None,
    ) -> tuple[mlrun.common.schemas.ScheduleRecord, bool]:
        """
        Create the schedule when it does not exist, otherwise update it in place.

        :return: the stored schedule (API scheme) and whether this was an update.
        """
        existing_record = self._get_schedule_record(
            session=session, project=project, name=name, raise_on_not_found=False
        )
        is_update = existing_record is not None
        schedule = existing_record
        if schedule is None:
            schedule = self._create_schedule_db_record(
                project=project,
                name=name,
                kind=kind,
                scheduled_object=scheduled_object,
                cron_trigger=cron_trigger,
                concurrency_limit=concurrency_limit,
                labels=labels,
                next_run_time=next_run_time,
            )

        # apply the non-None fields on top of the (new or existing) record
        self._update_schedule_body(
            schedule=schedule,
            scheduled_object=scheduled_object,
            cron_trigger=cron_trigger,
            labels=labels,
            last_run_uri=last_run_uri,
            concurrency_limit=concurrency_limit,
            next_run_time=next_run_time,
        )

        logger.debug(
            "Storing schedule to db",
            project=schedule.project,
            name=schedule.name,
            kind=schedule.kind,
            cron_trigger=schedule.cron_trigger,
            labels=schedule.labels,
            concurrency_limit=schedule.concurrency_limit,
            scheduled_object=schedule.scheduled_object,
        )

        self._upsert(session, [schedule])
        return self._transform_schedule_record_to_scheme(schedule), is_update

    def create_schedule(
        self,
        session: Session,
        project: str,
        name: str,
        kind: mlrun.common.schemas.ScheduleKinds,
        scheduled_object: Any,
        cron_trigger: mlrun.common.schemas.ScheduleCronTrigger,
        concurrency_limit: int,
        labels: typing.Optional[dict] = None,
        next_run_time: typing.Optional[datetime] = None,
    ) -> mlrun.common.schemas.ScheduleRecord:
        """Create a new schedule record and return it as an API scheme object."""
        schedule_record = self._create_schedule_db_record(
            project=project,
            name=name,
            kind=kind,
            scheduled_object=scheduled_object,
            cron_trigger=cron_trigger,
            concurrency_limit=concurrency_limit,
            labels=labels,
            next_run_time=next_run_time,
        )

        logger.debug(
            "Saving schedule to db",
            project=schedule_record.project,
            name=schedule_record.name,
            kind=schedule_record.kind,
            cron_trigger=schedule_record.cron_trigger,
            concurrency_limit=schedule_record.concurrency_limit,
            next_run_time=schedule_record.next_run_time,
        )
        self._upsert(session, [schedule_record])
        return self._transform_schedule_record_to_scheme(schedule_record)

    @staticmethod
    def _create_schedule_db_record(
        project: str,
        name: str,
        kind: mlrun.common.schemas.ScheduleKinds,
        scheduled_object: Any,
        cron_trigger: mlrun.common.schemas.ScheduleCronTrigger,
        concurrency_limit: int,
        labels: typing.Optional[dict] = None,
        next_run_time: typing.Optional[datetime] = None,
    ) -> Schedule:
        """Build a new (not yet persisted) Schedule DB record."""
        if concurrency_limit is None:
            concurrency_limit = config.httpdb.scheduling.default_concurrency_limit
        # next_run_time arrives with a localized timezone (e.g. +03:00); all DB
        # timestamps are stored in UTC, so convert before persisting
        utc_next_run_time = (
            next_run_time.astimezone(pytz.utc) if next_run_time is not None else None
        )

        schedule = Schedule(
            project=project,
            name=name,
            kind=kind.value,
            creation_time=datetime.now(UTC),
            concurrency_limit=concurrency_limit,
            next_run_time=utc_next_run_time,
            # scheduled_object and cron_trigger are properties mapped (via
            # getters/setters) onto other columns, hence IDE warnings about them
            scheduled_object=scheduled_object,
            cron_trigger=cron_trigger,
        )

        update_labels(schedule, labels or {})
        return schedule

    def update_schedule(
        self,
        session: Session,
        project: str,
        name: str,
        scheduled_object: Any = None,
        cron_trigger: mlrun.common.schemas.ScheduleCronTrigger = None,
        labels: typing.Optional[dict] = None,
        last_run_uri: typing.Optional[str] = None,
        concurrency_limit: typing.Optional[int] = None,
        next_run_time: typing.Optional[datetime] = None,
    ):
        """Update an existing schedule in place; missing schedules raise not-found."""
        schedule = self._get_schedule_record(session, project, name)
        self._update_schedule_body(
            schedule=schedule,
            scheduled_object=scheduled_object,
            cron_trigger=cron_trigger,
            labels=labels,
            last_run_uri=last_run_uri,
            concurrency_limit=concurrency_limit,
            next_run_time=next_run_time,
        )

        logger.debug(
            "Updating schedule in db",
            project=project,
            name=name,
            cron_trigger=cron_trigger,
            labels=labels,
            concurrency_limit=concurrency_limit,
            next_run_time=next_run_time,
        )
        self._upsert(session, [schedule])

    @staticmethod
    def _update_schedule_body(
        schedule: Schedule,
        scheduled_object: Any = None,
        cron_trigger: mlrun.common.schemas.ScheduleCronTrigger = None,
        labels: typing.Optional[dict] = None,
        last_run_uri: typing.Optional[str] = None,
        concurrency_limit: typing.Optional[int] = None,
        next_run_time: typing.Optional[datetime] = None,
    ):
        """
        Copy the non-None arguments onto the schedule record in place.

        `is not None` checks are intentional - legitimate values may be empty
        strings/dictionaries etc.
        """
        if scheduled_object is not None:
            schedule.scheduled_object = scheduled_object

        if cron_trigger is not None:
            schedule.cron_trigger = cron_trigger

        if labels is not None:
            update_labels(schedule, labels)

        if last_run_uri is not None:
            schedule.last_run_uri = last_run_uri

        if concurrency_limit is not None:
            schedule.concurrency_limit = concurrency_limit

        if next_run_time is not None:
            # the incoming value carries a localized timezone (e.g. +03:00);
            # DB timestamps are stored in UTC, so convert before assigning
            schedule.next_run_time = next_run_time.astimezone(pytz.utc)

    def list_schedules(
        self,
        session: Session,
        project: typing.Optional[typing.Union[str, list[str]]] = None,
        name: typing.Optional[str] = None,
        labels: typing.Optional[list[str]] = None,
        kind: mlrun.common.schemas.ScheduleKinds = None,
        next_run_time_since: Optional[datetime] = None,
        next_run_time_until: Optional[datetime] = None,
        as_records: bool = False,
        limit: typing.Optional[int] = None,
    ) -> typing.Union[Query, list[mlrun.common.schemas.ScheduleRecord]]:
        """
        List schedules matching the given filters.

        When ``as_records`` is True, the raw (un-executed) query is returned
        instead of transformed schedule schemes.
        """
        logger.debug("Getting schedules from db", project=project, name=name, kind=kind)
        query = self._query(session, Schedule, kind=kind)
        query = self._filter_query_by_resource_project(query, Schedule, project)
        if next_run_time_since or next_run_time_until:
            query = generate_time_range_query(
                query=query,
                field=Schedule.next_run_time,
                since=next_run_time_since,
                until=next_run_time_until,
            )
        if limit:
            query = query.limit(limit)

        if name is not None:
            query = query.filter(generate_query_predicate_for_name(Schedule.name, name))
        labels = label_set(labels)
        query = self._add_labels_filter(session, query, Schedule, labels)

        if as_records:
            # note: the caller receives the query itself, not a list
            return query

        schedules = [
            self._transform_schedule_record_to_scheme(db_schedule)
            for db_schedule in query
        ]
        return schedules

    def get_schedule(
        self, session: Session, project: str, name: str, raise_on_not_found: bool = True
    ) -> typing.Optional[mlrun.common.schemas.ScheduleRecord]:
        """Fetch a single schedule, transformed to its API scheme (or None)."""
        logger.debug("Getting schedule from db", project=project, name=name)
        schedule_record = self._get_schedule_record(
            session, project, name, raise_on_not_found
        )
        if schedule_record:
            return self._transform_schedule_record_to_scheme(schedule_record)
        return None

    def delete_schedule(self, session: Session, project: str, name: str):
        """Delete a single schedule and its labels from the DB."""
        logger.debug("Removing schedule from db", project=project, name=name)
        # labels are removed explicitly since sqlite does not always cascade
        self._delete_class_labels(
            session, Schedule, project=project, name=name, commit=False
        )
        self._delete(session, Schedule, project=project, name=name)

    def delete_schedules(
        self, session: Session, project: str, names: typing.Union[str, list[str]]
    ) -> None:
        """Delete the given schedules (and their related rows) from a project."""
        logger.debug("Removing schedules from db", project=project, name=names)
        self._delete_multi_objects(
            session=session,
            main_table=Schedule,
            project=project,
            main_table_identifier=Schedule.name,
            main_table_identifier_values=names,
        )

    def align_schedule_labels(self, session: Session):
        """
        Merge every schedule's DB labels with its scheduled-object labels and
        persist the merged result.
        """
        updated_schedules = []
        for db_schedule in self.list_schedules(session=session, as_records=True):
            schedule_record = self._transform_schedule_record_to_scheme(db_schedule)
            current_labels = {label.name: label.value for label in db_schedule.labels}
            merged_labels = (
                framework.utils.helpers.merge_schedule_and_schedule_object_labels(
                    labels=current_labels,
                    scheduled_object=schedule_record.scheduled_object,
                )
            )
            self._update_schedule_body(
                schedule=db_schedule,
                scheduled_object=schedule_record.scheduled_object,
                labels=merged_labels,
            )
            updated_schedules.append(db_schedule)
        self._upsert(session, updated_schedules)

    def delete_project_schedules(self, session: Session, project: str):
        """Delete every schedule record belonging to the project."""
        logger.debug("Removing project schedules from db", project=project)
        self._delete_multi_objects(
            session=session, main_table=Schedule, project=project
        )

    def _delete_multi_objects(
        self,
        session: Session,
        main_table: framework.db.sqldb.base.BaseModel,
        project: str,
        related_tables: typing.Optional[list[framework.db.sqldb.base.BaseModel]] = None,
        main_table_identifier: typing.Optional[Column] = None,
        main_table_identifier_values: typing.Optional[
            typing.Union[str, list[str]]
        ] = None,
        additional_filter: typing.Optional[BinaryExpression] = None,
    ) -> int:
        """
        Delete multiple objects from the DB, including related tables.
        :param session: SQLAlchemy session.
        :param main_table: The main table to delete from.
        :param project: The project to delete from. Passing "*" deletes across
            all projects, in which case the identifier filter becomes mandatory.
        :param related_tables: Related tables to delete from, will be joined with the main table by the identifiers
            since in SQLite the deletion is not always cascading.
        :param main_table_identifier: The main table attribute to filter by.
        :param main_table_identifier_values: The values corresponding to main_table_identifier to filter by.
        :param additional_filter: An extra WHERE clause ANDed onto the computed filter.

        :return: The amount of deleted rows from the main table.
        """
        related_tables = related_tables or []

        def skip_deletion():
            # nothing to match against - deleting would be either a no-op or
            # (for the "*" case) dangerously unbounded
            logger.debug(
                "No identifier values provided, skipping deletion",
                project=project,
                tables=[main_table] + related_tables,
            )
            return 0

        # TODO: add project permissions handling like in the list methods
        if project != "*":
            where_clause = main_table.project == project
            # To allow deleting all project resources - don't require main_table_identifier
            if main_table_identifier:
                if not main_table_identifier_values:
                    return skip_deletion()

                where_clause = and_(
                    where_clause,
                    main_table_identifier.in_(main_table_identifier_values),
                )
        else:
            if not main_table_identifier_values or not main_table_identifier:
                return skip_deletion()
            where_clause = main_table_identifier.in_(main_table_identifier_values)
        if additional_filter is not None:
            where_clause = and_(where_clause, additional_filter)
        # related rows are removed first so the join against the still-present
        # main table rows can identify them
        for cls in related_tables:
            logger.debug(
                "Removing objects",
                cls=cls,
                project=project,
                main_table_identifier=main_table_identifier,
            )

            # The select is mandatory for sqlalchemy 1.4 because
            # query.delete does not support multiple-table criteria within DELETE
            subquery = select(cls.id).join(main_table).where(where_clause).subquery()
            stmt = (
                delete(cls)
                .where(cls.id.in_(aliased(subquery)))
                .execution_options(synchronize_session=False)
            )

            # Execute the delete statement
            execution_obj = session.execute(stmt)
            logger.debug(
                "Removed rows from related table",
                rowcount=execution_obj.rowcount,
                cls=cls,
                main_table=main_table,
                project=project,
            )

        total_deleted = self._delete_table_in_batches(session, main_table, where_clause)
        logger.info(
            "Completed deletion",
            deletions_count=total_deleted,
            main_table=main_table,
            project=project,
            main_table_identifier=main_table_identifier,
        )
        return total_deleted

    @staticmethod
    def _delete_table_in_batches(
        session: Session,
        table: framework.db.sqldb.base.BaseModel,
        where_clause,
    ) -> int:
        """
        Delete rows from a table in batches, walking the ID space in order.
        :param session: SQLAlchemy session.
        :param table: SQLAlchemy ORM model/table to delete from.
        :param where_clause: SQLAlchemy WHERE clause.
        :return: Total number of deleted rows.
        """
        batch_size = mlrun.mlconf.httpdb.projects.resource_deletion_batch_size
        total_deleted = 0
        cursor_id = 0

        # fetch the next batch of matching ids strictly above the cursor;
        # an empty batch means we have exhausted the matches
        while batch := (
            session.query(table.id)
            .filter(where_clause, table.id > cursor_id)
            .order_by(table.id)
            .limit(batch_size)
            .all()
        ):
            batch_ids = [row.id for row in batch]
            result = session.execute(
                delete(table)
                .where(table.id.in_(batch_ids))
                .execution_options(synchronize_session=False)
            )
            # commit per batch to keep transactions small
            session.commit()

            cursor_id = batch_ids[-1]
            total_deleted += result.rowcount

            logger.debug(
                "Deleted batch from table",
                batch_size=len(batch_ids),
                total_deleted=total_deleted,
                last_id=cursor_id,
                table=table,
            )
        return total_deleted

    def _get_schedule_record(
        self, session: Session, project: str, name: str, raise_on_not_found: bool = True
    ) -> Schedule:
        """Return the schedule DB record, optionally raising when missing."""
        schedule_record = self._query(
            session, Schedule, project=project, name=name
        ).one_or_none()
        if schedule_record is None and raise_on_not_found:
            raise mlrun.errors.MLRunNotFoundError(
                f"Schedule not found: project={project}, name={name}"
            )
        return schedule_record

    def _delete_project_feature_vectors(self, session: Session, project: str):
        """Delete every feature-vector record belonging to the project."""
        logger.debug("Removing project feature-vectors from db", project=project)
        self._delete_multi_objects(
            session=session, main_table=FeatureVector, project=project
        )

    def _list_project_feature_vector_names(
        self, session: Session, project: str, limit: Optional[int] = None
    ) -> list[str]:
        """Return the distinct feature-vector names in a project, optionally capped."""
        query = self._query(session, distinct(FeatureVector.name), project=project)
        if limit:
            query = query.limit(limit)
        return [row[0] for row in query.all()]

    def tag_objects_v2(
        self,
        session,
        objs,
        project: str,
        name: str,
        obj_name_attribute: Union[str, list[str]] = "name",
        obj_name_suffix: Optional[str] = None,
    ):
        """
        Tag every object in ``objs`` with the given tag name, creating new tag
        records or re-pointing existing ones at the objects' current ids.
        """
        if isinstance(obj_name_attribute, str):
            obj_name_attribute = [obj_name_attribute]

        tag_records = []
        for obj in objs:
            # the tag's object name is composed from the configured attributes
            name_parts = [getattr(obj, attr) or "" for attr in obj_name_attribute]
            obj_name = "-".join(name_parts)
            if obj_name_suffix:
                obj_name = f"{obj_name}-{obj_name_suffix}"

            tag = self._query(
                session, obj.Tag, name=name, project=project, obj_name=obj_name
            ).one_or_none()
            if tag is None:
                tag = obj.Tag(project=project, name=name, obj_name=obj_name)
            # always re-point the tag at the object's current record
            tag.obj_id = obj.id
            tag_records.append(tag)
        self._upsert(session, tag_records)

    # ---- Projects ----
    def create_project(self, session: Session, project: mlrun.common.schemas.Project):
        """Create a new project record (and its initial summary) in the DB.

        :param session: SQLAlchemy session.
        :param project: Project schema to persist; its metadata.created field is
                        set here to the creation time.
        """
        logger.debug("Creating project in DB", project_name=project.metadata.name)
        # datetime.utcnow() is deprecated since Python 3.12 and returns a naive
        # datetime; use an explicit timezone-aware UTC timestamp, consistent with
        # the rest of this file (e.g. _append_project_summary).
        created = datetime.now(UTC)
        project.metadata.created = created
        # TODO: handle taking out the functions/workflows/artifacts out of the project and save them separately
        project_record = Project(
            name=project.metadata.name,
            description=project.spec.description,
            source=project.spec.source,
            state=project.status.state,
            created=created,
            owner=project.spec.owner,
            default_function_node_selector=project.spec.default_function_node_selector,
            full_object=project.dict(),
        )
        # project labels are kept in a separate labels table
        labels = project.metadata.labels or {}
        update_labels(project_record, labels)

        # store the project record together with its (empty) summary in one upsert
        objects_to_store = [project_record]
        self._append_project_summary(project, objects_to_store)
        self._upsert(session, objects_to_store)

    @staticmethod
    def _append_project_summary(project, objects_to_store):
        """Append a fresh (empty) summary record for the project to objects_to_store."""
        empty_summary = mlrun.common.schemas.ProjectSummary(
            name=project.metadata.name,
        )
        objects_to_store.append(
            ProjectSummary(
                project=project.metadata.name,
                summary=empty_summary.dict(),
                updated=datetime.now(UTC),
            )
        )

    @retry_on_conflict
    def store_project(
        self, session: Session, name: str, project: mlrun.common.schemas.Project
    ):
        """Create the project if it does not exist, otherwise overwrite its record."""
        logger.debug(
            "Storing project in DB",
            name=name,
            project_metadata=project.metadata,
            project_owner=project.spec.owner,
            project_desired_state=project.spec.desired_state,
            default_function_node_selector=project.spec.default_function_node_selector,
            project_status=project.status,
        )
        # strip whitespace from params before persisting to avoid duplicate keys
        self._normalize_project_parameters(project)

        existing_record = self._get_project_record(
            session, name, raise_on_not_found=False
        )
        if existing_record is None:
            self.create_project(session, project)
        else:
            self._update_project_record_from_project(
                session, existing_record, project
            )

    @staticmethod
    def _normalize_project_parameters(project: mlrun.common.schemas.Project):
        """Strip leading/trailing whitespace from project parameter keys and values.

        Prevents near-duplicate entries that differ only by surrounding spaces.
        """
        params = project.spec.params
        if params:
            normalized = {}
            for key, value in params.items():
                if isinstance(value, str):
                    value = value.strip()
                normalized[str(key).strip()] = value
            project.spec.params = normalized

    def patch_project(
        self,
        session: Session,
        name: str,
        project: dict,
        patch_mode: mlrun.common.schemas.PatchMode = mlrun.common.schemas.PatchMode.replace,
    ):
        """Merge the given partial project dict into the stored project record."""
        logger.debug("Patching project in DB", name=name, patch_mode=patch_mode)
        # raises MLRunNotFoundError if the project does not exist
        record = self._get_project_record(session, name)
        self._patch_project_record_from_project(
            session, name, record, project, patch_mode
        )

    def get_project(
        self,
        session: Session,
        name: typing.Optional[str] = None,
        project_id: typing.Optional[int] = None,
    ) -> mlrun.common.schemas.ProjectOut:
        """Return a project by name or id, raising if it does not exist."""
        record = self._get_project_record(session, name, project_id)
        return self._transform_project_record_to_schema(session, record)

    def delete_project(
        self,
        session: Session,
        name: str,
        deletion_strategy: mlrun.common.schemas.DeletionStrategy = mlrun.common.schemas.DeletionStrategy.default(),
    ):
        """Remove the project record and its summary from the DB."""
        logger.debug(
            "Deleting project from DB", name=name, deletion_strategy=deletion_strategy
        )
        # the summary is kept in its own table, so delete it explicitly first
        self._delete_project_summary(session, name)
        self._delete(session, Project, name=name)

    def list_projects(
        self,
        session: Session,
        owner: typing.Optional[str] = None,
        format_: mlrun.common.formatters.ProjectFormat = mlrun.common.formatters.ProjectFormat.full,
        labels: typing.Optional[list[str]] = None,
        state: mlrun.common.schemas.ProjectState = None,
        names: typing.Optional[list[str]] = None,
    ) -> mlrun.common.schemas.ProjectsOutput:
        """List projects, optionally filtered by owner, state, labels and names.

        For the name_only format only the name column is queried and the output
        contains plain name strings instead of formatted project objects.
        """
        name_only = format_ == mlrun.common.formatters.ProjectFormat.name_only
        # for name_only there is no need to load the full project rows
        queried_entity = Project.name if name_only else Project
        query = self._query(session, queried_entity, owner=owner, state=state)

        # attach filters to the query
        if labels:
            query = self._add_labels_filter(session, query, Project, labels)
        if names is not None:
            query = query.filter(Project.name.in_(names))

        projects = []
        for record in query.all():
            if name_only:
                # can't use formatter as we haven't queried the entire object anyway
                projects.append(record.name)
            else:
                projects.append(
                    mlrun.common.formatters.ProjectFormat.format_obj(
                        self._transform_project_record_to_schema(session, record),
                        format_,
                    )
                )
        return mlrun.common.schemas.ProjectsOutput(projects=projects)

    def get_project_summary(
        self,
        session,
        project: str,
    ) -> typing.Optional[mlrun.common.schemas.ProjectSummary]:
        """Return the stored summary of a single project.

        :param session: SQLAlchemy session.
        :param project: Project name.
        :raise MLRunNotFoundError: if no summary record exists for the project.
        """
        project_summary_record = self._query(
            session,
            ProjectSummary,
            project=project,
        ).one_or_none()
        if not project_summary_record:
            raise mlrun.errors.MLRunNotFoundError(
                f"Project summary not found: {project=}"
            )

        project_summary_record.summary["name"] = project_summary_record.project
        # the updated column is timezone naive - mark it as UTC so callers get the
        # same timezone-aware value list_project_summaries returns
        project_summary_record.summary["updated"] = (
            project_summary_record.updated.replace(tzinfo=UTC)
        )
        return mlrun.common.schemas.ProjectSummary(**project_summary_record.summary)

    def list_project_summaries(
        self,
        session: Session,
        owner: typing.Optional[str] = None,
        labels: typing.Optional[list[str]] = None,
        state: mlrun.common.schemas.ProjectState = None,
        names: typing.Optional[list[str]] = None,
    ):
        """Return the summaries of all projects matching the given filters."""
        projects_query = self._query(session, Project.name)
        if owner:
            projects_query = projects_query.filter(Project.owner == owner)
        if state:
            projects_query = projects_query.filter(Project.state == state)
        if labels:
            projects_query = self._add_labels_filter(
                session, projects_query, Project, labels
            )
        if names:
            projects_query = projects_query.filter(Project.name.in_(names))

        # join the summaries against the filtered project names so that only
        # summaries of matching projects are returned
        projects_alias = aliased(Project, projects_query.subquery())
        summaries_query = self._query(session, ProjectSummary).join(
            projects_alias, ProjectSummary.project == projects_alias.name
        )

        results = []
        for record in summaries_query.all():
            # record.updated is timezone naive, make it utc
            record.summary["updated"] = record.updated.replace(tzinfo=UTC)
            results.append(mlrun.common.schemas.ProjectSummary(**record.summary))

        return results

    def refresh_project_summaries(
        self,
        session: Session,
        project_summaries: list[mlrun.common.schemas.ProjectSummary],
    ):
        """
        This method updates the summaries of projects that have associated projects in the database
        and removes project summaries that no longer have associated projects.
        """
        summaries_by_name = {
            summary.name: summary.dict() for summary in project_summaries
        }

        # query summary records together with their (possibly missing) project rows
        base_query = (
            session.query(ProjectSummary)
            .outerjoin(Project, Project.name == ProjectSummary.project)
            .filter(ProjectSummary.project.in_(summaries_by_name.keys()))
        )
        associated_summaries = base_query.filter(Project.id.is_not(None)).all()
        orphaned_summaries = base_query.filter(Project.id.is_(None)).all()

        # refresh the summaries whose project still exists
        for record in associated_summaries:
            record.summary = summaries_by_name.get(record.project)
            record.updated = datetime.now(UTC)
            session.add(record)

        # To avoid race conditions where a project might be deleted after its summary is queried
        # but before the transaction completes, we delete project summaries that do not have
        # any associated projects.
        if orphaned_summaries:
            logger.debug(
                "Deleting project summaries that do not have associated projects",
                projects=[record.project for record in orphaned_summaries],
            )
            for record in orphaned_summaries:
                session.delete(record)

        self._commit(session, associated_summaries + orphaned_summaries)

    def _delete_project_summary(
        self,
        session: Session,
        name: str,
    ):
        """Remove the summary record of the given project."""
        logger.debug("Deleting project summary from DB", name=name)
        self._delete(session, ProjectSummary, project=name)

    async def get_project_resources_counters(
        self,
        projects_with_creation_time: list[tuple[str, datetime]],
    ) -> tuple[
        dict[str, int],
        dict[str, int],
        dict[str, int],
        dict[str, int],
        dict[str, int],
        dict[str, int],
        dict[str, int],
        dict[str, int],
        dict[str, int],
        dict[str, int],
        dict[str, int],
        dict[str, int],
        dict[str, int],
        dict[str, int],
        dict[str, int],
        dict[str, int],
        dict[str, int],
        dict[str, int],
        dict[str, int],
    ]:
        """Gather per-project resource counters concurrently.

        Each counter calculation runs in its own threadpool worker with a fresh DB
        session; the results are flattened into a 19-tuple of
        project-name -> count mappings. The return order is fixed and positional -
        see the return statement below; callers unpack by position.

        :param projects_with_creation_time: (project name, creation time) pairs,
            used only by the alert-activation counters.
        """
        # the order of the gather() arguments must match the unpacking below
        results = await asyncio.gather(
            fastapi.concurrency.run_in_threadpool(
                framework.db.session.run_function_with_new_db_session,
                self._calculate_artifact_counters_by_category,
            ),
            fastapi.concurrency.run_in_threadpool(
                framework.db.session.run_function_with_new_db_session,
                self._calculate_schedules_counters,
            ),
            fastapi.concurrency.run_in_threadpool(
                framework.db.session.run_function_with_new_db_session,
                self._calculate_feature_sets_counters,
            ),
            fastapi.concurrency.run_in_threadpool(
                framework.db.session.run_function_with_new_db_session,
                self._calculate_runs_counters,
            ),
            fastapi.concurrency.run_in_threadpool(
                framework.db.session.run_function_with_new_db_session,
                self._calculate_alert_activations_counters,
                projects_with_creation_time,
            ),
            fastapi.concurrency.run_in_threadpool(
                framework.db.session.run_function_with_new_db_session,
                self._calculate_mm_functions_counters,
            ),
            fastapi.concurrency.run_in_threadpool(
                framework.db.session.run_function_with_new_db_session,
                self._calculate_mep_counters,
            ),
        )
        # several calculators return tuples of dicts; unpack them positionally
        (
            category_to_project_artifact_count,
            (
                project_to_schedule_count,
                project_to_schedule_pending_jobs_count,
                project_to_schedule_pending_workflows_count,
            ),
            project_to_feature_set_count,
            (
                project_to_recent_completed_runs_count,
                project_to_recent_failed_runs_count,
                project_to_running_runs_count,
            ),
            (
                project_to_endpoint_alerts_count,
                project_to_job_alerts_count,
                project_to_other_alerts_count,
            ),
            (
                project_to_running_mm_functions,
                project_to_failed_mm_functions_count,
            ),
            (
                project_to_real_time_mep_count,
                project_to_batch_mep_count,
            ),
        ) = results
        # TODO: counters by artifact categories should be expanded to include all categories (currently only models
        #       and other)
        # artifact categories missing from the mapping default to 0 per project
        return (
            category_to_project_artifact_count.get(
                mlrun.common.schemas.ArtifactCategories.other,
                collections.defaultdict(lambda: 0),
            ),
            project_to_schedule_count,
            project_to_schedule_pending_jobs_count,
            project_to_schedule_pending_workflows_count,
            project_to_feature_set_count,
            category_to_project_artifact_count.get(
                mlrun.common.schemas.ArtifactCategories.model,
                collections.defaultdict(lambda: 0),
            ),
            project_to_recent_completed_runs_count,
            project_to_recent_failed_runs_count,
            project_to_running_runs_count,
            project_to_endpoint_alerts_count,
            project_to_job_alerts_count,
            project_to_other_alerts_count,
            category_to_project_artifact_count.get(
                mlrun.common.schemas.ArtifactCategories.dataset,
                collections.defaultdict(lambda: 0),
            ),
            category_to_project_artifact_count.get(
                mlrun.common.schemas.ArtifactCategories.document,
                collections.defaultdict(lambda: 0),
            ),
            category_to_project_artifact_count.get(
                mlrun.common.schemas.ArtifactCategories.llm_prompt,
                collections.defaultdict(lambda: 0),
            ),
            project_to_running_mm_functions,
            project_to_failed_mm_functions_count,
            project_to_real_time_mep_count,
            project_to_batch_mep_count,
        )

    @staticmethod
    def _filter_query_by_resource_project(
        query: sqlalchemy.orm.query.Query,
        resource: type[framework.db.sqldb.base.BaseModel],
        project: typing.Optional[typing.Union[str, list[str]]] = None,
    ) -> sqlalchemy.orm.query.Query:
        """Restrict a query to one project, several projects, or all ("*" / None)."""
        if isinstance(project, list):
            return query.filter(resource.project.in_(project))
        if project and project != "*":
            return query.filter(resource.project == project)
        return query

    @staticmethod
    def _calculate_functions_counters(session) -> dict[str, int]:
        """Count distinct function names per project."""
        rows = (
            session.query(Function.project, func.count(distinct(Function.name)))
            .group_by(Function.project)
            .all()
        )
        # each row is a (project, count) pair
        return dict(rows)

    @staticmethod
    def _calculate_schedules_counters(
        session,
    ) -> tuple[dict[str, int], dict[str, int], dict[str, int]]:
        """Count schedules per project, plus schedules due within the next 24 hours.

        :param session: The active DB session used to query the schedules.
        :return: A tuple containing:
            - total distinct schedule names per project
            - pending (next 24h) non-workflow schedules per project
            - pending (next 24h) workflow-labeled schedules per project
        """
        schedules_count_per_project = (
            session.query(Schedule.project, func.count(distinct(Schedule.name)))
            .group_by(Schedule.project)
            .all()
        )
        project_to_schedule_count = {
            result[0]: result[1] for result in schedules_count_per_project
        }

        # use a single reference time so both bounds of the 24h window agree
        now = datetime.now(UTC)
        next_day = now + timedelta(hours=24)

        # We check the workflow label because the schedule kind
        # is not used properly (not setting pipelines kind for workflow schedules)
        # TODO: fix the schedule kind to be pipeline when scheduling workflows
        workflow_label_exists = (
            select(Schedule.Label.parent)
            .where(
                (Schedule.Label.parent == Schedule.id)
                & (Schedule.Label.name == mlrun_constants.MLRunInternalLabels.workflow)
            )
            .exists()
        )

        query = (
            session.query(
                Schedule.project.label("project_name"),
                Schedule.name.label("schedule_name"),
                case((workflow_label_exists, True), else_=False).label(
                    "has_workflow_label"
                ),
            )
            .filter(Schedule.next_run_time < next_day)
            .filter(Schedule.next_run_time >= now)
            .all()
        )

        project_to_schedule_pending_jobs_count = collections.defaultdict(int)
        project_to_schedule_pending_workflows_count = collections.defaultdict(int)

        for project_name, _schedule_name, is_workflow in query:
            if is_workflow:
                project_to_schedule_pending_workflows_count[project_name] += 1
            else:
                project_to_schedule_pending_jobs_count[project_name] += 1

        return (
            project_to_schedule_count,
            project_to_schedule_pending_jobs_count,
            project_to_schedule_pending_workflows_count,
        )

    @staticmethod
    def _calculate_feature_sets_counters(session) -> dict[str, int]:
        """Count distinct feature-set names per project."""
        rows = (
            session.query(FeatureSet.project, func.count(distinct(FeatureSet.name)))
            .group_by(FeatureSet.project)
            .all()
        )
        return {project: count for project, count in rows}

    def _calculate_mm_functions_counters(
        self, session
    ) -> tuple[dict[str, int], dict[str, int]]:
        """Count model-monitoring functions per project, split by ready/error state."""
        query = session.query(
            Function.project,
            Function.state,
            func.count(),
        )
        # join tags to filter out duplicated function versions
        query = query.join(Function.Tag, Function.id == Function.Tag.obj_id)

        # keep only model-monitoring functions
        mm_labels = label_set(
            [f"{ModelMonitoringAppLabel.KEY}={ModelMonitoringAppLabel.VAL}"]
        )
        query = self._add_labels_filter(session, query, Function, mm_labels)

        # keep only the relevant states
        relevant_states = [
            mlrun.common.schemas.FunctionState.ready,
            mlrun.common.schemas.FunctionState.error,
        ]
        query = query.filter(Function.state.in_(relevant_states))

        running_counts: dict[str, int] = {}
        failed_counts: dict[str, int] = {}
        rows = query.group_by(Function.project, Function.state).all()
        for project, state, count in rows:
            if state == mlrun.common.schemas.FunctionState.ready:
                running_counts[project] = count
            elif state == mlrun.common.schemas.FunctionState.error:
                failed_counts[project] = count

        return running_counts, failed_counts

    @staticmethod
    def _calculate_mep_counters(session) -> tuple[dict[str, int], dict[str, int]]:
        """Count model endpoints per project, split into real-time and batch."""
        rows = (
            session.query(
                ModelEndpoint.project,
                ModelEndpoint.endpoint_type,
                func.count(),
            )
            .group_by(ModelEndpoint.project, ModelEndpoint.endpoint_type)
            .all()
        )

        real_time_counts: dict[str, int] = {}
        batch_counts: dict[str, int] = {}
        for project, endpoint_type, count in rows:
            if endpoint_type == EndpointType.BATCH_EP:
                batch_counts[project] = count
            else:
                # all non-batch endpoint types are accumulated as real-time
                real_time_counts[project] = real_time_counts.get(project, 0) + count

        return real_time_counts, batch_counts

    @staticmethod
    def _calculate_artifact_counters_by_category(
        session: Session,
    ) -> dict[str, dict[str, int]]:
        """Count distinct artifact keys per project, grouped by artifact category."""
        rows = session.query(
            ArtifactV2.project, ArtifactV2.kind, func.count(distinct(ArtifactV2.key))
        ).group_by(ArtifactV2.project, ArtifactV2.kind)

        counters: dict[str, dict[str, int]] = {}
        for project, kind, count in rows.all():
            # several kinds may map to the same category, so accumulate the counts
            category = mlrun.common.schemas.ArtifactCategories.from_kind(kind)
            per_project = counters.setdefault(category, {})
            per_project[project] = per_project.get(project, 0) + count

        return counters

    @staticmethod
    def _calculate_runs_counters(
        session,
    ) -> tuple[
        dict[str, int],
        dict[str, int],
        dict[str, int],
    ]:
        """
        Calculate per-project run counters for recent activity and current status.

        This method counts only top-level runs (``iteration == 0``), excluding child runs
        from hyperparameter tuning, which are not considered separate jobs.

        :param session: The active DB session used to query the runs.

        :return: A tuple containing:
            - A dictionary of recently completed runs (last 24h) per project.
            - A dictionary of recently failed or aborted runs (last 24h) per project.
            - A dictionary of currently running runs (non-terminal states) per project.
        """
        running_runs_count_per_project = (
            session.query(Run.project, func.count())
            .filter(Run.iteration == 0)
            .filter(
                Run.state.in_(
                    mlrun.common.runtimes.constants.RunStates.non_terminal_states()
                )
            )
            .group_by(Run.project)
            .all()
        )
        project_to_running_runs_count = dict(running_runs_count_per_project)

        # use UTC like the other counters in this file (e.g. the schedule counters);
        # naive local time would shift the 24h window by the server's UTC offset
        one_day_ago = datetime.now(UTC) - timedelta(hours=24)
        recent_failed_runs_count_per_project = (
            session.query(Run.project, func.count())
            .filter(Run.start_time >= one_day_ago)
            .filter(Run.iteration == 0)
            .filter(
                Run.state.in_(
                    [
                        mlrun.common.runtimes.constants.RunStates.error,
                        mlrun.common.runtimes.constants.RunStates.aborted,
                    ]
                )
            )
            .group_by(Run.project)
            .all()
        )
        project_to_recent_failed_runs_count = dict(
            recent_failed_runs_count_per_project
        )

        recent_completed_runs_count_per_project = (
            session.query(Run.project, func.count())
            .filter(Run.start_time >= one_day_ago)
            .filter(Run.iteration == 0)
            .filter(
                Run.state.in_(
                    [
                        mlrun.common.runtimes.constants.RunStates.completed,
                    ]
                )
            )
            .group_by(Run.project)
            .all()
        )
        project_to_recent_completed_runs_count = dict(
            recent_completed_runs_count_per_project
        )
        return (
            project_to_recent_completed_runs_count,
            project_to_recent_failed_runs_count,
            project_to_running_runs_count,
        )

    def _calculate_alert_activations_counters(
        self,
        session,
        projects_with_creation_time: list[tuple[str, datetime]],
    ) -> tuple[
        dict[str, int],
        dict[str, int],
        dict[str, int],
    ]:
        """Count alert activations of the last 24 hours per project, by entity kind.

        :param session: The active DB session.
        :param projects_with_creation_time: (project name, creation time) pairs;
            only activations newer than the project's creation time are counted.
        :return: A tuple of per-project counts:
            (model-endpoint alerts, job alerts, all other alerts).
        """
        # NOTE(review): this returns empty counters on SQLite; the log message
        # mentions partition management - presumably the alert-activations table
        # is partitioned and SQLite doesn't support that, but this function only
        # counts. Confirm whether the message/skip is intentional here.
        if mlrun.mlconf.httpdb.dsn.startswith(Dialects.SQLITE):
            logger.debug("Partition management not supported for SQLite")
            return {}, {}, {}

        project_to_endpoint_alerts_count = collections.defaultdict(int)
        project_to_job_alerts_count = collections.defaultdict(int)
        project_to_other_alerts_count = collections.defaultdict(int)

        last_day = mlrun.utils.datetime_now() - timedelta(hours=24)

        # construct a base query to count different types of alert activations, labels are added to improve readability
        # each func.count(case(...)) counts only rows matching its entity kind:
        # the CASE yields 1 for matching rows and NULL otherwise, and COUNT
        # ignores NULLs - so all three counters come from a single scan
        query = session.query(
            AlertActivation.project,
            func.count(
                case(
                    (
                        AlertActivation.entity_kind
                        == mlrun.common.schemas.alert.EventEntityKind.MODEL_ENDPOINT_RESULT,
                        1,
                    ),
                    else_=None,
                )
            ).label("model_endpoint_alerts_count"),
            func.count(
                case(
                    (
                        AlertActivation.entity_kind
                        == mlrun.common.schemas.alert.EventEntityKind.JOB,
                        1,
                    ),
                    else_=None,
                )
            ).label("job_alerts_count"),
            func.count(
                case(
                    (
                        AlertActivation.entity_kind.notin_(
                            [
                                mlrun.common.schemas.alert.EventEntityKind.MODEL_ENDPOINT_RESULT,
                                mlrun.common.schemas.alert.EventEntityKind.JOB,
                            ]
                        ),
                        1,
                    ),
                    else_=None,
                )
            ).label("other_alerts_count"),
        )

        # filter by project, creation time, and activations within the last 24 hours
        query_results = (
            self._apply_alert_activation_project_filters(
                query, projects_with_creation_time
            )
            .filter(AlertActivation.activation_time > last_day)
            .group_by(AlertActivation.project)
            .all()
        )

        for project, endpoint_counter, job_counter, other_counter in query_results:
            project_to_endpoint_alerts_count[project] = endpoint_counter
            project_to_job_alerts_count[project] = job_counter
            project_to_other_alerts_count[project] = other_counter

        return (
            project_to_endpoint_alerts_count,
            project_to_job_alerts_count,
            project_to_other_alerts_count,
        )

    @staticmethod
    def _apply_alert_activation_project_filters(
        query: sqlalchemy.orm.query.Query,
        projects_with_creation_time: list[tuple[str, datetime]],
    ) -> sqlalchemy.orm.query.Query:
        """Keep only activations of the given projects that happened after each
        project's creation time."""
        conditions = []
        for project, created in projects_with_creation_time:
            conditions.append(
                and_(
                    AlertActivation.project == project,
                    AlertActivation.activation_time > created,
                )
            )
        return query.filter(or_(*conditions))

    def _update_project_record_from_project(
        self,
        session: Session,
        project_record: Project,
        project: mlrun.common.schemas.Project,
    ):
        """Overwrite an existing project record with the given project schema.

        The record's original creation time is preserved: it is copied back into
        the incoming schema before serializing it into full_object.
        """
        project.metadata.created = project_record.created
        project_dict = project.dict()
        # TODO: handle taking out the functions/workflows/artifacts out of the project and save them separately
        project_record.full_object = project_dict
        project_record.description = project.spec.description
        project_record.source = project.spec.source
        project_record.owner = project.spec.owner
        project_record.state = project.status.state
        project_record.default_function_node_selector = (
            project.spec.default_function_node_selector
        )
        # sync the labels table with the (possibly empty) labels of the new spec
        labels = project.metadata.labels or {}
        update_labels(project_record, labels)
        self._upsert(session, [project_record])

    def _patch_project_record_from_project(
        self,
        session: Session,
        name: str,
        project_record: Project,
        project: dict,
        patch_mode: mlrun.common.schemas.PatchMode,
    ):
        """Merge a partial project dict into an existing record and store it.

        :param project: Partial project body to merge into the stored full object.
        :param patch_mode: Determines the mergedeep merge strategy.
        """
        # keep the original creation time regardless of what the patch contains
        project.setdefault("metadata", {})["created"] = project_record.created
        strategy = patch_mode.to_mergedeep_strategy()
        project_record_full_object = project_record.full_object
        mergedeep.merge(project_record_full_object, project, strategy=strategy)

        # If a bad kind value was passed, it will fail here (return 422 to caller)
        project = mlrun.common.schemas.Project(**project_record_full_object)
        self.store_project(
            session,
            name,
            project,
        )

        project_record.full_object = project_record_full_object
        self._upsert(session, [project_record])

    def is_project_exists(self, session: Session, name: str):
        """Return True if a project with the given name exists in the DB."""
        project_record = self._get_project_record(
            session, name, raise_on_not_found=False
        )
        return project_record is not None

    def _get_project_record(
        self,
        session: Session,
        name: typing.Optional[str] = None,
        project_id: typing.Optional[int] = None,
        raise_on_not_found: bool = True,
    ) -> typing.Optional[Project]:
        """Fetch a project record by name and/or id.

        :raise MLRunInvalidArgumentError: when neither name nor project_id is given.
        :raise MLRunNotFoundError: when no record exists and raise_on_not_found is set.
        """
        if not (project_id or name):
            raise mlrun.errors.MLRunInvalidArgumentError(
                "One of 'name' or 'project_id' must be provided"
            )
        record = self._query(
            session, Project, name=name, id=project_id
        ).one_or_none()
        if record is not None:
            return record
        if raise_on_not_found:
            raise mlrun.errors.MLRunNotFoundError(
                f"Project not found: name={name}, project_id={project_id}"
            )
        return None

    def verify_project_has_no_related_resources(self, session: Session, name: str):
        """Raise MLRunPreconditionFailedError if the project still owns resources."""
        # sampling a handful of resources is enough to prove non-emptiness,
        # no need to retrieve everything
        resource_limit = 5
        listers = [
            ("runs", self.list_runs),
            ("artifacts", self._find_artifacts),
            ("schedules", self.list_schedules),
            ("functions", self._list_project_function_names),
            ("feature_sets", self._list_project_feature_set_names),
            ("feature_vectors", self._list_project_feature_vector_names),
        ]
        for resource_name, list_resources in listers:
            found = list_resources(session, project=name, limit=resource_limit)
            self._verify_empty_list_of_project_related_resources(
                name, found, resource_name
            )

        for cls in _with_notifications:
            notifications = self._get_db_notifications(
                session, cls, project=name, limit=resource_limit
            )
            self._verify_empty_list_of_project_related_resources(
                name, notifications, "notifications"
            )

    def delete_project_related_resources(self, session: Session, name: str):
        """Delete every project-scoped resource from the DB for the given project.

        :param session: SQLAlchemy session used for all deletions.
        :param name:    Name of the project whose resources are removed.
        """
        # NOTE(review): the deletion order below looks deliberate (e.g. run
        # notifications are deleted before runs) — confirm ownership/FK
        # constraints before reordering.
        self.delete_model_endpoints(session, project=name)
        self._delete_project_artifacts(session, project=name)
        self.delete_run_notifications(session, project=name)
        self._delete_project_runs(session, project=name)
        self.delete_project_schedules(session, name)
        self._delete_project_functions(session, name)
        self._delete_project_feature_sets(session, name)
        self._delete_project_feature_vectors(session, name)
        self._delete_project_background_tasks(session, project=name)
        self._delete_project_datastore_profiles(session, project=name)

        # resources deletion should remove their tags and labels as well, but doing another try in case there are
        # orphan resources
        self._delete_resources_tags(session, name)
        self._delete_resources_labels(session, name)

    @staticmethod
    def _verify_empty_list_of_project_related_resources(
        project: str, resources: list, resource_name: str
    ):
        if resources:
            raise mlrun.errors.MLRunPreconditionFailedError(
                f"Project {project} can not be deleted since related resources found: {resource_name}"
            )

    def _get_record_by_name_tag_and_uid(
        self,
        session,
        cls,
        project: str,
        name: str,
        tag: typing.Optional[str] = None,
        uid: typing.Optional[str] = None,
        obj_name_attribute="name",
    ):
        """Resolve a tagged/versioned object record for *cls*.

        :returns: a ``(computed_tag, object_tag_uid, record)`` triple where
            ``computed_tag`` is the tag used for resolution ("latest" when no
            tag was given), ``object_tag_uid`` is the uid the tag points at
            (None when resolution went through an explicit uid only), and
            ``record`` is the matching DB row or None.
            Returns ``(None, None, None)`` when a tag lookup was required but
            the tag does not exist.
        """
        kwargs = {obj_name_attribute: name, "project": project}
        query = self._query(session, cls, **kwargs)
        computed_tag = tag or mlrun.common.constants.RESERVED_TAG_NAME_LATEST
        object_tag_uid = None
        # An explicit tag takes precedence over an explicit uid: when both are
        # given, uid is overwritten with the uid the tag resolves to.
        if tag or not uid:
            object_tag_uid = self._resolve_class_tag_uid(
                session, cls, project, name, computed_tag
            )
            if object_tag_uid is None:
                return None, None, None
            uid = object_tag_uid
        if uid:
            query = query.filter(cls.uid == uid)
        return computed_tag, object_tag_uid, query.one_or_none()

    # ---- Feature sets ----
    def create_feature_set(
        self,
        session,
        project,
        feature_set: mlrun.common.schemas.FeatureSet,
        versioned=True,
    ) -> str:
        """Create a new feature-set record, tag it, and return its uid.

        :raises mlrun.errors.MLRunConflictError: if an identical record
            already exists (raised by the validation helper).
        """
        uid, tag, feature_set_dict = self._validate_and_enrich_record_for_creation(
            session, feature_set, FeatureSet, project, versioned
        )

        record = FeatureSet(project=project)
        self._update_db_record_from_object_dict(record, feature_set_dict, uid)
        # features/entities live in dedicated tables and are synced separately
        self._update_feature_set_spec(record, feature_set_dict)

        self._upsert(session, [record])
        self.tag_objects_v2(session, [record], project, tag)

        return uid

    def patch_feature_set(
        self,
        session,
        project,
        name,
        feature_set_patch: dict,
        tag=None,
        uid=None,
        patch_mode: mlrun.common.schemas.PatchMode = mlrun.common.schemas.PatchMode.replace,
    ) -> str:
        """Patch an existing feature-set by deep-merging the patch into it.

        :param feature_set_patch: Partial feature-set dict to merge in.
        :param patch_mode: Merge strategy, translated to a mergedeep strategy.
        :returns: uid of the stored (patched) feature-set.
        :raises mlrun.errors.MLRunNotFoundError: if the feature-set does not exist.
        """
        feature_set_record = self._get_feature_set(session, project, name, tag, uid)
        if not feature_set_record:
            feature_set_uri = generate_object_uri(project, name, tag)
            raise mlrun.errors.MLRunNotFoundError(
                f"Feature-set not found {feature_set_uri}"
            )

        feature_set_struct = feature_set_record.dict(exclude_none=True)
        # using mergedeep for merging the patch content into the existing dictionary
        strategy = patch_mode.to_mergedeep_strategy()
        mergedeep.merge(feature_set_struct, feature_set_patch, strategy=strategy)

        # only versioned records carry a uid in their metadata
        versioned = feature_set_record.metadata.uid is not None

        # If a bad kind value was passed, it will fail here (return 422 to caller)
        feature_set = mlrun.common.schemas.FeatureSet(**feature_set_struct)
        return self.store_feature_set(
            session,
            project,
            name,
            feature_set,
            feature_set.metadata.tag,
            uid,
            versioned,
            always_overwrite=True,
        )

    def get_feature_set(
        self,
        session,
        project: str,
        name: str,
        tag: typing.Optional[str] = None,
        uid: typing.Optional[str] = None,
    ) -> mlrun.common.schemas.FeatureSet:
        """Return a feature-set by project/name, optionally pinned to a tag or uid.

        :raises mlrun.errors.MLRunNotFoundError: if no matching record exists.
        """
        feature_set = self._get_feature_set(session, project, name, tag, uid)
        if feature_set:
            return feature_set

        feature_set_uri = generate_object_uri(project, name, tag)
        raise mlrun.errors.MLRunNotFoundError(
            f"Feature-set not found {feature_set_uri}"
        )

    def _get_feature_set(
        self,
        session,
        project: str,
        name: str,
        tag: typing.Optional[str] = None,
        uid: typing.Optional[str] = None,
    ):
        """Return the feature-set schema object, or None when not found."""
        computed_tag, tag_uid, record = self._get_record_by_name_tag_and_uid(
            session, FeatureSet, project, name, tag, uid
        )
        if not record:
            return None

        feature_set = self._transform_feature_set_model_to_schema(record)
        # Surface the tag on the metadata only when the record was reached
        # through a tag reference.
        if tag_uid:
            feature_set.metadata.tag = computed_tag
        return feature_set

    def _get_model_endpoint(
        self,
        session,
        project: str,
        name: str,
        function_name: typing.Optional[str] = None,
        function_tag: typing.Optional[str] = None,
        uid: typing.Optional[str] = None,
    ) -> typing.Union[ModelEndpoint, None]:
        """Fetch a model endpoint record, by uid when given, otherwise the
        latest instance for the given function name/tag; None when missing."""
        self._check_model_endpoint_params(uid, function_name, function_tag)
        if uid:
            return self._get_class_instance_by_uid(
                session, ModelEndpoint, name, project, uid
            )
        return self._get_mep_latest_instance(
            session, ModelEndpoint, name, function_name, project, function_tag
        )

    @staticmethod
    def _check_model_endpoint_params(uid: str, function_name: str, function_tag: str):
        if not uid and (not function_name or not function_tag):
            raise mlrun.errors.MLRunNotFoundError(
                "Either uid or function_name and function_tag must be provided"
            )

    def _get_records_to_tags_map(self, session, cls, project, tag, name=None):
        # Find object IDs by tag, project and object-name (which is a like query)
        tag_query = self._query(session, cls.Tag, project=project, name=tag)
        if name:
            tag_query = tag_query.filter(
                generate_query_predicate_for_name(cls.Tag.obj_name, name)
            )

        # Generate a mapping from each object id (note: not uid, it's the DB ID) to its associated tags.
        obj_id_tags = {}
        for row in tag_query:
            if row.obj_id in obj_id_tags:
                obj_id_tags[row.obj_id].append(row.name)
            else:
                obj_id_tags[row.obj_id] = [row.name]
        return obj_id_tags

    def _generate_records_with_tags_assigned(
        self, object_record, transform_fn, obj_id_tags, default_tag=None, format_=None
    ):
        # Using a similar mechanism here to assign tags to feature sets as is used in list_functions. Please refer
        # there for some comments explaining the logic.
        results = []
        if default_tag:
            results.append(transform_fn(object_record, default_tag, format_=format_))
        else:
            object_tags = obj_id_tags.get(object_record.id, [])
            if len(object_tags) == 0 and not object_record.uid.startswith(
                unversioned_tagged_object_uid_prefix
            ):
                new_object = transform_fn(object_record, format_=format_)
                results.append(new_object)
            else:
                for object_tag in object_tags:
                    results.append(
                        transform_fn(object_record, object_tag, format_=format_)
                    )
        return results

    @staticmethod
    def _generate_feature_set_digest(feature_set: mlrun.common.schemas.FeatureSet):
        """Build a digest (metadata plus entities/features) from a feature-set."""
        digest_spec = mlrun.common.schemas.FeatureSetDigestSpec(
            entities=feature_set.spec.entities,
            features=feature_set.spec.features,
        )
        return mlrun.common.schemas.FeatureSetDigestOutput(
            metadata=feature_set.metadata, spec=digest_spec
        )

    def _generate_feature_or_entity_list_query(
        self,
        session,
        query_class,
        project: str,
        feature_set_keys,
        name: typing.Optional[str] = None,
        tag: typing.Optional[str] = None,
        labels: typing.Optional[list[str]] = None,
    ):
        """Build the joined FeatureSet/<Feature|Entity> query used by the
        list-features/list-entities endpoints, applying the optional name,
        label and tag filters.
        """
        query = (
            session.query(FeatureSet, query_class)
            .filter_by(project=project)
            .join(query_class)
        )

        if name:
            # the name filter applies to the feature/entity, not the feature-set
            predicate = generate_query_predicate_for_name(query_class.name, name)
            query = query.filter(predicate)
        if labels:
            query = self._add_labels_filter(session, query, query_class, labels)
        if tag:
            # feature_set_keys were already resolved from the tag by the caller
            query = query.filter(FeatureSet.id.in_(feature_set_keys))

        return query

    @staticmethod
    def _dedup_and_append_feature_set(
        feature_set, feature_set_id_to_index, feature_set_digests_v2
    ):
        # dedup feature set list
        # we can rely on the object ID because SQLAlchemy already avoids duplication at the object
        # level, and the conversion from "model" to "schema" retains this property
        feature_set_obj_id = id(feature_set)
        feature_set_index = feature_set_id_to_index.get(feature_set_obj_id, None)
        if feature_set_index is None:
            feature_set_index = len(feature_set_id_to_index)
            feature_set_id_to_index[feature_set_obj_id] = feature_set_index
            feature_set_digests_v2.append(
                FeatureSetDigestOutputV2(
                    feature_set_index=feature_set_index,
                    metadata=feature_set.metadata,
                    spec=FeatureSetDigestSpecV2(
                        entities=feature_set.spec.entities,
                    ),
                )
            )
        return feature_set_index

    @staticmethod
    def _build_feature_mapping_from_feature_set(feature_set):
        result = {}
        for feature in feature_set.spec.features:
            result[feature.name] = feature
        return result

    @staticmethod
    def _build_entity_mapping_from_feature_set(feature_set):
        result = {}
        for entity in feature_set.spec.entities:
            result[entity.name] = entity
        return result

    def list_features_v2(
        self,
        session,
        project: str,
        name: typing.Optional[str] = None,
        tag: typing.Optional[str] = None,
        entities: typing.Optional[list[str]] = None,
        labels: typing.Optional[list[str]] = None,
    ) -> mlrun.common.schemas.FeaturesOutputV2:
        """List features across the project's feature-sets (v2 response shape).

        :param name:     Feature-name filter (like-query) — NOT a feature-set name.
        :param tag:      Feature-set tag filter.
        :param entities: Only include features from feature-sets having these entities.
        :param labels:   Feature label filters.
        :returns: Features, each referencing (by index) an entry in a
            deduplicated feature-set digest list.
        """
        # We don't filter by feature-set name here, as the name parameter refers to features
        feature_set_id_tags = self._get_records_to_tags_map(
            session, FeatureSet, project, tag, name=None
        )

        query = self._generate_feature_or_entity_list_query(
            session, Feature, project, feature_set_id_tags.keys(), name, tag, labels
        )

        if entities:
            query = query.join(FeatureSet.entities).filter(Entity.name.in_(entities))

        features_with_feature_set_index: list[Feature] = []
        feature_set_digests_v2: list[FeatureSetDigestOutputV2] = []
        feature_set_digest_id_to_index: dict[int, int] = {}

        # Memoize the per-row transformations — the same feature-set row shows
        # up once per feature it contains.
        transform_feature_set_model_to_schema = MemoizationCache(
            self._transform_feature_set_model_to_schema
        ).memoize
        build_feature_mapping_from_feature_set = MemoizationCache(
            self._build_feature_mapping_from_feature_set
        ).memoize

        for row in query:
            feature_record = mlrun.common.schemas.FeatureRecord.from_orm(row.Feature)
            feature_name = feature_record.name

            feature_sets = self._generate_records_with_tags_assigned(
                row.FeatureSet,
                transform_feature_set_model_to_schema,
                feature_set_id_tags,
                tag,
            )

            for feature_set in feature_sets:
                # Get the feature from the feature-set full structure, as it may contain extra fields (which are not
                # in the DB)
                feature_name_to_feature = build_feature_mapping_from_feature_set(
                    feature_set
                )
                feature = feature_name_to_feature.get(feature_name)
                if not feature:
                    raise mlrun.errors.MLRunInternalServerError(
                        "Inconsistent data in DB - features in DB not in feature-set document"
                    )

                feature_set_index = self._dedup_and_append_feature_set(
                    feature_set, feature_set_digest_id_to_index, feature_set_digests_v2
                )
                features_with_feature_set_index.append(
                    feature.copy(update=dict(feature_set_index=feature_set_index))
                )

        return mlrun.common.schemas.FeaturesOutputV2(
            features=features_with_feature_set_index,
            feature_set_digests=feature_set_digests_v2,
        )

    def list_entities_v2(
        self,
        session,
        project: str,
        name: typing.Optional[str] = None,
        tag: typing.Optional[str] = None,
        labels: typing.Optional[list[str]] = None,
    ) -> mlrun.common.schemas.EntitiesOutputV2:
        """List entities across the project's feature-sets (v2 response shape).

        :param name:   Entity-name filter (like-query).
        :param tag:    Feature-set tag filter.
        :param labels: Entity label filters.
        :returns: Entities, each referencing (by index) an entry in a
            deduplicated feature-set digest list.
        """
        feature_set_id_tags = self._get_records_to_tags_map(
            session, FeatureSet, project, tag, name=None
        )

        query = self._generate_feature_or_entity_list_query(
            session, Entity, project, feature_set_id_tags.keys(), name, tag, labels
        )

        entities_with_feature_set_index: list[Entity] = []
        feature_set_digests_v2: list[FeatureSetDigestOutputV2] = []
        feature_set_digest_id_to_index: dict[int, int] = {}

        # Memoize the per-row transformations — the same feature-set row shows
        # up once per entity it contains.
        transform_feature_set_model_to_schema = MemoizationCache(
            self._transform_feature_set_model_to_schema
        ).memoize
        build_entity_mapping_from_feature_set = MemoizationCache(
            self._build_entity_mapping_from_feature_set
        ).memoize

        for row in query:
            # NOTE(review): FeatureRecord is reused for entity rows —
            # presumably the schemas are shape-compatible; confirm.
            entity_record = mlrun.common.schemas.FeatureRecord.from_orm(row.Entity)
            entity_name = entity_record.name

            feature_sets = self._generate_records_with_tags_assigned(
                row.FeatureSet,
                transform_feature_set_model_to_schema,
                feature_set_id_tags,
                tag,
            )

            for feature_set in feature_sets:
                # Get the feature from the feature-set full structure, as it may contain extra fields (which are not
                # in the DB)
                entity_name_to_feature = build_entity_mapping_from_feature_set(
                    feature_set
                )
                entity = entity_name_to_feature.get(entity_name)
                if not entity:
                    raise mlrun.errors.MLRunInternalServerError(
                        "Inconsistent data in DB - entities in DB not in feature-set document"
                    )

                feature_set_index = self._dedup_and_append_feature_set(
                    feature_set, feature_set_digest_id_to_index, feature_set_digests_v2
                )
                entities_with_feature_set_index.append(
                    entity.copy(update=dict(feature_set_index=feature_set_index))
                )

        return mlrun.common.schemas.EntitiesOutputV2(
            entities=entities_with_feature_set_index,
            feature_set_digests=feature_set_digests_v2,
        )

    @staticmethod
    def _assert_partition_by_parameters(partition_by_enum_cls, partition_by, sort):
        if sort is None:
            raise mlrun.errors.MLRunInvalidArgumentError(
                "sort parameter must be provided when partition_by is used."
            )
        # For now, name is the only supported value. Remove once more fields are added.
        if partition_by not in partition_by_enum_cls:
            valid_enum_values = [
                enum_value.value for enum_value in partition_by_enum_cls
            ]
            raise mlrun.errors.MLRunInvalidArgumentError(
                f"Invalid partition_by given: '{partition_by.value}'. Must be one of {valid_enum_values}"
            )

    @staticmethod
    def _create_partitioned_query(
        session,
        query,
        cls,
        partition_by: typing.Union[
            mlrun.common.schemas.FeatureStorePartitionByField,
            mlrun.common.schemas.RunPartitionByField,
            mlrun.common.schemas.ArtifactPartitionByField,
        ],
        rows_per_partition: int,
        partition_sort_by: mlrun.common.schemas.SortField,
        partition_order: mlrun.common.schemas.OrderType,
        max_partitions: int = 0,
        with_tagged: bool = False,
    ):
        """Wrap *query* so that only the top ``rows_per_partition`` rows of each
        partition are returned, using a ``row_number()`` window function.

        :param partition_by: Field to partition the result set by.
        :param partition_sort_by: Field used for ordering within a partition.
        :param partition_order: Direction of the in-partition ordering.
        :param max_partitions: When > 0, additionally keep only the
            ``max_partitions`` top-ranked partitions (ranked by each
            partition's max sort value, descending).
        :param with_tagged: Also carry the tag name/id columns through the
            subquery — assumes the incoming query already joined ``cls.Tag``.
        :returns: A new query over ``cls`` (plus tag columns when requested).
        """
        partition_field = partition_by.to_partition_by_db_field(cls)
        sort_by_field = partition_sort_by.to_db_field(cls)

        row_number_column = (
            func.row_number()
            .over(
                partition_by=partition_field,
                order_by=partition_order.to_order_by_predicate(sort_by_field),
            )
            .label("row_number")
        )

        # Retrieve only the ID from the subquery to minimize the inner table,
        # in the final step we inner join the inner table with the full table.
        query = query.with_entities(
            cls.id,
            *(cls.Tag.name.label("tag_name"), cls.Tag.id.label("tag_id"))
            if with_tagged
            else (),
        ).add_column(row_number_column)
        if max_partitions > 0:
            max_partition_value = (
                func.max(sort_by_field)
                .over(
                    partition_by=partition_field,
                )
                .label("max_partition_value")
            )
            query = query.add_column(max_partition_value)

        # Need to generate a subquery so we can filter based on the row_number, since it
        # is a window function using over().
        subquery = query.subquery()
        if max_partitions == 0:
            result_query = session.query(cls)
            if with_tagged:
                result_query = result_query.add_columns(
                    subquery.c.tag_name,
                    subquery.c.tag_id,
                )
            result_query = result_query.join(subquery, cls.id == subquery.c.id).filter(
                subquery.c.row_number <= rows_per_partition
            )
            return result_query

        result_query = session.query(subquery).filter(
            subquery.c.row_number <= rows_per_partition
        )

        # We query on max-partitions, so need to do another sub-query and order per the latest updated time of
        # a run in the partition.
        partition_rank = (
            func.dense_rank()
            .over(order_by=subquery.c.max_partition_value.desc())
            .label("partition_rank")
        )
        subquery = result_query.add_column(partition_rank).subquery()
        result_query = (
            session.query(cls)
            .join(subquery, cls.id == subquery.c.id)
            .filter(subquery.c.partition_rank <= max_partitions)
        )
        return result_query

    def list_feature_sets(
        self,
        session,
        project: str,
        name: typing.Optional[str] = None,
        tag: typing.Optional[str] = None,
        state: typing.Optional[str] = None,
        entities: typing.Optional[list[str]] = None,
        features: typing.Optional[list[str]] = None,
        labels: typing.Optional[list[str]] = None,
        partition_by: mlrun.common.schemas.FeatureStorePartitionByField = None,
        rows_per_partition: int = 1,
        partition_sort_by: mlrun.common.schemas.SortField = None,
        partition_order: mlrun.common.schemas.OrderType = mlrun.common.schemas.OrderType.desc,
        format_: mlrun.common.formatters.FeatureSetFormat = mlrun.common.formatters.FeatureSetFormat.full,
    ) -> mlrun.common.schemas.FeatureSetsOutput:
        """List a project's feature-sets with optional filters and partitioning.

        :param name:     Feature-set name filter (like-query).
        :param tag:      Tag filter; each matching record is returned once per tag.
        :param state:    Feature-set state filter.
        :param entities / features: Only feature-sets containing these
            entity/feature names.
        :param labels:   Label filters.
        :param partition_by: When set, group results and return up to
            ``rows_per_partition`` per group, ordered by
            ``partition_sort_by``/``partition_order``.
        :param format_:  Output format for the transformed schema objects.
        """
        obj_id_tags = self._get_records_to_tags_map(
            session, FeatureSet, project, tag, name
        )

        # Query the actual objects to be returned
        query = self._query(session, FeatureSet, project=project, state=state)

        if name is not None:
            query = query.filter(
                generate_query_predicate_for_name(FeatureSet.name, name)
            )
        if tag:
            query = query.filter(FeatureSet.id.in_(obj_id_tags.keys()))
        if entities:
            query = query.join(FeatureSet.entities).filter(Entity.name.in_(entities))
        if features:
            query = query.join(FeatureSet.features).filter(Feature.name.in_(features))
        if labels:
            query = self._add_labels_filter(session, query, FeatureSet, labels)

        if partition_by:
            self._assert_partition_by_parameters(
                mlrun.common.schemas.FeatureStorePartitionByField,
                partition_by,
                partition_sort_by,
            )
            query = self._create_partitioned_query(
                session,
                query,
                FeatureSet,
                partition_by,
                rows_per_partition,
                partition_sort_by,
                partition_order,
            )

        feature_sets = []
        for feature_set_record in query:
            feature_sets.extend(
                self._generate_records_with_tags_assigned(
                    feature_set_record,
                    self._transform_feature_set_model_to_schema,
                    obj_id_tags,
                    tag,
                    format_=format_,
                )
            )
        return mlrun.common.schemas.FeatureSetsOutput(feature_sets=feature_sets)

    def list_feature_sets_tags(
        self,
        session,
        project: str,
    ):
        """Return distinct (project, feature_set_name, tag_name) triples for
        every tagged feature-set in the project."""
        query = (
            session.query(FeatureSet.name, FeatureSet.Tag.name)
            .filter(FeatureSet.Tag.project == project)
            .join(FeatureSet, FeatureSet.Tag.obj_id == FeatureSet.id)
            .distinct()
        )
        return [(project, fs_name, tag_name) for fs_name, tag_name in query]

    @staticmethod
    def _update_feature_set_features(
        feature_set: FeatureSet, feature_dicts: list[dict]
    ):
        """Sync the feature-set's Feature rows with the incoming feature dicts:
        remove features that disappeared, append new ones, and refresh labels
        on features that remain.
        """
        new_features = set(feature_dict["name"] for feature_dict in feature_dicts)
        current_features = set(feature.name for feature in feature_set.features)

        features_to_remove = current_features.difference(new_features)
        features_to_add = new_features.difference(current_features)

        # drop rows whose feature is no longer part of the incoming spec
        feature_set.features = [
            feature
            for feature in feature_set.features
            if feature.name not in features_to_remove
        ]

        for feature_dict in feature_dicts:
            feature_name = feature_dict["name"]
            if feature_name in features_to_add:
                labels = feature_dict.get("labels") or {}
                feature = Feature(
                    name=feature_dict["name"],
                    value_type=feature_dict["value_type"],
                    labels=[],
                )
                update_labels(feature, labels)
                feature_set.features.append(feature)
            elif feature_name not in features_to_remove:
                # get the existing feature from the feature set
                feature = next(
                    (
                        feature
                        for feature in feature_set.features
                        if feature.name == feature_name
                    ),
                    None,
                )
                if feature:
                    # update it with the new labels in case they were changed
                    labels = feature_dict.get("labels") or {}
                    update_labels(feature, labels)

    @staticmethod
    def _update_feature_set_entities(feature_set: FeatureSet, entity_dicts: list[dict]):
        """Sync the feature-set's Entity rows with the incoming entity dicts:
        remove entities that disappeared and append new ones with their labels
        (entities that remain are left untouched).
        """
        incoming_names = {entity_dict["name"] for entity_dict in entity_dicts}
        existing_names = {entity.name for entity in feature_set.entities}

        stale_names = existing_names - incoming_names
        fresh_names = incoming_names - existing_names

        # drop rows whose entity is no longer part of the incoming spec
        feature_set.entities = [
            entity for entity in feature_set.entities if entity.name not in stale_names
        ]

        for entity_dict in entity_dicts:
            if entity_dict["name"] not in fresh_names:
                continue
            labels = entity_dict.get("labels") or {}
            entity = Entity(
                name=entity_dict["name"],
                value_type=entity_dict["value_type"],
                labels=[],
            )
            update_labels(entity, labels)
            feature_set.entities.append(entity)

    def _update_feature_set_spec(
        self, feature_set: FeatureSet, new_feature_set_dict: dict
    ):
        """Pop features/entities out of the spec dict and sync them into their
        dedicated Feature/Entity tables."""
        spec = new_feature_set_dict.get("spec")
        feature_dicts = spec.pop("features", [])
        entity_dicts = spec.pop("entities", [])
        self._update_feature_set_features(feature_set, feature_dicts)
        self._update_feature_set_entities(feature_set, entity_dicts)

    @staticmethod
    def _common_object_validate_and_perform_uid_change(
        object_dict: dict,
        tag,
        versioned,
        existing_uid=None,
    ):
        """Compute (and set on the dict) the object's uid, enforcing uid
        immutability for objects addressed by uid.

        Versioned objects get a content-hash uid; unversioned ones get a
        tag-derived uid.

        :raises mlrun.errors.MLRunInvalidArgumentError: if the computed uid
            differs from the uid the object was addressed by.
        """
        uid = fill_object_hash(object_dict, "uid", tag)
        if not versioned:
            uid = f"{unversioned_tagged_object_uid_prefix}{tag}"
            object_dict["metadata"]["uid"] = uid

        # If object was referenced by UID, the request cannot modify it
        if existing_uid and uid != existing_uid:
            raise mlrun.errors.MLRunInvalidArgumentError(
                "Changing uid for an object referenced by its uid"
            )
        return uid

    @staticmethod
    def _update_db_record_from_object_dict(
        db_object,
        common_object_dict: dict,
        uid,
    ):
        """Copy the common metadata/status fields from the object dict onto the
        DB record, and sync timestamps/uid back into the dict so the stored
        ``full_object`` matches the row's columns.
        """
        db_object.name = common_object_dict["metadata"]["name"]
        updated_datetime = datetime.now(UTC)
        db_object.updated = updated_datetime
        # keep the original creation time; fall back to "now" on first store
        if not db_object.created:
            db_object.created = common_object_dict["metadata"].pop(
                "created", None
            ) or datetime.now(UTC)
        db_object.state = common_object_dict.get("status", {}).get("state")
        db_object.uid = uid

        common_object_dict["metadata"]["updated"] = str(updated_datetime)
        common_object_dict["metadata"]["created"] = str(db_object.created)

        # In case of an unversioned object, we don't want to return uid to user queries. However,
        # the uid DB field has to be set, since it's used for uniqueness in the DB.
        if uid.startswith(unversioned_tagged_object_uid_prefix):
            common_object_dict["metadata"].pop("uid", None)

        db_object.full_object = common_object_dict

        # labels are stored in a separate table
        labels = common_object_dict["metadata"].pop("labels", {}) or {}
        update_labels(db_object, labels)

    @retry_on_conflict
    def store_feature_set(
        self,
        session,
        project,
        name,
        feature_set: mlrun.common.schemas.FeatureSet,
        tag=None,
        uid=None,
        versioned=True,
        always_overwrite=False,
    ) -> str:
        """Create or overwrite a feature-set (retried on conflict).

        Thin wrapper delegating to the generic tagged-object store logic.

        :returns: the stored feature-set's uid.
        """
        return self._store_tagged_object(
            session,
            FeatureSet,
            project,
            name,
            feature_set,
            tag=tag,
            uid=uid,
            versioned=versioned,
            always_overwrite=always_overwrite,
        )

    def _store_tagged_object(
        self,
        session,
        cls,
        project,
        name,
        tagged_object: typing.Union[
            mlrun.common.schemas.FeatureVector,
            mlrun.common.schemas.FeatureSet,
        ],
        tag=None,
        uid=None,
        versioned=True,
        always_overwrite=False,
    ):
        """Store (create, overwrite or re-tag) a tagged, versioned object.

        Resolution logic:

        * A record already exists under the given tag/uid — overwrite it when
          the content uid matches (or ``always_overwrite``); otherwise try to
          re-tag an existing record with the same content uid, and only then
          create a new record.
        * No record under the tag/uid — try a re-tag by content uid first,
          otherwise create a brand-new record.

        :returns: the object's computed uid.
        """
        original_uid = uid

        # record with the given tag/uid
        _, _, existing_tagged_object = self._get_record_by_name_tag_and_uid(
            session, cls, project, name, tag, uid
        )

        tagged_object_dict = tagged_object.dict(exclude_none=True)

        # get the computed uid
        uid = self._common_object_validate_and_perform_uid_change(
            tagged_object_dict, tag, versioned, original_uid
        )

        if existing_tagged_object:
            if uid == existing_tagged_object.uid or always_overwrite:
                db_tagged_object = existing_tagged_object
            else:
                # In case an object with the given tag (or 'latest' which is the default) and name, but different uid
                # was found - Check If an object with the same computed uid but different tag already exists
                # and re-tag it.
                if self._re_tag_existing_object(session, cls, project, name, tag, uid):
                    return uid

                db_tagged_object = cls(project=project)

            self._update_db_record_from_object_dict(
                db_tagged_object, tagged_object_dict, uid
            )

            if cls == FeatureSet:
                self._update_feature_set_spec(db_tagged_object, tagged_object_dict)
            self._upsert(session, [db_tagged_object])
            if tag:
                self.tag_objects_v2(session, [db_tagged_object], project, tag)
            return uid

        # Object with the given tag/uid doesn't exist
        # Check if this is a re-tag of existing object - search by uid only
        if self._re_tag_existing_object(session, cls, project, name, tag, uid):
            return uid

        tagged_object.metadata.tag = tag
        return self._create_tagged_object(
            session, project, cls, tagged_object, versioned
        )

    def _create_tagged_object(
        self,
        session,
        project,
        cls,
        tagged_object: typing.Union[
            mlrun.common.schemas.FeatureVector,
            mlrun.common.schemas.FeatureSet,
            dict,
        ],
        versioned=True,
    ):
        """Create a brand-new record for the tagged object, tag it, and return
        its uid."""
        uid, tag, object_dict = self._validate_and_enrich_record_for_creation(
            session, tagged_object, cls, project, versioned
        )

        record = cls(project=project)
        self._update_db_record_from_object_dict(record, object_dict, uid)
        # feature-sets keep features/entities in dedicated tables
        if cls == FeatureSet:
            self._update_feature_set_spec(record, object_dict)

        self._upsert(session, [record])
        self.tag_objects_v2(session, [record], project, tag)

        return uid

    def _re_tag_existing_object(
        self,
        session,
        cls,
        project,
        name,
        tag,
        uid,
        obj_name_attribute: str = "name",
    ):
        """If an object with this uid already exists, point the tag at it.

        :returns: The existing DB record (after tagging it) when found,
            otherwise ``None``.
        """
        # Look the object up by uid only - the requested tag may not exist yet.
        _, _, record = self._get_record_by_name_tag_and_uid(
            session,
            cls,
            project,
            name,
            None,
            uid,
            obj_name_attribute=obj_name_attribute,
        )
        if not record:
            return None

        self.tag_objects_v2(
            session,
            [record],
            project,
            tag,
            obj_name_attribute=obj_name_attribute,
        )
        return record

    def _validate_and_enrich_record_for_creation(
        self,
        session,
        new_object,
        db_class,
        project,
        versioned,
    ):
        """Compute the object's uid, verify it doesn't already exist, and return
        ``(uid, tag, object-dict)`` ready for DB insertion.

        :raises MLRunConflictError: When an object with the same uid already exists.
        """
        object_dict = new_object.dict(exclude_none=True)
        # The hash covers the object contents and is stamped into the dict's "uid".
        hash_key = fill_object_hash(object_dict, "uid", new_object.metadata.tag)

        if not versioned:
            # Unversioned objects get a synthetic, tag-derived uid instead of a hash.
            uid = f"{unversioned_tagged_object_uid_prefix}{new_object.metadata.tag}"
            object_dict["metadata"]["uid"] = uid
        else:
            uid = hash_key

        if self._get_class_instance_by_uid(
            session, db_class, new_object.metadata.name, project, uid
        ):
            object_type = new_object.__class__.__name__
            object_uri = generate_object_uri(
                project, new_object.metadata.name, new_object.metadata.tag
            )
            raise mlrun.errors.MLRunConflictError(
                f"Adding an already-existing {object_type} - {object_uri}"
            )

        return uid, new_object.metadata.tag, object_dict

    def _delete_project_feature_sets(self, session: Session, project: str):
        """Delete every feature-set record belonging to the given project."""
        logger.debug("Removing project feature-sets from db", project=project)
        self._delete_multi_objects(
            session=session, main_table=FeatureSet, project=project
        )

    def _list_project_feature_set_names(
        self, session: Session, project: str, limit: Optional[int] = None
    ) -> list[str]:
        """Return the distinct feature-set names in a project, optionally capped
        at ``limit`` results."""
        query = self._query(session, distinct(FeatureSet.name), project=project)
        if limit:
            query = query.limit(limit)
        return [row[0] for row in query.all()]

    def delete_feature_set(self, session, project, name, tag=None, uid=None):
        """Delete a feature-set by name, optionally narrowed to a tag or uid."""
        self._delete_tagged_object(
            session, FeatureSet, project=project, tag=tag, uid=uid, name=name
        )

    # ---- Feature Vectors ----
    def create_feature_vector(
        self,
        session,
        project,
        feature_vector: mlrun.common.schemas.FeatureVector,
        versioned=True,
    ) -> str:
        """Create a new feature-vector record and tag it.

        :returns: The uid of the created feature-vector.
        :raises MLRunConflictError: When the feature-vector already exists.
        """
        uid, tag, feature_vector_dict = self._validate_and_enrich_record_for_creation(
            session, feature_vector, FeatureVector, project, versioned
        )

        record = FeatureVector(project=project)
        self._update_db_record_from_object_dict(record, feature_vector_dict, uid)

        self._upsert(session, [record])
        self.tag_objects_v2(session, [record], project, tag)
        return uid

    def get_feature_vector(
        self,
        session,
        project: str,
        name: str,
        tag: typing.Optional[str] = None,
        uid: typing.Optional[str] = None,
    ) -> mlrun.common.schemas.FeatureVector:
        """Fetch a feature-vector, raising when it does not exist.

        :raises MLRunNotFoundError: When no matching feature-vector is found.
        """
        feature_vector = self._get_feature_vector(session, project, name, tag, uid)
        if feature_vector:
            return feature_vector

        feature_vector_uri = generate_object_uri(project, name, tag)
        raise mlrun.errors.MLRunNotFoundError(
            f"Feature-vector not found {feature_vector_uri}"
        )

    def _get_feature_vector(
        self,
        session,
        project: str,
        name: str,
        tag: typing.Optional[str] = None,
        uid: typing.Optional[str] = None,
    ):
        """Fetch a feature-vector record and convert it to its schema object.

        :returns: The schema object, or ``None`` when no record matches.
        """
        computed_tag, tag_uid, record = self._get_record_by_name_tag_and_uid(
            session, FeatureVector, project, name, tag, uid
        )
        if not record:
            return None

        feature_vector = self._transform_feature_vector_model_to_schema(record)
        # Reflect the resolved tag on the returned object when one was matched.
        if tag_uid:
            feature_vector.metadata.tag = computed_tag
        return feature_vector

    def list_feature_vectors(
        self,
        session,
        project: str,
        name: typing.Optional[str] = None,
        tag: typing.Optional[str] = None,
        state: typing.Optional[str] = None,
        labels: typing.Optional[list[str]] = None,
        partition_by: mlrun.common.schemas.FeatureStorePartitionByField = None,
        rows_per_partition: int = 1,
        partition_sort_by: mlrun.common.schemas.SortField = None,
        partition_order: mlrun.common.schemas.OrderType = mlrun.common.schemas.OrderType.desc,
    ) -> mlrun.common.schemas.FeatureVectorsOutput:
        """List feature-vectors with optional name/tag/state/label filtering and
        partitioning support."""
        # Map record ids to their tags so tags can be attached to the results.
        obj_id_tags = self._get_records_to_tags_map(
            session, FeatureVector, project, tag, name
        )

        # Query the actual objects to be returned
        query = self._query(session, FeatureVector, project=project, state=state)
        if name is not None:
            query = query.filter(
                generate_query_predicate_for_name(FeatureVector.name, name)
            )
        if tag:
            # Restrict to records that actually carry the requested tag.
            query = query.filter(FeatureVector.id.in_(obj_id_tags.keys()))
        if labels:
            query = self._add_labels_filter(session, query, FeatureVector, labels)

        if partition_by:
            self._assert_partition_by_parameters(
                mlrun.common.schemas.FeatureStorePartitionByField,
                partition_by,
                partition_sort_by,
            )
            query = self._create_partitioned_query(
                session,
                query,
                FeatureVector,
                partition_by,
                rows_per_partition,
                partition_sort_by,
                partition_order,
            )

        # Expand each DB record into one schema object per matching tag.
        feature_vectors = [
            feature_vector
            for record in query
            for feature_vector in self._generate_records_with_tags_assigned(
                record,
                self._transform_feature_vector_model_to_schema,
                obj_id_tags,
                tag,
            )
        ]
        return mlrun.common.schemas.FeatureVectorsOutput(
            feature_vectors=feature_vectors
        )

    def list_feature_vectors_tags(
        self,
        session,
        project: str,
    ):
        """Return distinct (project, feature-vector name, tag name) triples for
        all tagged feature-vectors in the project."""
        rows = (
            session.query(FeatureVector.name, FeatureVector.Tag.name)
            .filter(FeatureVector.Tag.project == project)
            .join(FeatureVector, FeatureVector.Tag.obj_id == FeatureVector.id)
            .distinct()
        )
        return [(project, vector_name, tag_name) for vector_name, tag_name in rows]

    @retry_on_conflict
    def store_feature_vector(
        self,
        session,
        project,
        name,
        feature_vector: mlrun.common.schemas.FeatureVector,
        tag=None,
        uid=None,
        versioned=True,
        always_overwrite=False,
    ) -> str:
        """Store (create or overwrite) a feature-vector.

        Thin wrapper over the generic tagged-object store flow; retried on
        conflict by the decorator.

        :returns: The stored feature-vector's uid.
        """
        return self._store_tagged_object(
            session,
            FeatureVector,
            project,
            name,
            feature_vector,
            tag=tag,
            uid=uid,
            versioned=versioned,
            always_overwrite=always_overwrite,
        )

    def patch_feature_vector(
        self,
        session,
        project,
        name,
        feature_vector_update: dict,
        tag=None,
        uid=None,
        patch_mode: mlrun.common.schemas.PatchMode = mlrun.common.schemas.PatchMode.replace,
    ) -> str:
        """Apply a partial update to an existing feature-vector and store it.

        :raises MLRunNotFoundError: When the feature-vector does not exist.
        :returns: The stored feature-vector's uid.
        """
        existing = self._get_feature_vector(session, project, name, tag, uid)
        if not existing:
            feature_vector_uri = generate_object_uri(project, name, tag)
            raise mlrun.errors.MLRunNotFoundError(
                f"Feature-vector not found {feature_vector_uri}"
            )

        # using mergedeep for merging the patch content into the existing dictionary
        merged_struct = existing.dict(exclude_none=True)
        mergedeep.merge(
            merged_struct,
            feature_vector_update,
            strategy=patch_mode.to_mergedeep_strategy(),
        )

        # A record without a uid in its metadata is an unversioned object.
        versioned = existing.metadata.uid is not None

        feature_vector = mlrun.common.schemas.FeatureVector(**merged_struct)
        return self.store_feature_vector(
            session,
            project,
            name,
            feature_vector,
            feature_vector.metadata.tag,
            uid,
            versioned,
            always_overwrite=True,
        )

    def delete_feature_vector(self, session, project, name, tag=None, uid=None):
        """Delete a feature-vector by name, optionally narrowed to a tag or uid."""
        self._delete_tagged_object(
            session, FeatureVector, project=project, tag=tag, uid=uid, name=name
        )

    def _delete_tagged_object(
        self,
        session,
        cls,
        project,
        tag=None,
        uid=None,
        name=None,
        key=None,
        **kwargs,
    ):
        """Delete a tagged object of the given class from the DB.

        Exactly one of ``name``/``key`` must be given ("key" is only used for
        artifacts), and at most one of ``tag``/``uid``. When neither tag nor uid
        is given, all versions of the object are deleted by name/key.

        :raises MLRunInvalidArgumentError: On conflicting or missing identifiers.
        :raises IntegrityError: When deleting an artifact that has child artifacts.
        :returns: ``(None, None)`` when tag/uid were given but no matching record
            was found (nothing to delete); otherwise ``None``.
        """
        # TODO: Tag is now cascaded in the DB level so this should not be needed anymore
        if tag and uid:
            raise mlrun.errors.MLRunInvalidArgumentError(
                "Both uid and tag specified when deleting an object."
            )

        # "key" is only used for artifact objects, and "name" is used for all other tagged objects.
        # thus only one should be passed
        if name and key:
            raise mlrun.errors.MLRunInvalidArgumentError(
                "Both name and key specified when deleting an object."
            )
        if not name and not key:
            raise mlrun.errors.MLRunInvalidArgumentError(
                "Neither name nor key specified when deleting an object."
            )

        obj_name = name or key
        object_id = None

        if uid or tag:
            # try to find the object by given arguments
            query = self._query(
                session,
                cls,
                project=project,
                uid=uid,
                name=name,
                key=key,
                **kwargs,
            )

            # join on tags if given
            if tag and tag != "*":
                query = query.join(cls.Tag, cls.Tag.obj_id == cls.id)
                query = query.filter(cls.Tag.name == tag)

            object_record = query.one_or_none()

            if object_record is None:
                # object not found, nothing to delete
                return None, None

            # get the object id from the object record
            object_id = object_record.id
            if cls == ArtifactV2:
                if object_record.child_artifacts:
                    # todo : If, in the future, this delete_artifacts API is extended to delete the artifact
                    #  data as well, we should delete this check.
                    raise IntegrityError(
                        "artifact has child artifacts", params=None, orig=Exception()
                    )
                # keep the "latest" tag consistent before removing this version
                self._update_artifact_latest_tag_on_deletion(session, object_record)

        if object_id:
            self._delete(session, cls, id=object_id)
        else:
            # If we got here, neither tag nor uid were provided - delete all references by name.
            identifier = {"name": obj_name} if name else {"key": obj_name}
            self._delete(session, cls, project=project, **identifier)

    def _resolve_class_tag_uid(self, session, cls, project, obj_name, tag_name):
        """Resolve the uid of the object a tag points at.

        :returns: The uid, or ``None`` when the tag does not exist.
        """
        tag = self._query(
            session, cls.Tag, project=project, obj_name=obj_name, name=tag_name
        ).first()
        if tag is None:
            return None
        return self._query(session, cls).get(tag.obj_id).uid

    def _resolve_class_tag_uids(
        self, session, cls, project, tag_name, obj_name=None
    ) -> list[str]:
        """Resolve the uids of all objects carrying the given tag, optionally
        filtered by object name."""
        query = self._query(session, cls.Tag, project=project, name=tag_name)
        if obj_name:
            query = query.filter(
                generate_query_predicate_for_name(cls.Tag.obj_name, obj_name)
            )

        # TODO: query db in a single call
        resolved = (self._query(session, cls).get(tag.obj_id) for tag in query)
        return [obj.uid for obj in resolved if obj]

    def _attach_most_recent_artifact_query(self, session, query):
        """Narrow an ArtifactV2 query to the most recently updated row per
        (project, key) pair.

        :param query: An existing query over ArtifactV2 to be narrowed.
        :returns: The query joined against the per-(project, key) max-updated
            subquery.
        """
        # Create a sub query of latest uid (by updated) per (project,key)
        subq = (
            session.query(
                ArtifactV2.project,
                ArtifactV2.key,
                func.max(ArtifactV2.updated).label("max_updated"),
            )
            .group_by(
                ArtifactV2.project,
                ArtifactV2.key,
            )
            .subquery()
        )

        # Join current query with sub query on (project, key)
        return query.join(
            subq,
            and_(
                ArtifactV2.project == subq.c.project,
                ArtifactV2.key == subq.c.key,
                # equality on the max timestamp keeps only the newest row
                ArtifactV2.updated == subq.c.max_updated,
            ),
        )

    def _query(self, session, cls, **kw):
        """Build a filter_by query over cls, silently dropping None-valued criteria."""
        criteria = {key: value for key, value in kw.items() if value is not None}
        return session.query(cls).filter_by(**criteria)

    def _get_count(self, session, cls):
        """Count the rows of cls using its first primary-key column."""
        primary_key_column = sqlalchemy_inspect(cls).primary_key[0]
        return session.query(func.count(primary_key_column)).scalar()

    def _get_class_instance_by_uid(
        self, session, cls, name: Optional[str], project: str, uid: str
    ):
        """Fetch a single record by project and uid, optionally also matching name.

        :returns: The record, or ``None`` when no match exists.
        """
        criteria = {"project": project, "uid": uid}
        if name:
            criteria["name"] = name
        return self._query(session, cls, **criteria).one_or_none()

    def _get_mep_latest_instance(
        self,
        session,
        cls,
        name: str,
        function_name: Optional[str],
        project: str,
        function_tag: Optional[str],
        _get_query: bool = False,
    ):
        """Fetch the "latest"-tagged model endpoint named ``name`` in ``project``.

        Related function/model/tag rows are eagerly loaded in batches
        (selectinload) with only the needed columns (load_only) to minimize DB
        round-trips.

        NOTE(review): the base query is built over ModelEndpoint while the
        filters use ``cls`` - callers appear to always pass cls=ModelEndpoint;
        confirm before passing any other class.

        :param _get_query: When True, return the query object instead of
            executing it.
        :returns: The first matching record (or the query when ``_get_query``),
            or ``None`` when nothing matches.
        """
        query = (
            session.query(ModelEndpoint)
            .options(
                selectinload(ModelEndpoint.function).options(
                    load_only(
                        Function.name, Function.state, Function.project, Function.uid
                    ),
                    selectinload(Function.tags),
                ),
                selectinload(ModelEndpoint.model).options(
                    load_only(
                        ArtifactV2.key,
                        ArtifactV2.project,
                        ArtifactV2.iteration,
                        ArtifactV2.producer_id,
                        ArtifactV2.uid,
                        ArtifactV2.kind,
                    )
                ),
                selectinload(ModelEndpoint.tags),
            )
            .filter(cls.project == project, cls.name == name)
        )

        # Apply function name filter (must join Function first)
        if function_name:
            query = query.join(Function).filter(Function.name == function_name)

        # Apply function tag filter (must join Function.tags first)
        if function_tag:
            query = query.join(Function.tags).filter(Function.Tag.name == function_tag)

        # Apply latest tag filter
        query = query.join(cls.tags).filter(
            cls.Tag.name == mlrun.common.constants.RESERVED_TAG_NAME_LATEST
        )

        if _get_query:
            return query

        return query.first()  # Use `.first()` instead of `.one_or_none()` for safety

    def _get_mep_instances(
        self,
        session,
        cls,
        name: str,
        project: str,
        function_name: Optional[str],
        function_tag: Optional[str],
        _get_query=False,
    ) -> typing.Union[sqlalchemy.orm.Query, list[ModelEndpoint]]:
        """Fetch all model endpoint records named ``name`` in ``project``.

        Same eager-loading strategy as `_get_mep_latest_instance`, but without
        the "latest" tag restriction - returns every matching version.

        NOTE(review): the base query is built over ModelEndpoint while the
        project/name filters use ``cls`` - callers appear to always pass
        cls=ModelEndpoint; confirm before passing any other class.

        :param _get_query: When True, return the query object instead of
            executing it.
        :returns: A list of matching records (or the query when ``_get_query``).
        """
        query = (
            session.query(ModelEndpoint)
            .options(
                selectinload(ModelEndpoint.function).options(
                    load_only(
                        Function.name, Function.state, Function.project, Function.uid
                    ),
                    selectinload(Function.tags),
                ),
                selectinload(ModelEndpoint.model).options(
                    load_only(
                        ArtifactV2.key,
                        ArtifactV2.project,
                        ArtifactV2.iteration,
                        ArtifactV2.producer_id,
                        ArtifactV2.uid,
                        ArtifactV2.kind,
                    )
                ),
                selectinload(ModelEndpoint.tags),
            )
            .filter(cls.project == project, cls.name == name)
        )

        # Apply function name filter (must join Function table first)
        if function_name:
            query = query.join(Function).filter(Function.name == function_name)

        # Apply function tag filter
        if function_tag:
            query = query.join(Function.tags).filter(Function.Tag.name == function_tag)

        if _get_query:
            return query

        return query.all()  # Return list instead of a single result

    def _get_run(
        self,
        session,
        uid,
        project,
        iteration,
        with_for_update=False,
        with_notifications=False,
        populate_existing=False,
    ):
        """Fetch a single run by uid/project/iteration.

        :param with_for_update: Lock the row for update (implies refreshing any
            already-loaded instance from the DB).
        :param with_notifications: Outer-join the run's notifications.
        :param populate_existing: Refresh already-loaded instances from the DB.
        :returns: The run record, or ``None`` when no match exists.
        """
        run_query = self._query(
            session, Run, uid=uid, project=project, iteration=iteration
        )
        if with_notifications:
            run_query = run_query.outerjoin(Run.Notification)
        if with_for_update or populate_existing:
            run_query = run_query.populate_existing()
        if with_for_update:
            run_query = run_query.with_for_update()

        return run_query.one_or_none()

    def _delete_empty_labels(self, session, cls):
        """Delete label rows of cls that are not attached to any parent object.

        NOTE(review): ``cls.parent == NULL`` relies on the module-level ``NULL``
        constant to produce an IS NULL comparison - confirm against its definition.
        """
        session.query(cls).filter(cls.parent == NULL).delete()
        session.commit()

    def _upsert(self, session, objects, ignore=False, silent=False):
        """Add the given objects to the session one by one and commit them."""
        if not objects:
            return
        for obj in objects:
            session.add(obj)
        self._commit(session, objects, ignore, silent)

    def _upsert_batch(self, session, objects, ignore=False, silent=False):
        """Add the given objects to the session in one call and commit them."""
        if not objects:
            return
        session.add_all(objects)
        self._commit(session, objects, ignore, silent)

    def _upsert_object_and_flush_to_get_field(self, session, object_, field):
        """Persist object_ and return the value of a DB-generated field.

        Flushing forces the DB to assign generated values (e.g. an autoincrement
        id) before the final commit.
        """
        session.add(object_)
        session.flush()

        field_value = getattr(object_, field, None)
        self._commit(session, [object_])
        return field_value

    @staticmethod
    def _commit(session, objects, ignore=False, silent=False):
        """Commit the session, translating SQL errors into MLRun errors.

        The commit is retried (per httpdb config) when the failure is retryable
        (e.g. SQLite "database is locked"); any other failure is wrapped in a
        fatal error so the retry mechanism stops immediately.

        :param session: The DB session to commit.
        :param objects: The objects being committed - used for error reporting.
        :param ignore:  When True, don't raise on non-retryable commit failures.
        :param silent:  When True, don't log non-retryable commit failures.
        """

        def _try_commit_obj():
            try:
                session.commit()
            except SQLAlchemyError as sql_err:
                session.rollback()
                # compute once - the error string is inspected multiple times below
                err_str = str(sql_err)
                classes = list({object_.__class__.__name__ for object_ in objects})

                # if the database is locked, we raise a retryable error
                if "database is locked" in err_str:
                    logger.warning(
                        "Database is locked. Retrying",
                        classes_to_commit=classes,
                        err=err_str,
                    )
                    raise mlrun.errors.MLRunRuntimeError(
                        "Failed committing changes, database is locked"
                    ) from sql_err

                # the error is not retryable, so we try to identify whether there was a conflict or not
                # either way - we wrap the error with a fatal error so the retry mechanism will stop
                if not silent:
                    logger.warning(
                        "Failed committing changes to DB",
                        classes=classes,
                        err=err_to_str(sql_err),
                    )
                if not ignore:
                    # get the identifiers of the objects that failed to commit, for logging purposes
                    identifiers = ",".join(
                        object_.get_identifier_string() for object_ in objects
                    )
                    # check if the error is a conflict error
                    if any(message in err_str for message in conflict_messages):
                        mlrun_error = mlrun.errors.MLRunConflictError(
                            f"Conflict - at least one of the objects already exists: {identifiers}"
                        )
                    elif "Out of range value for column" in err_str:
                        # extract the offending column name from the driver's error message
                        column = sql_err.orig.args[1].split("'")[1]
                        mlrun_error = mlrun.errors.MLRunRuntimeError(
                            f"Failed committing changes to DB, column='{column}' exceeds the allowed range. "
                            + f"classes={classes} objects={identifiers}"
                        )
                    else:
                        mlrun_error = mlrun.errors.MLRunRuntimeError(
                            f"Failed committing changes to DB. classes={classes} objects={identifiers}"
                        )
                    # we want to keep the exception stack trace, but we also want the retry mechanism to stop
                    # so, we raise a new indicative exception from the original sql exception (this keeps
                    # the stack trace intact), and then wrap it with a fatal error (which stops the retry mechanism).
                    # Note - this way, the exception is raised from this code section, and not from the retry function.
                    try:
                        raise mlrun_error from sql_err
                    except (
                        mlrun.errors.MLRunRuntimeError,
                        mlrun.errors.MLRunConflictError,
                    ) as exc:
                        raise mlrun.errors.MLRunFatalFailureError(
                            original_exception=exc
                        )

        if config.httpdb.db.commit_retry_timeout:
            mlrun.utils.helpers.retry_until_successful(
                config.httpdb.db.commit_retry_interval,
                config.httpdb.db.commit_retry_timeout,
                logger,
                False,
                _try_commit_obj,
            )

    def _find_runs(self, session, uid, project, labels=None):
        """Build a query over runs filtered by uid(s), project, and labels.

        :param uid: A single run uid or a list of uids; None matches all runs.
        """
        query = self._filter_query_by_resource_project(
            self._query(session, Run), Run, project
        )
        if uid:
            # uid may be either a single uid (string) or a list of uids
            query = query.filter(Run.uid.in_(mlrun.utils.helpers.as_list(uid)))
        return self._add_labels_filter(session, query, Run, label_set(labels))

    def _get_db_notifications(
        self,
        session,
        cls,
        name: typing.Optional[str] = None,
        parent_id: typing.Optional[int] = None,
        project: typing.Optional[str] = None,
        limit: typing.Optional[int] = None,
    ):
        """Fetch notification rows of cls, filtered by the given criteria and
        optionally capped at ``limit`` results."""
        query = self._query(
            session, cls.Notification, name=name, parent_id=parent_id, project=project
        )
        return (query.limit(limit) if limit else query).all()

    @staticmethod
    def _escape_characters_for_like_query(value: str) -> str:
        return (
            value.translate(value.maketrans({"_": r"\_", "%": r"\%"})) if value else ""
        )

    def _find_functions(
        self,
        session: Session,
        name: str,
        project: typing.Optional[typing.Union[str, list[str]]] = None,
        labels: typing.Union[str, list[str], None] = None,
        tag: typing.Optional[str] = None,
        hash_key: typing.Optional[str] = None,
        since: typing.Optional[datetime] = None,
        until: typing.Optional[datetime] = None,
        kind: typing.Optional[str] = None,
        states: typing.Optional[list[mlrun.common.schemas.FunctionState]] = None,
        offset: typing.Optional[int] = None,
        limit: typing.Optional[int] = None,
    ) -> list[tuple[Function, str]]:
        """
        Query functions from the DB by the given filters.

        :param session: The DB session.
        :param name: The name of the function to query.
        :param project: The project of the function to query.
        :param labels: The labels of the function to query.
        :param tag: The tag of the function to query.
        :param hash_key: The hash key of the function to query.
        :param since: Filter functions that were updated after this time
        :param until: Filter functions that were updated before this time
        :param kind: The kind of the function to query.
        :param states: The states of the function to query.
        :param offset: SQL query offset.
        :param limit: SQL query limit.

        NOTE(review): despite the list annotation, this returns the (paginated)
        query object itself, which yields (Function, tag-name) rows when iterated.
        """
        query = session.query(Function, Function.Tag.name)
        query = self._filter_query_by_resource_project(query, Function, project)

        if name:
            query = query.filter(generate_query_predicate_for_name(Function.name, name))

        if hash_key is not None:
            query = query.filter(Function.uid == hash_key)

        if kind is not None:
            query = query.filter(Function.kind == kind)

        if states is not None:
            query = query.filter(Function.state.in_(states))

        if since or until:
            query = generate_time_range_query(
                query=query, field=Function.updated, since=since, until=until
            )

        if not tag:
            # If no tag is given, we need to outer join to get all functions, even if they don't have tags.
            query = query.outerjoin(Function.Tag, Function.id == Function.Tag.obj_id)
        else:
            # Only get functions that have tags with join (faster than outer join)
            query = query.join(Function.Tag, Function.id == Function.Tag.obj_id)
            if tag != "*":
                # Filter on the specific tag
                query = query.filter(Function.Tag.name == tag)

        # filter out untagged unversioned functions, or in other words:
        # include only functions that are tagged OR their uid does not start with `unversioned-`
        query = query.filter(
            or_(
                Function.Tag.name != NULL,
                Function.uid.notlike(f"{unversioned_tagged_object_uid_prefix}%"),
            )
        )

        labels = label_set(labels)
        query = self._add_labels_filter(session, query, Function, labels)

        # If the updated fields are the same, we need a secondary field to sort by.
        # Third sort by tag ID to ensure consistent ordering when a function has multiple tags.
        query = query.order_by(
            Function.updated.desc(), Function.id.desc(), Function.Tag.id.desc()
        )

        query = self._paginate_query(query, offset, limit)
        return query

    def _find_model_endpoints(
        self,
        session: Session,
        project: str,
        names: Optional[list[str]] = None,
        function_name: Optional[str] = None,
        function_tag: Optional[str] = None,
        model_name: Optional[str] = None,
        model_tag: Optional[str] = None,
        top_level: Optional[bool] = None,
        modes: Optional[list[EndpointMode]] = None,
        labels: Optional[list[str]] = None,
        start: Optional[datetime] = None,
        end: Optional[datetime] = None,
        uids: Optional[list[str]] = None,
        latest_only: Optional[bool] = None,
        offset: Optional[int] = None,
        limit: Optional[int] = None,
        order_by: Optional[str] = None,
    ) -> sqlalchemy.orm.query.Query:
        """
        Query model_endpoints from the DB by the given filters.

        :param session: The DB session.
        :param project: The project of the model endpoint to query.
        :param names: The list of model endpoint names to query.
        :param function_name: The function name of the model endpoint.
        :param function_tag: The function tag associated with the model endpoint.
        :param model_name: The model name of the model endpoint.
        :param model_tag: The model tag associated with the model endpoint.
        :param top_level: If True, filters for top-level model endpoints.
        :param modes: Specifies the mode of the model endpoint. Can be "real-time" (0), "batch" (1),
                      "batch_legacy" (2). If set to None, all are included.
        :param labels: The labels to filter model endpoints.
        :param start: Start date-time filter.
        :param end: End date-time filter.
        :param uids: The list of model endpoint UIDs to query.
        :param latest_only: If True, return only the latest model endpoint.
        :param offset: SQL query offset.
        :param limit: SQL query limit.
        :param order_by: Column name for ordering results.
        """
        # Query explanation:
        # - selectinload is used to efficiently load related objects in batches, avoiding unnecessary extra queries.
        # - load_only restricts the fields retrieved from the related entities to improve performance.
        # - This query ensures all necessary related data is fetched upfront with minimal database overhead.
        query = (
            session.query(ModelEndpoint)
            .options(
                selectinload(ModelEndpoint.function).options(
                    load_only(
                        Function.name,
                        Function.state,
                        Function.project,
                        Function.uid,
                    ),
                    selectinload(Function.tags),
                ),
                selectinload(ModelEndpoint.model).options(
                    load_only(
                        ArtifactV2.key,
                        ArtifactV2.project,
                        ArtifactV2.iteration,
                        ArtifactV2.producer_id,
                        ArtifactV2.uid,
                        ArtifactV2.kind,
                    )
                ),
                selectinload(ModelEndpoint.tags),
            )
            .filter(ModelEndpoint.project == project)
        )

        # Apply filters for direct attributes
        if names:
            query = query.filter(ModelEndpoint.name.in_(names))
        if uids:
            query = query.filter(ModelEndpoint.uid.in_(uids))
        if top_level:
            query = query.filter(
                ModelEndpoint.endpoint_type.in_(EndpointType.top_level_list())
            )
        if modes is not None:
            batch_legacy = EndpointMode.BATCH_LEGACY in modes
            real_time = EndpointMode.REAL_TIME in modes

            if batch_legacy and real_time:
                query = query.filter(
                    or_(
                        ModelEndpoint.mode.in_(modes),
                        ModelEndpoint.mode.is_(None),
                    )
                )
            elif batch_legacy:
                query = query.filter(
                    or_(
                        ModelEndpoint.mode.in_(modes),
                        and_(
                            ModelEndpoint.mode.is_(None),
                            ModelEndpoint.endpoint_type == EndpointType.BATCH_EP,
                        ),
                    )
                )
            elif real_time:
                query = query.filter(
                    or_(
                        ModelEndpoint.mode.in_(modes),
                        ModelEndpoint.endpoint_type != EndpointType.BATCH_EP,
                    )
                )
            else:
                query = query.filter(ModelEndpoint.mode.in_(modes))

        # Apply function-related filters
        if function_name or function_tag:
            query = query.join(Function, ModelEndpoint.function_id == Function.id)
            if function_name:
                query = query.filter(Function.name == function_name)
            if function_tag:
                query = query.filter(
                    Function.tags.any(Function.Tag.name == function_tag)
                )

        if model_name or model_tag:
            query = query.join(ArtifactV2, ModelEndpoint.model_id == ArtifactV2.id)
            if model_name:
                query = query.filter(ArtifactV2.key == model_name)
            if model_tag:
                query = query.filter(
                    ArtifactV2.tags.any(ArtifactV2.Tag.name == model_tag)
                )

        if start or end:
            query = generate_time_range_query(
                query, ModelEndpoint.created, since=start, until=end
            )

        if latest_only:
            query = query.join(
                ModelEndpoint.Tag, ModelEndpoint.id == ModelEndpoint.Tag.obj_id
            )
            if not function_name:
                query = query.join(
                    Function,
                    ModelEndpoint.function_id == Function.id,
                    isouter=True,  # LEFT JOIN to Function
                )

        else:
            query = query.outerjoin(
                ModelEndpoint.Tag, ModelEndpoint.id == ModelEndpoint.Tag.obj_id
            )

        # Apply label filters
        query = self._add_labels_filter(
            session, query, ModelEndpoint, label_set(labels)
        )

        # Apply pagination
        query = self._paginate_query(query, offset, limit)

        # Apply ordering with proper error handling
        if order_by:
            try:
                query = query.order_by(getattr(ModelEndpoint, order_by).asc())
            except AttributeError as err:
                logger.warning("Skipping order by", error=mlrun.errors.err_to_str(err))

        return query

    def _delete(self, session, cls, query=None, **kw):
        """Delete every ``cls`` record matching ``kw`` (or the given query) and commit."""
        matching = query if query is not None else session.query(cls).filter_by(**kw)
        for record in matching:
            session.delete(record)
        session.commit()

    def _find_labels(self, session, cls, label_cls, labels):
        """Return a query of ``cls`` records having at least one label whose name is in ``labels``."""
        joined = session.query(cls).join(label_cls)
        return joined.filter(label_cls.name.in_(labels))

    def _add_labels_filter(self, session, query, cls, labels):
        """Narrow ``query`` to parent objects carrying every requested label.

        Entries in ``labels`` come in two forms: "name" (the label must exist)
        and "name=value" (the label must exist with that exact value).
        """
        if not labels:
            return query

        predicates = []
        # A query like "label=x&label=x=value" should collapse to "label=x=value",
        # so remember which names were requested with a value and drop the bare
        # duplicates afterwards.
        valued_names = set()
        bare_names = set()

        for requested in labels:
            if "=" not in requested:
                bare_names.add(requested.strip())
                continue
            name, value = (part.strip() for part in requested.split("=", 1))
            predicates.append(
                and_(
                    generate_query_predicate_for_name(cls.Label.name, name),
                    generate_query_predicate_for_name(cls.Label.value, value),
                )
            )
            valued_names.add(name)

        predicates.extend(
            generate_query_predicate_for_name(cls.Label.name, name)
            for name in bare_names - valued_names
        )

        if len(predicates) == 1:
            # The common single-label case - no need to burden the DB with the
            # heavier grouped query below.
            subq = session.query(cls.Label).filter(*predicates).subquery("labels")
        else:
            # "or" the predicates together and count matching rows per parent -
            # a parent with as many rows as predicates satisfies all of them.
            subq = (
                session.query(cls.Label)
                .filter(or_(*predicates))
                .group_by(cls.Label.parent)
                .having(func.count(cls.Label.parent) == len(predicates))
                .subquery("labels")
            )

        return query.join(subq)

    def _delete_class_labels(
        self,
        session: Session,
        cls: Any,
        project: str = "",
        name: str = "",
        key: str = "",
        uid: str = "",
        commit: bool = True,
    ):
        """Delete label records attached to ``cls`` objects matching the given fields.

        Only non-empty filter values are applied; ``commit`` controls whether the
        deletion is committed here or left to the caller.
        """
        field_values = {"project": project, "name": name, "key": key, "uid": uid}
        conditions = [
            getattr(cls, field) == value for field, value in field_values.items() if value
        ]
        labels_query = session.query(cls.Label).join(cls).filter(*conditions)

        for label_record in labels_query:
            session.delete(label_record)
        if commit:
            session.commit()

    def _transform_schedule_record_to_scheme(
        self,
        schedule_record: Schedule,
    ) -> mlrun.common.schemas.ScheduleRecord:
        """Convert a Schedule DB record to its API schema, restoring UTC tzinfo."""
        scheme = mlrun.common.schemas.ScheduleRecord.from_orm(schedule_record)
        # sqlite drops timezone info, so stamp the timestamps back as UTC
        scheme.creation_time = self._add_utc_timezone(scheme.creation_time)
        scheme.next_run_time = self._add_utc_timezone(scheme.next_run_time)
        return scheme

    @staticmethod
    def _add_utc_timezone(time_value: typing.Optional[datetime]):
        """
        sqlalchemy losing timezone information with sqlite so we're returning it
        https://stackoverflow.com/questions/6991457/sqlalchemy-losing-timezone-information-with-sqlite
        """
        if time_value:
            if time_value.tzinfo is None:
                return pytz.utc.localize(time_value)
        return time_value

    @staticmethod
    def _transform_feature_set_model_to_schema(
        feature_set_record: FeatureSet,
        tag=None,
        format_: mlrun.common.formatters.FeatureSetFormat = mlrun.common.formatters.FeatureSetFormat.full,
    ) -> mlrun.common.schemas.FeatureSet:
        """Build a FeatureSet schema from a DB record, applying the requested format."""
        formatted_dict = mlrun.common.formatters.FeatureSetFormat.format_obj(
            feature_set_record.full_object, format_
        )
        response = mlrun.common.schemas.FeatureSet(**formatted_dict)
        response.metadata.tag = tag
        return response

    @staticmethod
    def _transform_feature_vector_model_to_schema(
        feature_vector_record: FeatureVector, tag=None, format_=None
    ) -> mlrun.common.schemas.FeatureVector:
        """Build a FeatureVector schema from a DB record and attach its tag and created time."""
        response = mlrun.common.schemas.FeatureVector(
            **feature_vector_record.full_object
        )
        response.metadata.tag = tag
        response.metadata.created = feature_vector_record.created
        return response

    def _transform_model_endpoint_model_to_schema(
        self,
        model_endpoint_record: ModelEndpoint,
        format_: mlrun.common.formatters.ModelEndpointFormat = mlrun.common.formatters.ModelEndpointFormat.full,
    ) -> mlrun.common.schemas.ModelEndpoint:
        """Convert a ModelEndpoint DB record into a ModelEndpoint schema.

        The record's stored struct is enriched with the DB-managed fields
        (updated/created/uid) and with data from the related function and model
        records, then formatted according to ``format_``.
        """
        model_endpoint_full_dict = model_endpoint_record.struct
        # DB-managed columns override whatever was serialized into the struct
        model_endpoint_full_dict[ModelEndpointSchema.UPDATED] = (
            model_endpoint_record.updated
        )
        model_endpoint_full_dict[ModelEndpointSchema.CREATED] = (
            model_endpoint_record.created
        )
        model_endpoint_full_dict[ModelEndpointSchema.UID] = model_endpoint_record.uid

        # function data is only filled when the endpoint still carries tags ("latest")
        model_endpoint_full_dict = self._fill_model_endpoint_with_function_data(
            model_endpoint_record,
            model_endpoint_full_dict,
            latest=bool(model_endpoint_record.tags),
        )
        model_endpoint_full_dict = self._fill_model_endpoint_with_model_data(
            model_endpoint_record, model_endpoint_full_dict
        )

        model_endpoint_full_dict = (
            mlrun.common.formatters.ModelEndpointFormat.format_obj(
                model_endpoint_full_dict, format_
            )
        )

        model_endpoint_resp = mlrun.common.schemas.ModelEndpoint.from_flat_dict(
            model_endpoint_full_dict, validate=False
        )
        # NOTE(review): mutating the dict after the response is built has no effect
        # on the returned schema - presumably clears a cached/struct field; confirm
        # whether this line is still needed
        model_endpoint_full_dict["_model_id"] = None
        return model_endpoint_resp

    def _fill_model_endpoint_with_function_data(
        self,
        model_endpoint_record: ModelEndpoint,
        model_endpoint_full_dict: dict,
        latest: bool,
    ) -> dict:
        """Populate function-related fields on the endpoint dict.

        Empty/unknown values are used when there is no related function record or
        the endpoint is not the latest (``latest`` is False).
        """
        function = model_endpoint_record.function
        if not (function and latest):
            model_endpoint_full_dict.update(
                {
                    ModelEndpointSchema.FUNCTION_NAME: "",
                    ModelEndpointSchema.FUNCTION_TAG: "",
                    ModelEndpointSchema.STATE: "unknown",
                    ModelEndpointSchema.FUNCTION_URI: None,
                }
            )
            return model_endpoint_full_dict

        model_endpoint_full_dict.update(
            {
                ModelEndpointSchema.FUNCTION_NAME: function.name,
                ModelEndpointSchema.FUNCTION_TAG: (
                    self._get_obj_tag_prioritizing_user_tag(function.tags)
                ),
                ModelEndpointSchema.STATE: function.state,
                ModelEndpointSchema.FUNCTION_URI: generate_object_uri(
                    project=function.project,
                    name=function.name,
                    hash_key=function.uid,
                ),
            }
        )
        return model_endpoint_full_dict

    @staticmethod
    def _get_obj_tag_prioritizing_user_tag(obj_tag_list, desired_tag=None) -> str:
        """
        Determine which tag to use from a list of function/model tags.

        Args:
            obj_tag_list (list): List of tag objects (with `.name`).
            desired_tag (str, optional): Specific tag name to prioritize.

        Returns:
            str: The selected tag name, or an empty string if no match is found.
        """
        tag_names = [tag.name for tag in obj_tag_list]

        # Case 1: the desired tag appears verbatim in the list
        if desired_tag and desired_tag in tag_names:
            return desired_tag

        saw_latest = False
        first_user_tag = None
        for candidate in tag_names:
            if candidate == mlrun.common.constants.RESERVED_TAG_NAME_LATEST:
                saw_latest = True
            elif not first_user_tag:
                first_user_tag = candidate
            # Case 2: a tag containing the desired tag as a substring wins outright
            if desired_tag and desired_tag in candidate:
                return candidate

        # Case 3: prefer the first user (non-"latest") tag, then "latest", then nothing
        if first_user_tag:
            return first_user_tag
        if saw_latest:
            return mlrun.common.constants.RESERVED_TAG_NAME_LATEST
        return ""

    @staticmethod
    def _fill_model_endpoint_with_model_data(
        model_endpoint_record: ModelEndpoint, model_endpoint_full_dict: dict
    ) -> dict:
        """Populate model-related fields (name, tags, store URI) on the endpoint dict."""
        model = model_endpoint_record.model
        if not model:
            model_endpoint_full_dict[ModelEndpointSchema.MODEL_NAME] = ""
            model_endpoint_full_dict[ModelEndpointSchema.MODEL_TAGS] = []
            model_endpoint_full_dict[ModelEndpointSchema.MODEL_URI] = None
            return model_endpoint_full_dict

        model_endpoint_full_dict[ModelEndpointSchema.MODEL_NAME] = model.key
        model_endpoint_full_dict[ModelEndpointSchema.MODEL_TAGS] = [
            tag.name for tag in model.tags or []
        ]
        # model artifacts get the model store prefix; any other kind gets LLM prompt
        store_prefix = (
            mlrun.utils.helpers.StorePrefix.Model
            if model.kind == mlrun.artifacts.ModelArtifact.kind
            else mlrun.utils.helpers.StorePrefix.LLMPrompt
        )
        model_endpoint_full_dict[ModelEndpointSchema.MODEL_URI] = (
            mlrun.datastore.get_store_uri(
                kind=store_prefix,
                uri=generate_artifact_uri(
                    project=model.project,
                    key=model.key,
                    iter=model.iteration,
                    tree=model.producer_id,
                    uid=model.uid,
                ),
            )
        )
        return model_endpoint_full_dict

    def _transform_project_record_to_schema(
        self, session: Session, project_record: Project
    ) -> mlrun.common.schemas.ProjectOut:
        """Convert a Project DB record to a ProjectOut schema.

        Projects created before 0.6.0 may lack a stored full object - for those,
        rebuild one from the record's columns and persist the fix.
        """
        if project_record.full_object:
            # TODO: handle transforming the functions/workflows/artifacts references to real objects
            return mlrun.common.schemas.ProjectOut(**project_record.full_object)

        project = mlrun.common.schemas.Project(
            metadata=mlrun.common.schemas.ProjectMetadata(
                name=project_record.name,
                created=project_record.created,
            ),
            spec=mlrun.common.schemas.ProjectSpec(
                description=project_record.description,
                source=project_record.source,
                default_function_node_selector=project_record.default_function_node_selector,
            ),
            status=mlrun.common.schemas.ObjectStatus(
                state=project_record.state,
            ),
        )
        self.store_project(session, project_record.name, project)
        return mlrun.common.schemas.ProjectOut(**project.dict())

    def _transform_notification_record_to_spec_and_status(
        self,
        notification_record,
    ) -> tuple[dict, dict]:
        """Split a notification record into its spec dict and status dict."""
        spec = self._transform_notification_record_to_schema(
            notification_record
        ).to_dict()
        # pop the status-related keys out of the spec so each dict holds only its own fields
        status_keys = ("status", "sent_time", "reason")
        status = {field: spec.pop(field, None) for field in status_keys}
        return spec, status

    @staticmethod
    def _transform_notification_record_to_schema(
        notification_record,
    ) -> mlrun.model.Notification:
        """Build a Notification model object from a notification DB record."""
        record = notification_record
        return mlrun.model.Notification(
            kind=record.kind,
            name=record.name,
            message=record.message,
            severity=record.severity,
            # "when" is persisted as a comma-separated string
            when=record.when.split(","),
            condition=record.condition,
            secret_params=record.secret_params,
            params=record.params,
            status=record.status,
            sent_time=record.sent_time,
            reason=record.reason,
        )

    def _move_and_reorder_table_items(
        self, session, moved_object, move_to=None, move_from=None
    ):
        """Insert, move, or delete a HubSource while keeping indexes contiguous.

        ``move_to=None`` deletes ``moved_object``; ``move_from=None`` inserts it
        as a new object; otherwise the object moves from one index to the other.
        Every other source in the affected index range is shifted accordingly.

        :param session: DB session.
        :param moved_object: The HubSource record being inserted/moved/deleted.
        :param move_to: Target index, or None to delete.
        :param move_from: Current index, or None for a new object.
        """
        moved_object.index = move_to

        if move_from == move_to:
            # It's just modifying the same object - update and exit.
            # using merge since primary key is changing
            session.merge(moved_object)
            session.commit()
            return

        modifier = 1
        if move_from is None:
            # insertion - everything at or after the target index shifts up
            start, end = move_to, None
        elif move_to is None:
            # deletion - everything after the removed index shifts down
            start, end = move_from + 1, None
            modifier = -1
        else:
            if move_from < move_to:
                start, end = move_from + 1, move_to
                modifier = -1
            else:
                start, end = move_to, move_from - 1

        query = session.query(HubSource).filter(HubSource.index >= start)
        # explicit None check - a numeric bound of 0 must not be mistaken for "no bound"
        if end is not None:
            query = query.filter(HubSource.index <= end)

        for source_record in query:
            source_record.index = source_record.index + modifier
            # using merge since primary key is changing
            session.merge(source_record)

        # explicit None check - a target index of 0 would otherwise be treated as delete
        if move_to is not None:
            # using merge since primary key is changing
            session.merge(moved_object)
        else:
            session.delete(moved_object)
        session.commit()

    @staticmethod
    def _transform_hub_source_record_to_schema(
        hub_source_record: HubSource,
    ) -> mlrun.common.schemas.IndexedHubSource:
        """Wrap a HubSource DB record into an IndexedHubSource schema."""
        return mlrun.common.schemas.IndexedHubSource(
            index=hub_source_record.index,
            source=mlrun.common.schemas.HubSource(**hub_source_record.full_object),
        )

    @staticmethod
    def _transform_hub_source_schema_to_record(
        hub_source_schema: mlrun.common.schemas.IndexedHubSource,
        current_object: HubSource = None,
    ):
        """Build a HubSource DB record from a schema.

        When ``current_object`` is given this is an update: the record keeps its
        id and creation time, and renaming the source is rejected.
        """
        metadata = hub_source_schema.source.metadata
        now = datetime.now(UTC)
        if current_object:
            if current_object.name != metadata.name:
                raise mlrun.errors.MLRunInternalServerError(
                    "Attempt to update object while replacing its name"
                )
            created_timestamp = current_object.created
        else:
            created_timestamp = metadata.created or now
        updated_timestamp = metadata.updated or now

        record = HubSource(
            id=current_object.id if current_object else None,
            name=metadata.name,
            index=hub_source_schema.index,
            created=created_timestamp,
            updated=updated_timestamp,
        )
        source_dict = hub_source_schema.source.dict()
        source_dict["metadata"]["created"] = str(created_timestamp)
        source_dict["metadata"]["updated"] = str(updated_timestamp)
        # Make sure we don't keep any credentials in the DB. These are handled in the hub crud object.
        source_dict["spec"].pop("credentials", None)

        record.full_object = source_dict
        return record

    @staticmethod
    def _validate_and_adjust_hub_order(session, order):
        """Validate a requested hub-source index, resolving "last" to a concrete value.

        Raises MLRunInvalidArgumentError when the order is out of the valid range.
        """
        # treat a missing or negative max index as zero
        max_order = max(session.query(func.max(HubSource.index)).scalar() or 0, 0)

        if order == mlrun.common.schemas.hub.last_source_index:
            order = max_order + 1

        if order > max_order + 1:
            raise mlrun.errors.MLRunInvalidArgumentError(
                f"Order must not exceed the current maximal order + 1. max_order = {max_order}, order = {order}"
            )
        if order < 1:
            raise mlrun.errors.MLRunInvalidArgumentError(
                "Order of inserted source must be greater than 0 or "
                + f"{mlrun.common.schemas.hub.last_source_index} (for last). order = {order}"
            )
        return order

    # ---- Hub Sources ----
    def create_hub_source(
        self, session, ordered_source: mlrun.common.schemas.IndexedHubSource
    ):
        """Create a new hub source at the requested index, shifting others as needed.

        Raises MLRunConflictError when a source with the same name already exists.
        """
        name = ordered_source.source.metadata.name
        logger.debug(
            "Creating hub source in DB",
            index=ordered_source.index,
            name=name,
        )

        order = self._validate_and_adjust_hub_order(session, ordered_source.index)
        if self._query(session, HubSource, name=name).one_or_none() is not None:
            raise mlrun.errors.MLRunConflictError(
                f"Hub source name already exists. name = {name}"
            )
        new_record = self._transform_hub_source_schema_to_record(ordered_source)

        self._move_and_reorder_table_items(
            session, new_record, move_to=order, move_from=None
        )

    @retry_on_conflict
    def store_hub_source(
        self,
        session,
        name,
        ordered_source: mlrun.common.schemas.IndexedHubSource,
    ):
        """Create or update the hub source called ``name`` and reorder accordingly."""
        logger.debug("Storing hub source in DB", index=ordered_source.index, name=name)

        if name != ordered_source.source.metadata.name:
            raise mlrun.errors.MLRunInvalidArgumentError(
                "Conflict between resource name and metadata.name in the stored object"
            )
        order = self._validate_and_adjust_hub_order(session, ordered_source.index)

        existing_record = self._query(session, HubSource, name=name).one_or_none()
        current_order = None if existing_record is None else existing_record.index
        if current_order == mlrun.common.schemas.hub.last_source_index:
            # the global (default) source is pinned to the last index
            raise mlrun.errors.MLRunInvalidArgumentError(
                "Attempting to modify the global hub source."
            )
        updated_record = self._transform_hub_source_schema_to_record(
            ordered_source, existing_record
        )

        self._move_and_reorder_table_items(
            session, updated_record, move_to=order, move_from=current_order
        )

    def list_hub_sources(self, session) -> list[mlrun.common.schemas.IndexedHubSource]:
        """List hub sources by descending index, with the default source last."""
        sources: list[mlrun.common.schemas.IndexedHubSource] = []
        records = self._query(session, HubSource).order_by(HubSource.index.desc())
        for record in records:
            indexed_source = self._transform_hub_source_record_to_schema(record)
            if indexed_source.index == mlrun.common.schemas.last_source_index:
                # the default source goes to the end of the response
                sources.append(indexed_source)
            else:
                sources.insert(0, indexed_source)
        return sources

    def _list_hub_sources_without_transform(self, session) -> list[HubSource]:
        """Return the raw HubSource records without schema conversion."""
        records = self._query(session, HubSource)
        return records.all()

    def delete_hub_source(self, session, name):
        """Delete the named hub source if it exists, shifting later indexes down.

        Raises MLRunInvalidArgumentError when targeting the global (default) source.
        """
        logger.debug("Deleting hub source from DB", name=name)

        record = self._query(session, HubSource, name=name).one_or_none()
        if record is None:
            # nothing to delete
            return

        if record.index == mlrun.common.schemas.hub.last_source_index:
            raise mlrun.errors.MLRunInvalidArgumentError(
                "Attempting to delete the global hub source."
            )

        self._move_and_reorder_table_items(
            session, record, move_to=None, move_from=record.index
        )

    def get_hub_source(
        self, session, name=None, index=None, raise_on_not_found=True
    ) -> typing.Optional[mlrun.common.schemas.IndexedHubSource]:
        """Fetch a hub source by name and/or index.

        When no matching source exists, raises MLRunNotFoundError (or returns
        None when ``raise_on_not_found`` is False).
        """
        record = self._query(session, HubSource, name=name, index=index).one_or_none()
        if record is not None:
            return self._transform_hub_source_record_to_schema(record)

        message = f"Hub source not found. name = {name}"
        if raise_on_not_found:
            logger.warning(message)
            raise mlrun.errors.MLRunNotFoundError(message)
        logger.debug(message)
        return None

    # ---- Data Versions ----
    def get_current_data_version(
        self, session, raise_on_not_found=True
    ) -> typing.Optional[str]:
        """Return the most recently created data version.

        :param session: DB session.
        :param raise_on_not_found: When True, raise MLRunNotFoundError if no data
            version record exists; otherwise log at debug level and return None.
        """
        current_data_version_record = (
            self._query(session, DataVersion)
            .order_by(DataVersion.created.desc())
            .limit(1)
            .one_or_none()
        )
        if current_data_version_record:
            return current_data_version_record.version

        message = "No data version found"
        if raise_on_not_found:
            logger.warning(message)
            raise mlrun.errors.MLRunNotFoundError(message)
        logger.debug(message)
        return None

    def create_data_version(self, session, version):
        """Insert a new data version record."""
        logger.debug("Creating data version in DB", version=version)
        self._upsert(session, [DataVersion(version=version)])

    def store_alert_template(
        self, session, template: mlrun.common.schemas.AlertTemplate
    ) -> mlrun.common.schemas.AlertTemplate:
        """Create or overwrite an alert template and return the stored schema."""
        existing = self._get_alert_template_record(session, template.template_name)
        if not existing:
            return self._create_alert_template(session, template)

        existing.full_object = template.dict()
        self._upsert(session, [existing])
        # re-read so the returned schema reflects exactly what was persisted
        return self._transform_alert_template_record_to_schema(
            self._get_alert_template_record(session, template.template_name)
        )

    def _create_alert_template(
        self, session, template: mlrun.common.schemas.AlertTemplate
    ) -> mlrun.common.schemas.AlertTemplate:
        """Insert a new alert template record and return it as a schema."""
        record = self._transform_alert_template_schema_to_record(template)
        self._upsert(session, [record])
        return self._transform_alert_template_record_to_schema(record)

    def delete_alert_template(self, session, name: str):
        """Delete the alert template with the given name (no-op when absent)."""
        self._delete(session, AlertTemplate, name=name)

    def list_alert_templates(self, session) -> list[mlrun.common.schemas.AlertTemplate]:
        """Return all alert templates as schemas."""
        records = self._query(session, AlertTemplate).all()
        return [
            self._transform_alert_template_record_to_schema(record)
            for record in records
        ]

    def get_alert_template(
        self, session, name: str
    ) -> mlrun.common.schemas.AlertTemplate:
        """Fetch a single alert template by name and return it as a schema."""
        record = self._get_alert_template_record(session, name)
        return self._transform_alert_template_record_to_schema(record)

    def get_all_alerts(self, session) -> list[mlrun.common.schemas.AlertConfig]:
        """Return every alert configuration in the DB as schemas."""
        records = self._query(session, AlertConfig).all()
        return [
            self._transform_alert_config_record_to_schema(record)
            for record in records
        ]

    def get_num_configured_alerts(self, session) -> int:
        """Return the total number of alert configurations in the DB."""
        return self._get_count(session, AlertConfig)

    def store_alert(
        self, session, alert: mlrun.common.schemas.AlertConfig
    ) -> mlrun.common.schemas.AlertConfig:
        """Create or update an alert configuration, replacing its notifications."""
        existing_record, existing_state = self._get_alert_record(
            session, alert.name, alert.project, with_state=True
        )
        if not existing_record:
            return self.create_alert(session, alert)

        existing_record.full_object = alert.dict()

        # replace the alert's notifications wholesale
        self._delete_alert_notifications(
            session,
            name=alert.name,
            alert_id=alert.id,
            project=alert.project,
        )
        self._store_notifications(
            session,
            AlertConfig,
            alert.get_raw_notifications(),
            existing_record.id,
            alert.project,
        )

        self._upsert(session, [existing_record])
        # in case alert service was stopped while storing an alert, ensure that it has a state
        if not existing_state:
            self.create_alert_state(session, existing_record.id)
        return self._transform_alert_config_record_to_schema(existing_record)

    def create_alert(
        self,
        session,
        alert: mlrun.common.schemas.AlertConfig,
    ) -> mlrun.common.schemas.AlertConfig:
        """Insert a new alert configuration with its notifications and initial state."""
        record = self._transform_alert_config_schema_to_record(alert)
        # flush first so the generated id is available for the child rows
        new_alert_id = self._upsert_object_and_flush_to_get_field(session, record, "id")

        self._store_notifications(
            session,
            AlertConfig,
            alert.get_raw_notifications(),
            new_alert_id,
            alert.project,
        )
        self.create_alert_state(session, new_alert_id)

        return self._transform_alert_config_record_to_schema(record)

    def delete_alert(self, session, project: str, name: str):
        """Delete the alert configuration matching the given project and name."""
        self._delete(session, AlertConfig, project=project, name=name)

    def list_alerts(
        self,
        session,
        project: typing.Optional[typing.Union[str, list[str]]] = None,
        exclude_updated: bool = False,
        limit: typing.Optional[int] = None,
        offset: typing.Optional[int] = None,
    ) -> list[mlrun.common.schemas.AlertConfig]:
        """List alert configurations enriched with their states.

        :param project: Project name(s) to filter by (None for no project filter).
        :param exclude_updated: When True, clear the ``updated`` field on results.
        :param limit: Max number of alerts to return.
        :param offset: Number of alerts to skip.
        """
        # outer-join AlertState so each row carries the alert together with its state
        query = (
            self._query(session, AlertConfig)
            .outerjoin(AlertState, AlertState.parent_id == AlertConfig.id)
            .add_entity(AlertState)
        )
        query = self._filter_query_by_resource_project(query, AlertConfig, project)
        query = self._paginate_query(
            query.order_by(AlertConfig.id.asc()), offset, limit
        )

        enriched_alerts = []
        for alert_record, state_record in query.all():
            alert = self._transform_alert_config_record_to_schema(alert_record)
            # enrich the alert with state/count/created data from AlertState
            self.enrich_alert(session, alert, state=state_record)
            if exclude_updated:
                alert.updated = None
            enriched_alerts.append(alert)
        return enriched_alerts

    def delete_project_alerts(
        self,
        session,
        project: str,
        chunk_size: typing.Optional[int] = None,
    ) -> list[int]:
        """
        Delete every alert of the given project in chunks and return their IDs.

        Alerts are removed through the ORM (rather than a bulk DELETE) so that
        cascading to related notifications keeps foreign key constraints intact.
        Each chunk is committed as a single transaction.

        :param session: SQLAlchemy session for database connection.
        :param project: Project identifier for which alerts need to be listed and deleted.
        :param chunk_size: Number of records to delete in each batch (defaults to
            the configured ``alerts.chunk_size_during_project_deletion``).

        :return: List of deleted alert IDs.
        """
        chunk_size = (
            chunk_size or mlrun.mlconf.alerts.chunk_size_during_project_deletion
        )
        deleted_ids: list[int] = []
        last_seen_id = None

        logger.debug(
            "Deleting project alerts from db in chunks",
            project=project,
            chunk_size=chunk_size,
        )

        while True:
            # fetch the next chunk of alert ids, keyset-paginated by id
            id_query = session.query(AlertConfig.id).filter(
                AlertConfig.project == project
            )
            if last_seen_id is not None:
                id_query = id_query.filter(AlertConfig.id > last_seen_id)
            rows = id_query.order_by(AlertConfig.id).limit(chunk_size).all()
            if not rows:
                # no alerts left to delete
                break

            chunk_ids = [row[0] for row in rows]
            last_seen_id = chunk_ids[-1]
            deleted_ids.extend(chunk_ids)

            # delete through the ORM so cascading to notifications works
            # TODO: fix the foreign key constraint by implementing db level cascade
            records = (
                session.query(AlertConfig)
                .filter(AlertConfig.id.in_(chunk_ids))
                .all()
            )
            for alert_record in records:
                session.delete(alert_record)

            # one commit per chunk keeps transactions bounded
            session.commit()

        logger.debug(
            "Successfully deleted project alerts from db",
            project=project,
            number_of_deleted_alerts=len(deleted_ids),
        )
        return deleted_ids

    def get_alert(
        self,
        session,
        project: str,
        name: str,
        with_state=False,
    ) -> Optional[
        Union[
            mlrun.common.schemas.AlertConfig,
            tuple[mlrun.common.schemas.AlertConfig, AlertState],
        ]
    ]:
        """
        Fetch an alert config by project and name.

        :param session: SQLAlchemy session.
        :param project: Project the alert belongs to.
        :param name: Name of the alert.
        :param with_state: When True, also return the alert's state record as a
            ``(config, state)`` tuple.
        :return: The alert config schema (or a tuple with its state), or None
            when no such alert exists.
        """
        record = self._get_alert_record(session, name, project, with_state)
        if with_state:
            alert_record, state_record = record
            return (
                self._transform_alert_config_record_to_schema(alert_record),
                state_record,
            )
        return self._transform_alert_config_record_to_schema(record)

    def get_alert_by_id(
        self, session, alert_id: int
    ) -> mlrun.common.schemas.AlertConfig:
        """Fetch an alert config by its numeric id and convert it to schema form."""
        record = self._get_alert_record_by_id(session, alert_id)
        return self._transform_alert_config_record_to_schema(record)

    def enrich_alert(
        self,
        session,
        alert: mlrun.common.schemas.AlertConfig,
        state: Optional[AlertState] = None,
    ):
        """
        Enrich an alert config schema in place with its runtime state and its
        notifications (with secrets stripped).

        :param session: SQLAlchemy session.
        :param alert: Alert config schema to enrich (mutated in place).
        :param state: Pre-fetched state record; fetched by alert id when None.
        """
        if not state:
            state = self.get_alert_state(session, alert.id)
        alert.state = (
            mlrun.common.schemas.AlertActiveState.ACTIVE
            if state.active
            else mlrun.common.schemas.AlertActiveState.INACTIVE
        )
        alert.count = state.count
        alert.created = state.created

        # if this is a new alert, initialize the updated field with the created time
        if alert.updated is None:
            alert.updated = state.created

        def _enrich_notification(_notification):
            # convert the DB record to a plain dict and sanitize it for returning
            _notification = _notification.to_dict()
            # we don't want to return the secret
            del _notification["secret_params"]

            # normalize "when" to a list (it may be stored as a single value)
            if not isinstance(_notification["when"], list):
                _notification["when"] = [_notification["when"]]
            return _notification

        notifications = [
            mlrun.common.schemas.notification.Notification(
                **_enrich_notification(notification)
            )
            for notification in self._get_db_notifications(
                session, AlertConfig, parent_id=alert.id
            )
        ]

        # NOTE(review): the zip below assumes the DB notifications come back in
        # the same order and count as alert.notifications - TODO confirm
        cooldowns = [
            notification.cooldown_period for notification in alert.notifications
        ]

        alert.notifications = [
            mlrun.common.schemas.alert.AlertNotification(
                cooldown_period=cooldown, notification=notification
            )
            for cooldown, notification in zip(cooldowns, notifications)
        ]

    @staticmethod
    def create_partitions(
        session: Session,
        table_name: str,
        partitioning_information_list: list[tuple[str, str]],
    ):
        """
        Creates partitions in the specified database table.

        :param session: SQLAlchemy session for database connection.
        :param table_name: Name of the table where partitions will be created.
        :param partitioning_information_list: List of tuples, each containing:
            - partition_name: The name for the partition.
            - partition_value:
                * MySQL: the "LESS THAN" boundary value for the partition.
                * Postgres: a string "lower,upper" defining the range.
        """
        # no-op in this base class; presumably overridden by dialect-specific
        # implementations (MySQL/Postgres) - TODO confirm
        pass

    @staticmethod
    def drop_partitions(
        session: Session,
        table_name: str,
        cutoff_partition_name: str,
    ):
        """
        Execute the drop operation for partitions older than the cutoff partition name.

        :param session: SQLAlchemy session.
        :param table_name: The name of the table with partitions.
        :param cutoff_partition_name: The cutoff partition name for dropping old partitions.
        """
        # no-op in this base class; presumably overridden by dialect-specific
        # implementations - TODO confirm
        pass

    @staticmethod
    def get_partition_expression_for_table(
        session: Session,
        table_name: str,
    ) -> str:
        """
        Returns partitioning expression for a given table.

        :param session: SQLAlchemy session.
        :param table_name: Name of the table.

        Output examples:
        - month(`activation_time`)
        - dayofmonth(`activation_time`)
        - yearweek(`activation_time`, 1)
        """
        # no-op in this base class (implicitly returns None despite the str
        # annotation); presumably overridden per dialect - TODO confirm
        pass

    @staticmethod
    def table_exists(
        session: Session,
        table_name: str,
    ) -> bool:
        """
        Checks if a table exists in the current database schema.

        :param session: SQLAlchemy session.
        :param table_name: Name of the table to check.

        :return: True if the table exists, False otherwise.
        """
        # no-op in this base class (implicitly returns None despite the bool
        # annotation); presumably overridden per dialect - TODO confirm
        pass

    @staticmethod
    def _transform_alert_template_schema_to_record(
        alert_template: mlrun.common.schemas.AlertTemplate,
    ) -> AlertTemplate:
        """Convert an alert-template schema object into its DB record form."""
        record = AlertTemplate(
            id=alert_template.template_id,
            name=alert_template.template_name,
        )
        # the full schema payload is persisted as-is alongside the indexed columns
        record.full_object = alert_template.dict()
        return record

    @staticmethod
    def _transform_alert_template_record_to_schema(
        template_record: AlertTemplate,
    ) -> typing.Optional[mlrun.common.schemas.AlertTemplate]:
        """
        Convert an alert-template DB record into its schema form.

        :param template_record: The DB record, or None.
        :return: The schema object with template_id filled from the record id,
            or None when no record was given.
        """
        # annotation fixed to Optional: this returns None for a missing record,
        # matching _transform_alert_config_record_to_schema
        if template_record is None:
            return None

        template = mlrun.common.schemas.AlertTemplate(**template_record.full_object)
        template.template_id = template_record.id
        return template

    @staticmethod
    def _transform_alert_config_record_to_schema(
        alert_config_record: AlertConfig,
    ) -> typing.Optional[mlrun.common.schemas.AlertConfig]:
        """Convert an alert-config DB record into its schema form (None-safe)."""
        if alert_config_record is None:
            return None

        schema_object = mlrun.common.schemas.AlertConfig(
            **alert_config_record.full_object
        )
        schema_object.id = alert_config_record.id
        return schema_object

    @staticmethod
    def _transform_alert_config_schema_to_record(
        alert: mlrun.common.schemas.AlertConfig,
    ) -> AlertConfig:
        """Convert an alert-config schema object into its DB record form."""
        record = AlertConfig(
            id=alert.id,
            name=alert.name,
            project=alert.project,
        )
        # the full schema payload is persisted as-is alongside the indexed columns
        record.full_object = alert.dict()
        return record

    def _get_alert_template_record(self, session, name: str) -> AlertTemplate:
        """Fetch a single alert-template record by name (None when missing)."""
        query = self._query(session, AlertTemplate, name=name)
        return query.one_or_none()

    def _get_alert_record(
        self, session, name: str, project: str, with_state: bool = False
    ) -> Optional[Union[AlertConfig, tuple[AlertConfig, AlertState]]]:
        """
        Fetch a single alert-config record by name and project.

        When ``with_state`` is set, the state record is outer-joined in and the
        result is an ``(AlertConfig, AlertState)`` tuple instead of a bare record.
        """
        query = session.query(AlertConfig)
        if with_state:
            # outer join so the alert is still returned when no state row exists yet
            query = query.outerjoin(
                AlertState, AlertState.parent_id == AlertConfig.id
            ).add_entity(AlertState)

        record = query.filter(
            AlertConfig.name == name, AlertConfig.project == project
        ).one_or_none()

        if record is not None:
            return record
        # keep the tuple shape consistent for callers that unpack the result
        return (None, None) if with_state else None

    def _get_alert_record_by_id(self, session, alert_id: int) -> AlertConfig:
        """Fetch a single alert-config record by id (None when missing)."""
        query = self._query(session, AlertConfig, id=alert_id)
        return query.one_or_none()

    def store_alert_state(
        self,
        session,
        project: str,
        name: str,
        last_updated: datetime,
        count: typing.Optional[int] = None,
        active: bool = False,
        obj: typing.Optional[dict] = None,
        alert_id: typing.Optional[int] = None,
    ):
        """
        Update an existing alert state record and persist it.

        The state is located either directly by ``alert_id`` or, when not given,
        by joining through the alert config identified by ``name`` and ``project``.

        :param session: SQLAlchemy session.
        :param project: Project of the alert config (used when alert_id is None).
        :param name: Name of the alert config (used when alert_id is None).
        :param last_updated: New last-updated timestamp for the state.
        :param count: New event count; left unchanged when None.
        :param active: Whether the alert is currently active.
        :param obj: Full state object to store; left unchanged when None.
        :param alert_id: Alert config id to look the state up by directly.
        :raises mlrun.errors.MLRunNotFoundError: If no matching state record exists.
        """
        if alert_id is not None:
            query = self._query(session, AlertState).filter(
                AlertState.parent_id == alert_id
            )
        else:
            # Get the alert id using the alert name and project
            query = (
                self._query(session, AlertState)
                .join(AlertConfig, AlertConfig.id == AlertState.parent_id)
                .filter(
                    AlertConfig.name == name,
                    AlertConfig.project == project,
                )
            )

        state = query.one_or_none()
        if state is None:
            # describe the lookup that was actually performed - when looking up
            # by alert_id, name/project may be empty and would be misleading
            if alert_id is not None:
                message = f"Alert state not found for alert id: {alert_id}"
            else:
                message = (
                    f"Alert state not found for alert name: {name}, project: {project}"
                )
            raise mlrun.errors.MLRunNotFoundError(message)

        if count is not None:
            state.count = count
        state.last_updated = last_updated
        state.active = active
        if obj is not None:
            state.full_object = obj
        self._upsert(session, [state])

    def get_alert_state(self, session, alert_id: int) -> AlertState:
        """Fetch the state record of an alert, raising when none exists."""
        state_record = self._query(
            session, AlertState, parent_id=alert_id
        ).one_or_none()
        if state_record is None:
            raise mlrun.errors.MLRunNotFoundError(
                f"Alert state not found for alert id: {alert_id}"
            )
        return state_record

    def get_alert_state_dict(self, session, alert_id: int) -> dict:
        """
        Fetch the state record of an alert as a plain dict.

        :param session: SQLAlchemy session.
        :param alert_id: Id of the alert whose state is requested.
        :return: The state record as a dict.
        :raises mlrun.errors.MLRunNotFoundError: If no state record exists
            (propagated from get_alert_state).
        """
        # get_alert_state raises on a missing record and never returns None,
        # so the previous None check (and implicit None return) was dead code
        return self.get_alert_state(session, alert_id).to_dict()

    def create_alert_state(self, session, alert_id):
        """Create and persist a fresh (zero-count) state record for an alert."""
        new_state = AlertState(count=0, parent_id=alert_id)
        self._upsert(session, [new_state])

    def _delete_alert_notifications(
        self,
        session,
        alert_id: int,
        project: str,
        name: typing.Optional[str] = None,
        commit: bool = True,
    ):
        """
        Delete the notification records attached to an alert.

        :param session: SQLAlchemy session.
        :param alert_id: Parent alert id whose notifications are deleted.
        :param project: Project of the alert.
        :param name: Restrict the deletion to a single notification name.
        :param commit: When True, commit the session after deleting.
        """
        notification_records = self._get_db_notifications(
            session, AlertConfig, name, alert_id, project
        )
        for notification_record in notification_records:
            session.delete(notification_record)

        if commit:
            session.commit()

    def store_alert_activation(
        self,
        session,
        alert_data: mlrun.common.schemas.AlertConfig,
        event_data: mlrun.common.schemas.Event,
    ) -> int:
        """
        Persist a new alert activation record for a triggered alert.

        :param session: SQLAlchemy session.
        :param alert_data: The alert config that was activated.
        :param event_data: The event that triggered the activation.
        :return: The id of the stored activation record.
        """
        extra_data = {
            "criteria": alert_data.criteria.dict(),
        }

        # For JOB entities, construct entity_id as "name.uid" format
        if alert_data.entities.kind == mlrun.common.schemas.alert.EventEntityKind.JOB:
            run_name = alert_data.entities.ids[0]
            run_uid = event_data.value_dict.get("uid")
            entity_id = f"{run_name}.{run_uid}" if run_uid else run_name
        else:
            entity_id = alert_data.entities.ids[0]

        alert_activation_record = AlertActivation(
            name=alert_data.name,
            project=alert_data.project,
            activation_time=event_data.timestamp,
            entity_id=entity_id,
            entity_kind=alert_data.entities.kind,
            event_kind=event_data.kind,
            severity=alert_data.severity,
            number_of_events=alert_data.criteria.count,
            data=extra_data,
        )

        # for auto reset policy reset_time is the same as the activation time
        # for manual reset policy, we keep it empty until the alert is reset
        if alert_data.reset_policy == mlrun.common.schemas.alert.ResetPolicy.AUTO:
            alert_activation_record.reset_time = alert_activation_record.activation_time

        # we need to keep id to be able to update number_of_events for manual reset policy
        # and to update notification state when notification is sent
        # NOTE: activation time is truncated to milliseconds when being saved to the database as we use mysql.DATETIME
        # example: 2024-12-18T16:06:09.083606+00:00 will be saved as 2024-12-18T16:06:09.084000+00:00
        return self._upsert_object_and_flush_to_get_field(
            session, alert_activation_record, "id"
        )

    def update_alert_activation(
        self,
        session,
        activation_id: int,
        activation_time: datetime,
        number_of_events: Optional[int] = None,
        notifications_states: Optional[
            list[mlrun.common.schemas.NotificationState]
        ] = None,
        update_reset_time: bool = False,
    ):
        """
        Update fields of an existing alert activation record.

        :param session: SQLAlchemy session.
        :param activation_id: Id of the activation record to update.
        :param activation_time: Expected activation time, used to narrow the
            lookup; falls back to an id-only lookup when there is no match.
        :param number_of_events: New number of events; skipped when falsy.
        :param notifications_states: Notification states to store under the
            record's JSON data field.
        :param update_reset_time: When True, set the reset time to now.
        :raises mlrun.errors.MLRunNotFoundError: If no activation with the given id exists.
        """
        query = self._query(
            session, AlertActivation, id=activation_id, activation_time=activation_time
        )
        activation = query.one_or_none()
        if not activation:
            # if the activation is not found, we try to find it again by id only
            # (in case something happened with activation time)
            # usually it won't really happen, but just stay in safe side
            query = self._query(session, AlertActivation, id=activation_id)
            activation = query.one_or_none()
            if not activation:
                raise mlrun.errors.MLRunNotFoundError(
                    f"Alert activation not found for id: {activation_id}"
                )
        # NOTE(review): a zero number_of_events is skipped by this truthiness
        # check - presumably intentional, verify against callers
        if number_of_events:
            activation.number_of_events = number_of_events
        if update_reset_time:
            activation.reset_time = mlrun.utils.now_date()
        if notifications_states:
            data = activation.data or {}
            data["notifications"] = [
                notification.dict() for notification in notifications_states
            ]
            activation.data = data
            # is needed for proper update of JSON field
            flag_modified(activation, "data")
        self._upsert(session, [activation])

    def list_alert_activations(
        self,
        session: Session,
        projects_with_creation_time: list[tuple[str, datetime]],
        name: typing.Optional[str] = None,
        since: Optional[datetime] = None,
        until: Optional[datetime] = None,
        entity: typing.Optional[str] = None,
        severity: Optional[
            list[Union[mlrun.common.schemas.alert.AlertSeverity, str]]
        ] = None,
        entity_kind: Optional[
            Union[mlrun.common.schemas.alert.EventEntityKind, str]
        ] = None,
        event_kind: Optional[Union[mlrun.common.schemas.alert.EventKind, str]] = None,
        offset: typing.Optional[int] = None,
        limit: typing.Optional[int] = None,
    ) -> list[mlrun.common.schemas.AlertActivation]:
        """
        List alert activations, filtered by project (and project creation time),
        name, time range, entity, severity and kinds, newest first.

        :param session: SQLAlchemy session.
        :param projects_with_creation_time: (project, creation_time) pairs; only
            activations created after the project's creation time are returned.
        :param name: Alert name filter (supports the helper's wildcard syntax).
        :param since: Lower bound on activation time.
        :param until: Upper bound on activation time.
        :param entity: Entity id filter (supports wildcards).
        :param severity: Severities to include.
        :param entity_kind: Entity kind filter.
        :param event_kind: Event kind filter.
        :param offset: Pagination offset.
        :param limit: Pagination limit.
        :return: Matching activations as schema objects.
        """
        query = self._query(session, AlertActivation)

        # Filter alert activations for the project created after the project creation date,
        # excluding activations linked to any previous instances of the project.
        # TODO: reconsider this approach when we move alerts out of main MLRun db
        query = self._apply_alert_activation_project_filters(
            query, projects_with_creation_time
        )

        if name:
            query = query.filter(
                generate_query_predicate_for_name(AlertActivation.name, name)
            )

        if since or until:
            query = generate_time_range_query(
                query=query,
                field=AlertActivation.activation_time,
                since=since,
                until=until,
            )
        if entity:
            query = query.filter(
                generate_query_for_name_with_wildcard(AlertActivation.entity_id, entity)
            )
        if severity:
            query = query.filter(AlertActivation.severity.in_(severity))

        if event_kind:
            query = query.filter(AlertActivation.event_kind == event_kind)

        if entity_kind:
            query = query.filter(AlertActivation.entity_kind == entity_kind)

        # If the activation_time fields are the same, we need a secondary field to sort by.
        query = query.order_by(
            AlertActivation.activation_time.desc(), AlertActivation.id.desc()
        )
        query = self._paginate_query(query, offset, limit)
        return [
            self._transform_alert_activation_record_to_scheme(record)
            for record in query.all()
        ]

    def get_alert_activation(
        self,
        session,
        activation_id: int,
    ) -> mlrun.common.schemas.AlertActivation:
        """
        Fetch a single alert activation by id.

        :raises mlrun.errors.MLRunNotFoundError: When no such activation exists.
        """
        record = (
            self._query(session, AlertActivation)
            .filter(AlertActivation.id == activation_id)
            .one_or_none()
        )
        if not record:
            raise mlrun.errors.MLRunNotFoundError(
                f"Alert activation not found: activation_id={activation_id}"
            )
        return self._transform_alert_activation_record_to_scheme(record)

    @staticmethod
    def _transform_alert_activation_record_to_scheme(
        alert_activation_record: typing.Optional[AlertActivation],
    ) -> typing.Optional[mlrun.common.schemas.AlertActivation]:
        """
        Convert an alert-activation DB record into its schema form.

        :param alert_activation_record: The DB record, or None.
        :return: The schema object, or None when no record was given.
        """
        if alert_activation_record is None:
            return None

        # the data JSON column may be null (see update_alert_activation, which
        # guards with `activation.data or {}`) - guard before reading from it
        data = alert_activation_record.data or {}
        return mlrun.common.schemas.AlertActivation(
            id=alert_activation_record.id,
            name=alert_activation_record.name,
            project=alert_activation_record.project,
            severity=alert_activation_record.severity,
            # the activation_time is already stored in UTC in the database as a naive datetime.
            # we explicitly set the timezone to UTC here to make it timezone-aware, avoiding any ambiguity.
            activation_time=alert_activation_record.activation_time.replace(tzinfo=UTC),
            entity_id=alert_activation_record.entity_id,
            entity_kind=alert_activation_record.entity_kind,
            event_kind=alert_activation_record.event_kind,
            number_of_events=alert_activation_record.number_of_events,
            notifications=data.get("notifications", []),
            criteria=data.get("criteria"),
            # the reset_time is already stored in UTC (if not None) in the database as a naive datetime.
            # we explicitly set the timezone to UTC here to make it timezone-aware, avoiding any ambiguity.
            reset_time=alert_activation_record.reset_time.replace(tzinfo=UTC)
            if alert_activation_record.reset_time
            else None,
        )

    # ---- Background Tasks ----

    @retry_on_conflict
    def store_background_task(
        self,
        session,
        name: str,
        project: str,
        state: str = mlrun.common.schemas.BackgroundTaskState.running,
        timeout: typing.Optional[int] = None,
        error: typing.Optional[str] = None,
        labels: Optional[dict[str, str]] = None,
    ):
        """
        Create or update a background task record.

        Existing records are updated in place; a task that already reached a
        terminal state cannot be moved to a different state. Timeout handling
        depends on mlrun.mlconf.background_tasks.timeout_mode.

        :param session: SQLAlchemy session.
        :param name: Background task name.
        :param project: Project the task belongs to.
        :param state: New state for the task (default: running).
        :param timeout: Timeout in seconds; applied only when timeout mode is enabled.
        :param error: Error string (truncated to the DB's max length).
        :param labels: Labels to attach; applied only on creation.
        :raises mlrun.errors.MLRunRuntimeError: When attempting to change the
            state of a task that already reached a terminal state.
        """
        # truncate the error so it fits the DB column
        error = framework.db.sqldb.helpers.ensure_max_length(error)
        background_task_record = self._query(
            session,
            BackgroundTask,
            name=name,
            project=project,
        ).one_or_none()
        now = mlrun.utils.now_date()
        if background_task_record:
            # we don't want to be able to change state after it reached terminal state
            if (
                background_task_record.state
                in mlrun.common.schemas.BackgroundTaskState.terminal_states()
                and state != background_task_record.state
            ):
                raise mlrun.errors.MLRunRuntimeError(
                    "Background task already reached terminal state, can not change to another state. Failing"
                )

            if timeout and mlrun.mlconf.background_tasks.timeout_mode == "enabled":
                background_task_record.timeout = int(timeout)
            background_task_record.state = state
            background_task_record.error = error
            background_task_record.updated = now
        else:
            # ignore the requested timeout entirely when timeout mode is disabled
            if mlrun.mlconf.background_tasks.timeout_mode == "disabled":
                timeout = None

            background_task_record = BackgroundTask(
                name=name,
                project=project,
                state=state,
                created=now,
                updated=now,
                timeout=int(timeout) if timeout else None,
                error=error,
            )
            if labels:
                for label_name, label_value in labels.items():
                    background_task_record.labels.append(
                        BackgroundTaskLabel(
                            name=label_name,
                            value=label_value,
                            project=project,
                        )
                    )
            session.add(background_task_record)
        self._upsert(session, [background_task_record])

    def get_background_task(
        self,
        session: Session,
        name: str,
        project: str,
        background_task_exceeded_timeout_func,
    ) -> mlrun.common.schemas.BackgroundTask:
        """
        Fetch a background task by name and project.

        If the task's timeout has been exceeded (per the supplied predicate),
        it is lazily marked as failed before being returned.
        """
        record = self._get_background_task_record(session, name, project)
        record = self._apply_background_task_timeout(
            session,
            background_task_exceeded_timeout_func,
            record,
        )
        return self._transform_background_task_record_to_schema(record)

    def get_background_task_by_state_and_labels(
        self,
        session: Session,
        status: mlrun.common.schemas.BackgroundTaskState,
        labels: dict[str, str],
    ) -> Optional[mlrun.common.schemas.BackgroundTask]:
        """
        Fetch the single background task in the given state carrying all of the
        given labels.

        :param session: SQLAlchemy session.
        :param status: Required background task state.
        :param labels: Label name->value pairs that must all be present.
        :return: The matching task, or None when there is no match.
        :raises mlrun.errors.MLRunInvalidArgumentError: If labels is empty.
        """
        if not labels:
            raise mlrun.errors.MLRunInvalidArgumentError("Labels must not be empty")

        # join the labels table and require each (name, value) pair to match;
        # the having-count check ensures ALL requested labels are present
        label_pair = tuple_(BackgroundTaskLabel.name, BackgroundTaskLabel.value)
        query = (
            session.query(BackgroundTask)
            .filter(BackgroundTask.state == status)
            .join(BackgroundTaskLabel)
            .filter(label_pair.in_(labels.items()))
            .group_by(BackgroundTask.id)
            .having(func.count() == len(labels))
        )

        record = query.one_or_none()
        if record is None:
            return None
        return self._transform_background_task_record_to_schema(record)

    def list_background_tasks(
        self,
        session,
        project: str,
        background_task_exceeded_timeout_func,
        states: typing.Optional[list[str]] = None,
        created_from: typing.Optional[datetime] = None,
        created_to: typing.Optional[datetime] = None,
        last_update_time_from: typing.Optional[datetime] = None,
        last_update_time_to: typing.Optional[datetime] = None,
    ) -> list[mlrun.common.schemas.BackgroundTask]:
        """
        List a project's background tasks, optionally filtered by state and by
        creation/update time ranges.

        Each returned task is first passed through the lazy timeout check, which
        may flip an in-progress task to failed; tasks whose state no longer
        matches the requested states after that check are dropped.

        :param session: SQLAlchemy session.
        :param project: Project whose tasks are listed.
        :param background_task_exceeded_timeout_func: Predicate deciding whether
            a task's timeout was exceeded.
        :param states: States to include (checked both before and after the
            timeout is applied).
        :param created_from: Lower bound on creation time.
        :param created_to: Upper bound on creation time.
        :param last_update_time_from: Lower bound on last update time.
        :param last_update_time_to: Upper bound on last update time.
        :return: Matching tasks as schema objects.
        """
        background_tasks = []
        query = self._list_project_background_tasks(session, project)
        if states is not None:
            query = query.filter(BackgroundTask.state.in_(states))
        if created_from is not None:
            query = query.filter(BackgroundTask.created >= created_from)
        if created_to is not None:
            query = query.filter(BackgroundTask.created <= created_to)
        if last_update_time_from is not None:
            query = query.filter(BackgroundTask.updated >= last_update_time_from)
        if last_update_time_to is not None:
            query = query.filter(BackgroundTask.updated <= last_update_time_to)

        background_task_records = query.all()
        for background_task_record in background_task_records:
            background_task_record = self._apply_background_task_timeout(
                session,
                background_task_exceeded_timeout_func,
                background_task_record,
            )

            # retest state after applying timeout
            if states and background_task_record.state not in states:
                continue

            background_tasks.append(
                self._transform_background_task_record_to_schema(background_task_record)
            )

        return background_tasks

    def cleanup_old_background_tasks(
        self,
        db_session: Session,
        max_age_seconds: int,
    ) -> None:
        """
        Delete background tasks created more than max_age_seconds ago.

        :param db_session: SQLAlchemy session.
        :param max_age_seconds: Age threshold in seconds; older tasks are removed.
        """
        cutoff_time = mlrun.utils.now_date() - timedelta(seconds=max_age_seconds)
        deleted_count = (
            db_session.query(BackgroundTask)
            .filter(BackgroundTask.created < cutoff_time)
            .delete()
        )
        # commit before logging so the log line reflects what actually happened;
        # previously a failed commit would still have logged a successful deletion
        db_session.commit()
        logger.info("Deleted old background tasks", count=deleted_count)

    def delete_background_task(self, session: Session, name: str, project: str):
        """Delete a single background task record by name and project."""
        self._delete(session, BackgroundTask, name=name, project=project)

    def _apply_background_task_timeout(
        self,
        session: Session,
        background_task_exceeded_timeout_func: typing.Callable,
        background_task_record: BackgroundTask,
    ):
        """
        Lazily fail a background task whose timeout was exceeded.

        :param session: SQLAlchemy session.
        :param background_task_exceeded_timeout_func: Predicate receiving
            (updated, timeout, state) and returning whether the timeout passed;
            may be None to skip the check.
        :param background_task_record: The record to check.
        :return: The (possibly re-read) background task record.
        """
        if (
            background_task_exceeded_timeout_func
            and background_task_exceeded_timeout_func(
                background_task_record.updated,
                background_task_record.timeout,
                background_task_record.state,
            )
        ):
            # lazy update of state, only if get background task was requested and the timeout for the update passed
            # and the task still in progress then we change to failed
            self.store_background_task(
                session,
                background_task_record.name,
                background_task_record.project,
                mlrun.common.schemas.background_task.BackgroundTaskState.failed,
            )
            # re-read the record so the caller sees the updated state
            background_task_record = self._get_background_task_record(
                session, background_task_record.name, background_task_record.project
            )
        return background_task_record

    @staticmethod
    def _transform_background_task_record_to_schema(
        background_task_record: BackgroundTask,
    ) -> mlrun.common.schemas.BackgroundTask:
        """Convert a background-task DB record into its schema form."""
        metadata = mlrun.common.schemas.BackgroundTaskMetadata(
            id=background_task_record.id,
            name=background_task_record.name,
            project=background_task_record.project,
            created=background_task_record.created,
            updated=background_task_record.updated,
            timeout=background_task_record.timeout,
        )
        status = mlrun.common.schemas.BackgroundTaskStatus(
            state=background_task_record.state,
            error=background_task_record.error,
        )
        return mlrun.common.schemas.BackgroundTask(
            metadata=metadata,
            spec=mlrun.common.schemas.BackgroundTaskSpec(),
            status=status,
        )

    def _list_project_background_task_names(
        self, session: Session, project: str
    ) -> list[str]:
        """Return the distinct background task names of a project."""
        rows = self._query(
            session, distinct(BackgroundTask.name), project=project
        ).all()
        # each row is a single-column tuple holding the task name
        return [row[0] for row in rows]

    def _list_project_background_tasks(self, session: Session, project: str):
        """Return a query over all background task records of the given project."""
        return self._query(session, BackgroundTask, project=project)

    def _delete_project_background_tasks(self, session: Session, project: str):
        """Delete all background task records belonging to a project."""
        logger.debug("Removing project background tasks from db", project=project)
        self._delete_multi_objects(
            session=session,
            main_table=BackgroundTask,
            project=project,
        )

    def _get_background_task_record(
        self,
        session: Session,
        name: str,
        project: str,
        raise_on_not_found: bool = True,
    ) -> typing.Optional[BackgroundTask]:
        """
        Fetch a single background task record by name and project.

        :raises mlrun.errors.MLRunNotFoundError: When the record is missing and
            raise_on_not_found is set.
        """
        record = self._query(
            session, BackgroundTask, name=name, project=project
        ).one_or_none()
        if record:
            return record
        if raise_on_not_found:
            raise mlrun.errors.MLRunNotFoundError(
                f"Background task not found: name={name}, project={project}"
            )
        return None

    # ---- Run Notifications ----
    def store_run_notifications(
        self,
        session,
        notification_objects: list[mlrun.model.Notification],
        run_uid: str,
        project: str,
    ):
        """
        Store notification records for a run.

        :raises mlrun.errors.MLRunNotFoundError: When the run does not exist.
        """
        # iteration is 0, as we don't support multiple notifications per hyper param run, only for the whole run
        run = self._get_run(session, run_uid, project, 0)
        if not run:
            raise mlrun.errors.MLRunNotFoundError(
                f"Run not found: uid={run_uid}, project={project}"
            )
        self._store_notifications(session, Run, notification_objects, run.id, project)

    def store_alert_notifications(
        self,
        session,
        notification_objects: list[mlrun.model.Notification],
        alert_id: str,
        project: str,
    ):
        """
        Store notification records for an alert.

        :raises mlrun.errors.MLRunNotFoundError: When the alert does not exist.
        """
        # guard clause: fail fast when the alert is missing
        if not self._get_alert_record_by_id(session, alert_id):
            raise mlrun.errors.MLRunNotFoundError(
                f"Alert not found: uid={alert_id}, project={project}"
            )
        self._store_notifications(
            session, AlertConfig, notification_objects, alert_id, project
        )

    def _store_notifications(
        self,
        session,
        cls,
        notification_objects: list[mlrun.model.Notification],
        parent_id: Union[str, int],
        project: str,
    ):
        """
        Create or update notification records under a parent object (run/alert).

        Existing records (matched by name) are updated in place; others are
        created. All records are upserted in one call at the end.

        :param session: SQLAlchemy session.
        :param cls: Parent model class exposing a ``Notification`` relation
            (e.g. Run or AlertConfig).
        :param notification_objects: Notifications to store.
        :param parent_id: Id of the parent record the notifications belong to.
        :param project: Project of the parent.
        """
        # index the existing notification records by name for quick lookup
        db_notifications = {
            notification.name: notification
            for notification in self._get_db_notifications(
                session, cls, parent_id=parent_id
            )
        }
        notifications = []
        for notification_model in notification_objects:
            new_notification = False
            notification = db_notifications.get(notification_model.name, None)
            if not notification:
                new_notification = True
                notification = cls.Notification(
                    name=notification_model.name, parent_id=parent_id, project=project
                )

            # copy all mutable fields from the model, applying defaults for
            # the optional ones
            notification.kind = notification_model.kind
            notification.message = notification_model.message or ""
            notification.severity = (
                notification_model.severity
                or mlrun.common.schemas.NotificationSeverity.INFO
            )
            notification.when = ",".join(notification_model.when or [])
            notification.condition = notification_model.condition or ""
            notification.secret_params = notification_model.secret_params
            notification.params = notification_model.params
            notification.status = (
                notification_model.status
                or mlrun.common.schemas.NotificationStatus.PENDING
            )
            notification.sent_time = notification_model.sent_time
            notification.reason = notification_model.reason

            logger.debug(
                f"Storing {'new' if new_notification else 'existing'} notification",
                notification_name=notification.name,
                notification_status=notification.status,
                parent_id=parent_id,
                project=project,
            )
            notifications.append(notification)

        self._upsert(session, notifications)

    def list_run_notifications(
        self,
        session,
        run_uid: str,
        project: str = "",
    ) -> list[mlrun.model.Notification]:
        """List the notifications of a run; empty list when the run is missing."""
        # iteration is 0, as we don't support multiple notifications per hyper param run, only for the whole run
        run = self._get_run(session, run_uid, project, 0)
        if not run:
            return []

        notification_records = self._query(
            session, Run.Notification, parent_id=run.id
        ).all()
        return [
            self._transform_notification_record_to_schema(record)
            for record in notification_records
        ]

    def delete_run_notifications(
        self,
        session,
        project: str,
        name: typing.Optional[str] = None,
        run_uid: typing.Optional[str] = None,
        commit: bool = True,
    ):
        """
        Delete notification records of runs in a project, optionally scoped to
        a single run and/or a single notification name.

        :raises mlrun.errors.MLRunMissingProjectError: When project is empty.
        :raises mlrun.errors.MLRunNotFoundError: When run_uid is given but no
            such run exists.
        """
        if not project:
            raise mlrun.errors.MLRunMissingProjectError()

        run_id = None
        if run_uid:
            # iteration is 0, as we don't support multiple notifications per hyper param run, only for the whole run
            run = self._get_run(session, run_uid, project, 0)
            if not run:
                raise mlrun.errors.MLRunNotFoundError(
                    f"Run not found: uid={run_uid}, project={project}"
                )
            run_id = run.id

        # TODO: add project permissions handling like in the list methods
        if project == "*":
            project = None

        notification_records = self._get_db_notifications(
            session, Run, name, run_id, project
        )
        for notification_record in notification_records:
            session.delete(notification_record)

        if commit:
            session.commit()

    def set_run_notifications(
        self,
        session: Session,
        project: str,
        notifications: list[mlrun.model.Notification],
        identifier: mlrun.common.schemas.RunIdentifier,
        **kwargs,
    ):
        """
        Set notifications for a run. This will replace any existing notifications.
        :param session: SQLAlchemy session
        :param project: Project name
        :param notifications: List of notifications to set
        :param identifier: Run identifier
        :param kwargs: Ignored additional arguments (for interfacing purposes)
        :raises mlrun.errors.MLRunNotFoundError: If no run matches the identifier
        """
        # NOTE(review): iteration is None here, unlike the 0 passed by the other
        # notification helpers — presumably this matches the run regardless of
        # iteration; verify against _get_run
        run = self._get_run(session, identifier.uid, project, None)
        if not run:
            raise mlrun.errors.MLRunNotFoundError(
                f"Run not found: project={project}, uid={identifier.uid}"
            )

        # mirror the new notification list into the run's stored spec so the run
        # object and the notification records stay consistent
        run.struct.setdefault("spec", {})["notifications"] = [
            notification.to_dict() for notification in notifications
        ]

        # update run, delete and store notifications all in one transaction.
        # using session.add instead of upsert, so we don't commit the run.
        # the commit will happen at the end (in store_run_notifications, or manually at the end).
        session.add(run)
        self.delete_run_notifications(
            session, run_uid=run.uid, project=project, commit=False
        )
        if notifications:
            self.store_run_notifications(
                session,
                notification_objects=notifications,
                run_uid=run.uid,
                project=project,
            )
        self._commit(session, [run], ignore=True)

    # ---- Data Store ----
    def store_datastore_profile(
        self, session, info: mlrun.common.schemas.DatastoreProfile
    ):
        """
        Create or replace a datastore profile.
        :param session: SQLAlchemy session
        :param info: datastore profile
        :returns: None
        """
        existing_profile = self._query(
            session, DatastoreProfile, name=info.name, project=info.project
        ).one_or_none()
        if existing_profile is None:
            new_profile = DatastoreProfile(
                name=info.name,
                type=info.type,
                project=info.project,
                full_object=info.object,
            )
            self._upsert(session, [new_profile])
        else:
            # update the mutable fields in place and commit the existing record
            existing_profile.type = info.type
            existing_profile.full_object = info.object
            self._commit(session, [existing_profile])

    def get_datastore_profile(
        self,
        session,
        profile: str,
        project: str,
    ):
        """
        Get a datastore profile.

        :param session: SQLAlchemy session
        :param profile: name of the profile
        :param project: Name of the project
        :returns: The datastore profile schema object
        :raises mlrun.errors.MLRunNotFoundError: if no such profile exists in the project
        """
        # fetch the record once - the previous code called first() twice, which
        # executed the query against the DB twice
        profile_record = self._query(
            session, DatastoreProfile, name=profile, project=project
        ).first()
        if profile_record:
            return self._transform_datastore_profile_model_to_schema(profile_record)
        raise mlrun.errors.MLRunNotFoundError(
            f"Datastore profile '{profile}' not found in project '{project}'"
        )

    def delete_datastore_profile(
        self,
        session,
        profile: str,
        project: str,
    ):
        """
        Delete a datastore profile.

        :param session: SQLAlchemy session
        :param profile: name of the profile
        :param project: Name of the project
        :raises mlrun.errors.MLRunNotFoundError: if no such profile exists in the project
        """
        # fetch the record once - the previous code called first() twice, which
        # executed the query against the DB twice
        profile_record = self._query(
            session, DatastoreProfile, name=profile, project=project
        ).first()
        if not profile_record:
            raise mlrun.errors.MLRunNotFoundError(
                f"Datastore profile '{profile}' not found in project '{project}'"
            )
        session.delete(profile_record)
        session.commit()

    def list_datastore_profiles(
        self,
        session,
        project: str,
    ):
        """
        List all datastore profiles for a project.
        :param session: SQLAlchemy session
        :param project: Name of the project
        :returns: List of DatastoreProfile objects (only the public portion of it)
        """
        return [
            self._transform_datastore_profile_model_to_schema(datastore_record)
            for datastore_record in self._query(
                session, DatastoreProfile, project=project
            )
        ]

    def _delete_project_datastore_profiles(
        self,
        session,
        project: str,
    ):
        """
        Delete every datastore profile belonging to a project.
        :param session: SQLAlchemy session
        :param project: Name of the project
        :returns: None
        """
        logger.debug("Removing project datastore profiles from db", project=project)
        self._delete_multi_objects(
            session=session,
            main_table=DatastoreProfile,
            project=project,
        )

    @staticmethod
    def _transform_datastore_profile_model_to_schema(
        db_object,
    ) -> mlrun.common.schemas.DatastoreProfile:
        """Convert a DatastoreProfile DB record into its public schema object."""
        return mlrun.common.schemas.DatastoreProfile(
            name=db_object.name,
            project=db_object.project,
            type=db_object.type,
            object=db_object.full_object,
        )

    # --- Pagination ---
    def store_paginated_query_cache_record(
        self,
        session,
        user: str,
        function: str,
        current_page: int,
        page_size: int,
        kwargs: dict,
        pagination_cache_record: typing.Optional[
            framework.db.sqldb.models.PaginationCache
        ] = None,
    ):
        """Create or update a pagination cache record and return its key hash.

        :param session: SQLAlchemy session
        :param user: User issuing the paginated query
        :param function: Name of the paginated function
        :param current_page: Page the caller is currently on
        :param page_size: Number of items per page
        :param kwargs: Query arguments that identify the paginated query
        :param pagination_cache_record: Optional pre-fetched record to update
        :returns: The cache key hash
        """
        self._validate_integer_max_value(
            PaginationCache.__table__.c.current_page, current_page
        )
        self._validate_integer_max_value(
            PaginationCache.__table__.c.page_size, page_size
        )

        # the cache key hashes the query identity (user, function, page size and
        # kwargs); note that current_page is deliberately not part of the key
        cache_key = hashlib.sha256(
            f"{user}/{function}/{page_size}/{kwargs}".encode()
        ).hexdigest()

        if not pagination_cache_record:
            # in this case, we just lock for update to make sure no one else is writing to it
            pagination_cache_record = self.get_paginated_query_cache_record(
                session, key=cache_key, for_update=True
            )

        if not pagination_cache_record:
            record_to_store = PaginationCache(
                key=cache_key,
                user=user,
                function=function,
                current_page=current_page,
                page_size=page_size,
                kwargs=kwargs,
            )
        else:
            pagination_cache_record.current_page = current_page
            pagination_cache_record.last_accessed = datetime.now(UTC)
            record_to_store = pagination_cache_record

        self._upsert(session, [record_to_store])
        return cache_key

    def get_paginated_query_cache_record(
        self,
        session,
        key: str,
        for_update: bool = False,
    ) -> typing.Optional[PaginationCache]:
        """Fetch a single pagination cache record by key, optionally row-locked for update."""
        record_query = self._query(session, PaginationCache, key=key)
        if for_update:
            # populate_existing refreshes any already-loaded instance while taking the row lock
            record_query = record_query.populate_existing().with_for_update()
        return record_query.one_or_none()

    def list_paginated_query_cache_record(
        self,
        session,
        key: typing.Optional[str] = None,
        user: typing.Optional[str] = None,
        function: typing.Optional[str] = None,
        last_accessed_before: typing.Optional[datetime] = None,
        order_by: typing.Optional[mlrun.common.schemas.OrderType] = None,
        as_query: bool = False,
    ):
        """List pagination cache records matching the given optional filters.

        When as_query is True, return the query object itself instead of the results.
        """
        records_query = self._query(session, PaginationCache)

        # collect only the filters whose argument was actually provided
        conditions = []
        if key:
            conditions.append(PaginationCache.key == key)
        if user:
            conditions.append(PaginationCache.user == user)
        if function:
            conditions.append(PaginationCache.function == function)
        if last_accessed_before:
            conditions.append(PaginationCache.last_accessed < last_accessed_before)
        if conditions:
            records_query = records_query.filter(*conditions)

        if order_by:
            records_query = records_query.order_by(
                order_by.to_order_by_predicate(PaginationCache.last_accessed)
            )

        return records_query if as_query else records_query.all()

    def delete_paginated_query_cache_record(
        self,
        session,
        key: str,
    ):
        """Remove the pagination cache record matching the given key."""
        self._delete(session, PaginationCache, key=key)

    def store_time_window_tracker_record(
        self,
        session: Session,
        key: str,
        timestamp: typing.Optional[datetime] = None,
        max_window_size_seconds: typing.Optional[int] = None,
    ) -> TimeWindowTracker:
        """Create or update a time window tracker record and return it.

        Only the fields that were explicitly provided are overwritten.
        """
        tracker = self.get_time_window_tracker_record(
            session, key=key, raise_on_not_found=False
        ) or TimeWindowTracker(key=key)

        if timestamp:
            tracker.timestamp = timestamp
        if max_window_size_seconds:
            tracker.max_window_size_seconds = max_window_size_seconds

        self._upsert(session, [tracker])
        return tracker

    def get_time_window_tracker_record(
        self, session, key: str, raise_on_not_found: bool = True
    ) -> TimeWindowTracker:
        """Fetch a time window tracker record by key.

        Returns None when the record is missing and raise_on_not_found is False.
        """
        tracker = self._query(session, TimeWindowTracker, key=key).one_or_none()
        if tracker is None and raise_on_not_found:
            raise mlrun.errors.MLRunNotFoundError(
                f"Time window tracker record not found: key={key}"
            )
        return tracker

    def store_model_endpoints(
        self,
        session,
        model_endpoints: list[mlrun.common.schemas.ModelEndpoint],
        function_name: str,
        function_tag: str,
        project: str,
    ) -> None:
        """Store a batch of model endpoints and tag them all as "latest"."""
        function_record = self._get_mep_function(
            session=session,
            function_name=function_name,
            function_tag=function_tag,
            project=project,
        )
        # the suffix disambiguates the tag names of endpoints served by
        # different functions/tags
        obj_name_suffix = (
            f"{function_record.name}-{function_tag}"
            if function_record is not None
            else None
        )

        mep_records = [
            self._create_mep_record_to_store(model_endpoint, function_record)
            for model_endpoint in model_endpoints
        ]
        self._upsert_batch(session, mep_records)

        self.tag_objects_v2(
            session,
            mep_records,
            project,
            mlrun.common.constants.RESERVED_TAG_NAME_LATEST,
            obj_name_attribute=["name"],
            obj_name_suffix=obj_name_suffix,
        )

    def store_model_endpoint(
        self,
        session,
        model_endpoint: mlrun.common.schemas.ModelEndpoint,
    ) -> str:
        """Store a single model endpoint, tag it as "latest" and return its uid."""
        function_record = self._get_mep_function(
            session=session,
            function_name=model_endpoint.spec.function_name,
            function_tag=model_endpoint.spec.function_tag,
            project=model_endpoint.metadata.project,
        )
        # the suffix disambiguates the tag names of endpoints served by
        # different functions/tags
        obj_name_suffix = (
            f"{function_record.name}-{model_endpoint.spec.function_tag}"
            if function_record is not None
            else None
        )
        mep_record = self._create_mep_record_to_store(model_endpoint, function_record)
        logger.debug(
            "Storing Model Endpoint Before upsert",
            metadata=model_endpoint.metadata,
        )
        self._upsert(session, [mep_record])
        self.tag_objects_v2(
            session,
            [mep_record],
            model_endpoint.metadata.project,
            mlrun.common.constants.RESERVED_TAG_NAME_LATEST,
            obj_name_attribute=["name"],
            obj_name_suffix=obj_name_suffix,
        )
        return mep_record.uid

    def _get_mep_function(
        self, session, function_name, function_tag, project
    ) -> Optional[Function]:
        """
        Extract the unversioned function record that matches the given name and tag,
        and return the function record, or None when no match exists.
        """
        tag = function_tag or mlrun.common.constants.RESERVED_TAG_NAME_LATEST
        # first try the regular lookup, then fall back to the unversioned record
        # whose hash key is derived from the tag
        for extra_kwargs in (
            {},
            {"hash_key": f"{unversioned_tagged_object_uid_prefix}{tag}"},
        ):
            try:
                function_record, _ = self._get_function_db_object(
                    session,
                    name=function_name,
                    project=project,
                    tag=tag,
                    **extra_kwargs,
                )
                return function_record
            except mlrun.errors.MLRunNotFoundError:
                continue
        return None

    @staticmethod
    def _create_mep_record_to_store(
        model_endpoint: mlrun.common.schemas.ModelEndpoint,
        function_record: Optional[Function] = None,
    ) -> ModelEndpoint:
        """Build a ModelEndpoint DB record (not yet persisted) from the schema object.

        :raises mlrun.errors.MLRunInvalidArgumentError: if name or project is missing
        """
        metadata = model_endpoint.metadata
        if not metadata.name or not metadata.project:
            raise mlrun.errors.MLRunInvalidArgumentError(
                "Model endpoint name and project must be provided"
            )
        logger.debug(
            "Storing Model Endpoint to DB",
            metadata=metadata,
        )
        now = datetime.now(UTC)
        mep_record = ModelEndpoint(
            uid=metadata.uid or None,
            name=metadata.name,
            project=metadata.project,
            function_id=function_record.id if function_record else None,
            model_id=model_endpoint.spec._model_id or None,
            endpoint_type=metadata.endpoint_type.value,
            mode=metadata.mode.value,
            created=now,
            updated=now,
        )

        update_labels(mep_record, metadata.labels)
        # the full endpoint object is serialized into the record's struct
        mep_record.struct = model_endpoint.flat_dict()

        return mep_record

    def get_model_endpoint(
        self,
        session,
        project: str,
        name: str,
        function_name: Optional[str] = None,
        function_tag: typing.Optional[str] = None,
        uid: typing.Optional[str] = None,
    ) -> mlrun.common.schemas.ModelEndpoint:
        """Fetch a single model endpoint and return it as a schema object.

        :raises mlrun.errors.MLRunNotFoundError: if no matching endpoint exists
        """
        mep_record = self._get_model_endpoint(
            session, project, name, function_name, function_tag, uid
        )
        if mep_record:
            return self._transform_model_endpoint_model_to_schema(mep_record)
        raise mlrun.errors.MLRunNotFoundError(
            f"Model Endpoint not found in project {project} with name {name} under function {function_name}"
        )

    def update_model_endpoints(
        self,
        session,
        project: str,
        attributes: dict[str, dict[str, Any]],
    ) -> None:
        """Apply per-uid attribute updates to model endpoints in a single batch.

        :param attributes: mapping of endpoint uid to the attributes to update for it
        """
        update_time = datetime.now(UTC)
        updated_records = [
            self._update_mep_record(
                session, mep_record, attributes.get(mep_record.uid, {}), update_time
            )
            for mep_record in self._find_model_endpoints(
                session=session,
                uids=list(attributes.keys()),
                project=project,
            )
        ]
        self._upsert_batch(session, updated_records)

    def update_model_endpoint(
        self,
        session,
        project: str,
        name: str,
        attributes: dict,
        function_name: Optional[str] = None,
        function_tag: typing.Optional[str] = None,
        uid: typing.Optional[str] = None,
    ) -> str:
        """Update a single model endpoint's attributes and return its uid.

        :raises mlrun.errors.MLRunNotFoundError: if no matching endpoint exists
        """
        mep_record = self._get_model_endpoint(
            session, project, name, function_name, function_tag, uid
        )
        if not mep_record:
            raise mlrun.errors.MLRunNotFoundError(
                f"Model Endpoint not found in project {project} with name {name} under function {function_name}"
            )

        mep_record = self._update_mep_record(
            session, mep_record, attributes, datetime.now(UTC)
        )
        self._upsert(session, [mep_record])
        return mep_record.uid

    def _update_mep_record(
        self, session, mep_record: ModelEndpoint, attributes: dict, updated: datetime
    ) -> ModelEndpoint:
        """Apply an attributes dict to a model endpoint record (not yet persisted).

        The attributes are split into plain struct updates, ORM-column updates
        (mirrored into the struct as well), labels, and a model path which is
        resolved into a model artifact link.
        """
        attributes, schema_attr, labels, model_path = self._split_mep_update_attr(
            attributes
        )
        struct = mep_record.struct
        for key, val in attributes.items():
            update_in(struct, key, val)
        for key, val in schema_attr.items():
            # keep the ORM column and the serialized struct in sync
            setattr(mep_record, key, val)
            update_in(struct, key, val)
        if labels is not None and isinstance(labels, dict):
            update_labels(mep_record, labels)
            update_in(struct, "labels", labels)
        if model_path is not None:
            # NOTE(review): _split_mep_update_attr returns "" (not None) when no
            # model path was given, so this branch also runs then — presumably a
            # no-op since "" is not a store uri; verify against is_store_uri
            self._update_model_link(session, mep_record, model_path)
        mep_record.struct = struct
        mep_record.updated = updated
        return mep_record

    def _split_mep_update_attr(self, attributes: dict):
        """Split a model endpoint attributes dict into its update categories.

        :returns: tuple of (remaining struct attributes, ORM-column attributes,
                  labels or None, model path or "" when absent)
        """
        labels = None
        if "labels" in attributes:
            # labels can be None, so if labels key exists, return {} and override existing labels.
            labels = attributes.pop(ModelEndpointSchema.LABELS) or {}
        model_path = attributes.pop(ModelEndpointSchema.MODEL_PATH, "")
        # attributes that map to real ORM columns are peeled off into their own dict
        schema_attr = {
            key: attributes.pop(key)
            for key in list(attributes)
            if hasattr(ModelEndpoint, key)
        }
        return attributes, schema_attr, labels, model_path

    def list_model_endpoints(
        self,
        session,
        project: str,
        names: typing.Optional[list[str]] = None,
        function_name: typing.Optional[str] = None,
        function_tag: typing.Optional[str] = None,
        model_name: typing.Optional[str] = None,
        model_tag: typing.Optional[str] = None,
        top_level: typing.Optional[bool] = None,
        modes: typing.Optional[list[mlrun.common.schemas.EndpointMode]] = None,
        labels: typing.Optional[list[str]] = None,
        start: typing.Optional[datetime] = None,
        end: typing.Optional[datetime] = None,
        uids: typing.Optional[list[str]] = None,
        latest_only: bool = False,
        offset: typing.Optional[int] = None,
        limit: typing.Optional[int] = None,
        order_by: typing.Optional[str] = None,
        as_dict: bool = False,
    ) -> Union[mlrun.common.schemas.ModelEndpointList, dict[str, str]]:
        """List model endpoints matching the given filters.

        Returns a ModelEndpointList schema object by default; when as_dict is
        True, returns a mapping of "<project>-<function>-<tag>-<name>" keys to
        endpoint uids instead.
        """
        mep_records = self._find_model_endpoints(
            session=session,
            names=names,
            project=project,
            labels=labels,
            function_name=function_name,
            function_tag=function_tag,
            model_name=model_name,
            model_tag=model_tag,
            top_level=top_level,
            modes=modes,
            start=start,
            end=end,
            uids=uids,
            latest_only=latest_only,
            offset=offset,
            limit=limit,
            order_by=order_by,
        )
        if as_dict:
            endpoint_uid_by_key: dict[str, str] = {}
            for mep_record in mep_records:
                function_tag_value = self._get_obj_tag_prioritizing_user_tag(
                    mep_record.function.tags
                )
                dict_key = (
                    f"{mep_record.project}-{mep_record.function.name}-"
                    f"{function_tag_value}-{mep_record.name}"
                )
                endpoint_uid_by_key[dict_key] = mep_record.uid
            return endpoint_uid_by_key

        endpoints_list = mlrun.common.schemas.ModelEndpointList(endpoints=[])
        for mep_record in mep_records:
            endpoints_list.endpoints.append(
                self._transform_model_endpoint_model_to_schema(mep_record)
            )
        return endpoints_list

    def delete_model_endpoint(
        self,
        session,
        project: str,
        name: str,
        function_name: Optional[str] = None,
        function_tag: typing.Optional[str] = None,
        uid: typing.Optional[str] = None,
    ) -> None:
        """Delete a model endpoint by uid, or every instance of it when uid is "*"."""
        self._check_model_endpoint_params(uid, function_name, function_tag)
        logger.debug(
            "Removing model endpoint from db", project=project, name=name, uid=uid
        )

        if uid == "*":
            # delete every endpoint instance that matches the function filters
            instances_query = self._get_mep_instances(
                session,
                cls=ModelEndpoint,
                project=project,
                name=name,
                function_name=function_name,
                function_tag=function_tag,
                _get_query=True,
            )
            self._delete(
                session,
                ModelEndpoint,
                query=instances_query,
                project=project,
                name=name,
                function_name=function_name,
                function_tag=function_tag,
            )
        else:
            self._delete(
                session,
                ModelEndpoint,
                project=project,
                name=name,
                uid=uid,
            )

    def delete_model_endpoints(
        self,
        session: Session,
        project: str,
        uids: typing.Optional[list[str]] = None,
    ) -> None:
        """Delete model endpoints in a project (all of them, or only the given uids),
        including their tag and label records.
        """
        logger.debug("Removing model endpoints from db", project=project)

        # only filter on uid when a uid list was actually provided
        identifier_column = ModelEndpoint.uid if uids else None
        self._delete_multi_objects(
            session=session,
            main_table=ModelEndpoint,
            related_tables=[ModelEndpoint.Tag, ModelEndpoint.Label],
            project=project,
            main_table_identifier=identifier_column,
            main_table_identifier_values=uids,
        )

    def delete_feature_sets(
        self,
        session: Session,
        project: str,
        uids: typing.Optional[list[str]] = None,
    ) -> None:
        """Delete feature sets in a project (all of them, or only the given uids),
        including their tag and label records.
        """
        logger.debug("Removing feature sets from db", project=project)

        # only filter on uid when a uid list was actually provided
        identifier_column = FeatureSet.uid if uids else None
        self._delete_multi_objects(
            session=session,
            main_table=FeatureSet,
            related_tables=[FeatureSet.Tag, FeatureSet.Label],
            project=project,
            main_table_identifier=identifier_column,
            main_table_identifier_values=uids,
        )

    def get_system_id(self, session: Session) -> typing.Optional[str]:
        """Return the stored system id, or None if one was never stored."""
        record = (
            self._query(session, SystemMetadata)
            .filter(SystemMetadata.key == framework.constants.SYSTEM_ID_KEY)
            .one_or_none()
        )
        if record is None:
            return None
        return record.value

    def store_system_id(self, session: Session, system_id: str):
        """Persist the given system id under the reserved system-metadata key."""
        logger.debug("Storing a new system id in DB", system_id=system_id)

        record = SystemMetadata(
            key=framework.constants.SYSTEM_ID_KEY,
            value=system_id,
        )
        self._upsert(session, [record])

    # ---- Utils ----
    def delete_table_records(
        self,
        session: Session,
        table: type[Base],
        raise_on_not_exists=True,
    ):
        """Delete all records from a table.

        :param session: SQLAlchemy session
        :param table: the table class
        :param raise_on_not_exists: raise an error if the table does not exist
        """
        # delegate to the name-based variant using the ORM class's table name
        return self.delete_table_records_by_name(
            session,
            table.__tablename__,
            raise_on_not_exists,
        )

    def delete_table_records_by_name(
        self,
        session: Session,
        table_name: str,
        raise_on_not_exists=True,
    ):
        """
        Delete all records from a table, addressed by name.

        :param session: SQLAlchemy session
        :param table_name: table name
        :param raise_on_not_exists: raise an error if the table does not exist
        """
        # sanitize table name to prevent SQL injection, by removing all non-alphanumeric characters or underscores
        sanitized_table_name = re.sub(r"[^a-zA-Z0-9_]", "", table_name)

        # checking if the table exists can also help prevent SQL injection
        if not self._is_table_exists(session, sanitized_table_name):
            if raise_on_not_exists:
                raise mlrun.errors.MLRunNotFoundError(
                    f"Table not found: {sanitized_table_name}"
                )
            logger.warning(
                "Table not found, skipping delete",
                table_name=sanitized_table_name,
            )
            return

        session.execute(text(f"DELETE FROM {sanitized_table_name}"))
        session.commit()

    @staticmethod
    def _is_table_exists(session: Session, table_name: str) -> bool:
        """
        Check if a table exists in the database bound to the session.

        :param session: SQLAlchemy session
        :param table_name: table name
        :return: True if the table exists, False otherwise
        """
        return sqlalchemy.inspect(subject=session.bind).has_table(
            table_name=table_name,
        )

    @staticmethod
    def _paginate_query(
        query, offset: typing.Optional[int] = None, limit: typing.Optional[int] = None
    ):
        if offset:
            query = query.offset(offset)

        if limit == 0:
            raise mlrun.errors.MLRunInvalidArgumentError("Limit cannot be 0")
        elif limit:
            query = query.limit(limit)

        return query

    @staticmethod
    def _validate_integer_max_value(column: Column, value: int):
        """
        Validate that the value of a column does not exceed the max allowed integer value for that column's type.

        :param column: The SQLAlchemy column to check (e.g., PaginationCache.__table__.c.current_page).
        :param value: The value to validate.
        :raises: MLRunInvalidArgumentError if value exceeds the max allowed integer value for the column's type.
        """
        # BigInteger subclasses Integer in SQLAlchemy, so it must be checked
        # first - otherwise the Integer branch would match BigInteger columns
        # and incorrectly cap them at the 32-bit max
        if isinstance(column.type, sqlalchemy.BigInteger):
            # Validate against 64-bit max
            if value > MAX_INT_64:
                raise mlrun.errors.MLRunInvalidArgumentError(
                    f"The '{column.name}' field value must be less than or equal to {MAX_INT_64}."
                )

        elif isinstance(column.type, sqlalchemy.Integer):
            # Validate against 32-bit max
            if value > MAX_INT_32:
                raise mlrun.errors.MLRunInvalidArgumentError(
                    f"The '{column.name}' field value must be less than or equal to {MAX_INT_32}."
                )

        else:
            raise mlrun.errors.MLRunInvalidArgumentError(
                f"Unsupported column type '{column.type}' for validation."
            )

    def _update_model_link(self, session, mep_record: ModelEndpoint, model_path: str):
        """Resolve a model store uri to its artifact DB record and link it to the endpoint.

        Non-store-uri paths are ignored.
        """
        if not mlrun.datastore.is_store_uri(model_path):
            return
        _, model_uri = mlrun.datastore.parse_store_uri(model_path)
        artifact_project, artifact_key, iteration, tag, tree, artifact_uid = (
            parse_artifact_uri(model_uri, mep_record.project)
        )
        artifact_record = self.read_artifact(
            session=session,
            key=artifact_key,
            tag=tag,
            iter=iteration,
            project=artifact_project,
            producer_id=tree,
            uid=artifact_uid,
            as_record=True,
        )
        mep_record.model_id = artifact_record.id

    def update_db_object(self, session, model, filters=None, **fields):
        """Helper function to update fields of a database object and commit the changes.

        :param session: SQLAlchemy session
        :param model: ORM model class of the object to update
        :param filters: optional dict of column filters identifying the object
        :param fields: field names and values to set on the object
        :raises ValueError: if no matching record is found
        """
        query = self._query(session, model)

        # Apply filters if provided
        if filters:
            query = query.filter_by(**filters)

        db_object = query.one_or_none()

        if not db_object:
            raise ValueError(f"No record found for model {model.__name__}")

        for field, value in fields.items():
            setattr(db_object, field, value)

        session.add(db_object)
        # _commit expects a list of objects - every other call site in this
        # class passes one; the previous code passed the bare object
        self._commit(session, [db_object])
        session.flush()

    def _ensure_datetime_obj(self, date: typing.Union[str, datetime]) -> datetime:
        """
        Ensure the input date is a datetime object. If it's a string, try to parse it as ISO 8601.
        """
        if isinstance(date, str):
            try:
                date = datetime.fromisoformat(date)

            except ValueError:
                raise mlrun.errors.MLRunInvalidArgumentError(
                    f"Invalid date format: {date}. Expected ISO 8601 format."
                )
        elif not isinstance(date, datetime):
            raise mlrun.errors.MLRunInvalidArgumentError(
                f"Invalid date type: {type(date)}. Expected str or datetime."
            )
        return date


class MySQLDB(SQLDB):
    """MySQL-specific DB layer adding native RANGE-partition management."""

    @staticmethod
    def create_partitions(
        session: Session,
        table_name: str,
        partitioning_information_list: list[tuple[str, str]],
    ):
        """
        Add RANGE partitions to a MySQL table.

        * Duplicate name with different bound → ValueError.
        * Duplicate name with same bound     → ignored.
        * New upper bounds must be strictly greater than every existing bound.

        :param session: SQLAlchemy session bound to a MySQL engine.
        :param table_name: Name of the partitioned table.
        :param partitioning_information_list: (partition_name, upper_bound) pairs,
            where the upper bound is the decimal string of an integer.
        :raises ValueError: If a partition name already exists with a different bound.
        """
        engine = session.get_bind()
        preparer = IdentifierPreparer(engine.dialect)
        quoted_table = preparer.quote(table_name)

        # existing_partitions = {partition_name: upper_bound_int}
        existing_partitions = MySQLDB._get_partition_metadata(session, table_name)
        highest_existing_bound = max(existing_partitions.values(), default=-1)
        # DDL cannot be parameterized, so render integer bounds as SQL literals
        literal_int = types.Integer().literal_processor(engine.dialect)

        sql_fragments: list[tuple[int, str]] = []  # (upper_bound_int, "…SQL…")

        for partition_name, upper_bound_text in partitioning_information_list:
            upper_bound_int = int(upper_bound_text)

            # — duplicate-name checks -------------------------------------------------
            if partition_name in existing_partitions:
                if existing_partitions[partition_name] != upper_bound_int:
                    raise ValueError(
                        f"Partition {partition_name} already exists with a different "
                        f"bound: {existing_partitions[partition_name]} vs {upper_bound_int}"
                    )
                continue  # same bound → nothing to add

            # — ascending-order rule --------------------------------------------------
            if upper_bound_int <= highest_existing_bound:
                continue  # not strictly higher → skip

            sql_fragment = (
                f"PARTITION `{partition_name}` "
                f"VALUES LESS THAN ({literal_int(upper_bound_int)})"
            )
            sql_fragments.append((upper_bound_int, sql_fragment))
            highest_existing_bound = upper_bound_int  # advance cursor

        if not sql_fragments:
            return

        # apply in ascending bound order
        sql_fragments.sort(key=lambda pair: pair[0])
        alter_clause = ", ".join(fragment for _, fragment in sql_fragments)

        logger.info(
            "Creating new MySQL partitions",
            table_name=table_name,
            partitions_sql=alter_clause,
        )
        session.execute(
            text(f"ALTER TABLE {quoted_table} ADD PARTITION ({alter_clause})")
        )

    @staticmethod
    def drop_partitions(
        session: Session,
        table_name: str,
        cutoff_partition_name: str,
    ):
        """
        Execute the drop operation for partitions older than the cutoff partition name.

        :param session: SQLAlchemy session.
        :param table_name: The name of the table with partitions.
        :param cutoff_partition_name: The cutoff partition name for dropping old partitions.
        """
        engine = session.get_bind()
        preparer = IdentifierPreparer(engine.dialect)
        safe_table = preparer.quote(table_name)

        names = MySQLDB._get_partitions_older_than(
            session=session,
            table_name=table_name,
            cutoff_partition_name=cutoff_partition_name,
        )
        if not names:
            return

        # Partition names come from INFORMATION_SCHEMA, so backtick-quoting
        # them directly is safe here.
        parts = ", ".join(f"`{name}`" for name in names)
        logger.debug(
            "Dropping partitions for table",
            table_name=table_name,
            parts=parts,
        )
        session.execute(text(f"ALTER TABLE {safe_table} DROP PARTITION {parts}"))

    @staticmethod
    def get_partition_expression_for_table(
        session: Session,
        table_name: str,
    ) -> str:
        """
        Returns partitioning expression for a given table.

        :param session: SQLAlchemy session.
        :param table_name: Name of the table.
        """
        # Filter by the current schema (DATABASE()) so a same-named table in
        # another schema on the server cannot shadow the result — consistent
        # with _get_partition_metadata below.
        return session.execute(
            text("""
                SELECT PARTITION_EXPRESSION
                FROM information_schema.PARTITIONS
                WHERE TABLE_SCHEMA = DATABASE()
                  AND TABLE_NAME = :table_name
            """),
            {"table_name": table_name},
        ).scalar()

    @staticmethod
    def table_exists(
        session: Session,
        table_name: str,
    ) -> bool:
        """
        Checks if a table exists in the current database schema.

        :param session: SQLAlchemy session.
        :param table_name: Name of the table to check.
        """
        # Must be scoped to DATABASE(); information_schema.tables lists tables
        # from every schema on the server, which previously caused false
        # positives for tables existing only in other schemas.
        result = session.execute(
            text("""
                SELECT COUNT(*)
                FROM information_schema.tables
                WHERE TABLE_SCHEMA = DATABASE()
                  AND TABLE_NAME = :table_name
            """),
            {
                "table_name": table_name,
            },
        ).scalar()
        return result > 0

    @staticmethod
    def _get_partitions_older_than(
        session: Session,
        table_name: str,
        cutoff_partition_name: str,
    ) -> list[str]:
        """
        Return names of MySQL partitions whose name is lexicographically
        less than *cutoff_partition_name*.

        :param session: SQLAlchemy session.
        :param table_name: Name of the partitioned table.
        :param cutoff_partition_name: Exclusive upper bound on partition names.
        """
        rows = session.execute(
            text(
                """
                SELECT PARTITION_NAME
                FROM INFORMATION_SCHEMA.PARTITIONS
                WHERE TABLE_SCHEMA = DATABASE()
                  AND TABLE_NAME = :table_name
                  AND PARTITION_NAME < :cutoff
                  AND PARTITION_NAME IS NOT NULL
                """
            ),
            {"table_name": table_name, "cutoff": cutoff_partition_name},
        ).fetchall()

        return [row[0] for row in rows]

    @staticmethod
    def _get_partition_metadata(
        session: Session,
        table_name: str,
    ) -> dict[str, int]:
        """
        Return a dict {partition_name: upper_bound_int}, skipping NULL bounds.

        Mirrors:
        SELECT PARTITION_NAME, PARTITION_DESCRIPTION
        FROM   INFORMATION_SCHEMA.PARTITIONS
        WHERE  TABLE_SCHEMA = DATABASE() AND TABLE_NAME = <table_name>
          AND  PARTITION_NAME IS NOT NULL
        """
        rows = session.execute(
            text(
                """
                SELECT PARTITION_NAME, PARTITION_DESCRIPTION
                FROM INFORMATION_SCHEMA.PARTITIONS
                WHERE TABLE_SCHEMA = DATABASE()
                  AND TABLE_NAME = :table_name
                  AND PARTITION_NAME IS NOT NULL
                """
            ),
            {"table_name": table_name},
        ).fetchall()

        # PARTITION_DESCRIPTION can be non-numeric text (e.g. MAXVALUE);
        # guard with `is not None` and keep only integer bounds.
        return {name: int(bound) for name, bound in rows if bound is not None}


class PostgreSQLDB(SQLDB):
    """PostgreSQL-specific DB layer adding declarative RANGE-partition management."""

    @staticmethod
    def create_partitions(
        session: Session,
        table_name: str,
        partitioning_information_list: list[tuple[str, str]],
    ):
        """
        Add RANGE partitions to a PostgreSQL table (single-column integer key).

        * Duplicate name with different bound → ValueError.
        * Duplicate name with same bound     → ignored.
        * New upper bounds must be strictly greater than every existing bound.
        * The lower bound of each new partition is taken to be the current
          highest existing upper bound (i.e. partitions are assumed contiguous).

        :param session: SQLAlchemy session bound to a PostgreSQL engine.
        :param table_name: Name of the partitioned parent table.
        :param partitioning_information_list: (partition_name, upper_bound) pairs,
            where the upper bound is the decimal string of an integer.
        :raises ValueError: If a partition name already exists with a different bound.
        """
        engine = session.get_bind()
        preparer = IdentifierPreparer(engine.dialect)
        quoted_table = preparer.quote(table_name)

        existing_partitions = PostgreSQLDB._get_partition_metadata(session, table_name)
        highest_existing_bound = max(existing_partitions.values(), default=-1)
        # DDL cannot be parameterized, so render integer bounds as SQL literals
        literal_int = types.Integer().literal_processor(engine.dialect)

        ddl_fragments = []  # (upper_bound_int, DDL)

        for partition_name, upper_bound_text in partitioning_information_list:
            upper_bound_int = int(upper_bound_text)

            # ---- duplicate-name handling ----------------------------------------
            if partition_name in existing_partitions:
                if existing_partitions[partition_name] != upper_bound_int:
                    raise ValueError(
                        f"Partition {partition_name} already exists with a different "
                        f"bound: {existing_partitions[partition_name]} vs {upper_bound_int}"
                    )
                continue  # already present with same bound

            # ---- ascending-order rule -------------------------------------------
            if upper_bound_int <= highest_existing_bound:
                continue  # not strictly higher → skip

            # contiguous assumption: lower == current highest bound
            lower_bound_int = highest_existing_bound
            ddl_fragments.append(
                (
                    upper_bound_int,
                    text(
                        f"""
                        CREATE TABLE {preparer.quote(partition_name)}
                        PARTITION OF {quoted_table}
                        FOR VALUES FROM ({literal_int(lower_bound_int)})
                                   TO   ({literal_int(upper_bound_int)})
                        """
                    ),
                )
            )
            highest_existing_bound = upper_bound_int  # advance cursor

        if not ddl_fragments:
            return

        ddl_fragments.sort(key=lambda pair: pair[0])  # ascending
        for _upper, ddl in ddl_fragments:
            session.execute(ddl)
        session.commit()

    @staticmethod
    def drop_partitions(
        session: Session,
        table_name: str,
        cutoff_partition_name: str,
    ):
        """
        Execute the drop operation for partitions older than the cutoff partition name.

        :param session: SQLAlchemy session.
        :param table_name: The name of the table with partitions.
        :param cutoff_partition_name: The cutoff partition name for dropping old partitions.
        """
        to_drop = PostgreSQLDB._get_partitions_older_than(
            session, table_name, cutoff_partition_name
        )
        if not to_drop:
            return

        engine = session.get_bind()
        preparer = IdentifierPreparer(engine.dialect)
        q_table = preparer.quote(table_name)

        logger.info(
            "Detaching and dropping partitions",
            table_name=table_name,
            parts=to_drop,
        )
        # In PostgreSQL a partition must be detached from its parent before the
        # backing table can be dropped.
        for part in sorted(to_drop):
            q_part = preparer.quote(part)
            session.execute(text(f"ALTER TABLE {q_table} DETACH PARTITION {q_part}"))
            session.execute(text(f"DROP TABLE IF EXISTS {q_part}"))

        session.commit()

    @staticmethod
    def get_partition_expression_for_table(
        session: Session,
        table_name: str,
    ) -> str:
        """
        Returns partitioning expression for a given table.

        :param session: SQLAlchemy session.
        :param table_name: Name of the table.
        """
        return session.execute(
            text("""
                SELECT pg_get_expr(pt.partbound, pt.partrelid) AS partition_expression
                FROM pg_partitioned_table AS pt
                JOIN pg_class      AS pc ON pt.partrelid   = pc.oid
                JOIN pg_namespace  AS pn ON pc.relnamespace = pn.oid
                WHERE pn.nspname = current_schema()
                  AND pc.relname = :table_name
            """),
            {
                "table_name": table_name,
            },
        ).scalar()

    @staticmethod
    def table_exists(
        session: Session,
        table_name: str,
    ) -> bool:
        """
        Checks if a table exists in the current database schema.

        :param session: SQLAlchemy session.
        :param table_name: Name of the table to check.
        """
        return bool(
            session.execute(
                text("""
                SELECT EXISTS (
                    SELECT 1
                    FROM information_schema.tables
                    WHERE table_schema = current_schema()
                      AND table_name   = :table_name
                )
            """),
                {
                    "table_name": table_name,
                },
            ).scalar()
        )

    @staticmethod
    def _get_partitions_older_than(
        session: Session,
        table_name: str,
        cutoff_partition_name: str,
    ) -> list[str]:
        """
        Returns names of PostgreSQL partitions for a table that are lexicographically less than the cutoff.

        :param session: SQLAlchemy session.
        :param table_name: Name of the partitioned parent table.
        :param cutoff_partition_name: Exclusive upper bound on partition names.
        """
        existing = PostgreSQLDB._get_partition_metadata(session, table_name)
        return [name for name in existing if name < cutoff_partition_name]

    @staticmethod
    def _get_partition_metadata(session: Session, table_name: str) -> dict[str, int]:
        """
        Return {partition_name: upper_bound_int} for RANGE partitions,
        skipping partitions whose bound is not of the form ``TO (<int>)``
        (e.g. a DEFAULT partition), mirroring MySQLDB._get_partition_metadata.
        """
        rows = session.execute(
            text(
                """
                SELECT c.relname AS partition_name,
                       (regexp_match(
                               pg_get_expr(c.relpartbound, c.oid),
                               'TO \\((\\d+)\\)'
                        ))[1]::int                      AS upper_bound
                FROM pg_inherits i
                    JOIN pg_class c
                ON c.oid = i.inhrelid
                WHERE i.inhparent = CAST (:tbl AS regclass)
                  AND c.relkind = 'r' -- ordinary leaf tables
                """
            ),
            {"tbl": table_name},
        ).fetchall()

        # regexp_match yields NULL for bounds that don't match the pattern;
        # without this filter a None value would break max() in
        # create_partitions and violate the declared dict[str, int] type.
        return {name: int(bound) for name, bound in rows if bound is not None}