"""
FiftyOne models.

| Copyright 2017-2025, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""

import contextlib
import inspect
import logging

import numpy as np

import eta.core.frameutils as etaf
import eta.core.learning as etal
import eta.core.models as etam
import eta.core.utils as etau
import eta.core.video as etav
import eta.core.web as etaw

import fiftyone as fo
import fiftyone.core.collections as foc
import fiftyone.core.fields as fof
import fiftyone.core.labels as fol
import fiftyone.core.media as fom
import fiftyone.core.utils as fou
import fiftyone.core.validation as fov

tud = fou.lazy_import("torch.utils.data")

foue = fou.lazy_import("fiftyone.utils.eta")
fouf = fou.lazy_import("fiftyone.utils.flash")
foui = fou.lazy_import("fiftyone.utils.image")
foup = fou.lazy_import("fiftyone.utils.patches")
fous = fou.lazy_import("fiftyone.utils.super_gradients")
fout = fou.lazy_import("fiftyone.utils.torch")
foutr = fou.lazy_import("fiftyone.utils.transformers")
fouu = fou.lazy_import("fiftyone.utils.ultralytics")


logger = logging.getLogger(__name__)


# Label types that can be interpreted as spatial patches of an image.
# NOTE(review): not referenced in this section of the file — presumably used
# by patch-based utilities defined elsewhere in this module; confirm usage
_ALLOWED_PATCH_TYPES = (
    fol.Detection,
    fol.Detections,
    fol.Polyline,
    fol.Polylines,
)


def apply_model(
    samples,
    model,
    label_field="predictions",
    confidence_thresh=None,
    classes=None,
    store_logits=False,
    batch_size=None,
    num_workers=None,
    skip_failures=True,
    output_dir=None,
    rel_dir=None,
    progress=None,
    **kwargs,
):
    """Applies the model to the samples in the collection.

    This method supports all of the following cases:

    -   Applying an image model to an image collection
    -   Applying an image model to the frames of a video collection
    -   Applying a video model to a video collection

    Args:
        samples: a :class:`fiftyone.core.collections.SampleCollection`
        model: a :class:`Model`, Hugging Face Transformers model, Ultralytics
            model, SuperGradients model, or Lightning Flash model
        label_field ("predictions"): the name of the field in which to store
            the model predictions. When performing inference on video frames,
            the "frames." prefix is optional
        confidence_thresh (None): an optional confidence threshold to apply to
            any applicable labels generated by the model
        classes (None): an optional iterable of classes to which to restrict
            any applicable labels generated by the model
        store_logits (False): whether to store logits for the model
            predictions. This is only supported when the provided ``model`` has
            logits, ``model.has_logits == True``
        batch_size (None): an optional batch size to use, if the model supports
            batching
        num_workers (None): the number of workers to use when loading images.
            Only applicable for Torch-based models
        skip_failures (True): whether to gracefully continue without raising an
            error if predictions cannot be generated for a sample. Only
            applicable to :class:`Model` instances
        output_dir (None): an optional output directory in which to write
            segmentation images. Only applicable if the model generates
            segmentations. If none is provided, the segmentations are stored in
            the database
        rel_dir (None): an optional relative directory to strip from each input
            filepath to generate a unique identifier that is joined with
            ``output_dir`` to generate an output path for each segmentation
            image. This argument allows for populating nested subdirectories in
            ``output_dir`` that match the shape of the input paths. The path is
            converted to an absolute path (if necessary) via
            :func:`fiftyone.core.storage.normalize_path`
        progress (None): whether to render a progress bar (True/False), use the
            default value ``fiftyone.config.show_progress_bars`` (None), or a
            progress callback function to invoke instead
        **kwargs: optional model-specific keyword arguments passed through
            to the underlying inference implementation
    """
    # Lightning Flash models have a dedicated application path
    if _is_flash_model(model):
        return fouf.apply_flash_model(
            samples,
            model,
            label_field=label_field,
            confidence_thresh=confidence_thresh,
            store_logits=store_logits,
            batch_size=batch_size,
            num_workers=num_workers,
            output_dir=output_dir,
            rel_dir=rel_dir,
            **kwargs,
        )

    # Converts recognized Transformers/Ultralytics/SuperGradients models into
    # FiftyOne `Model` instances; unrecognized models pass through unchanged
    model = _convert_model_if_necessary(model)

    if not isinstance(model, Model):
        raise ValueError("Unsupported model type: %s" % type(model))

    # Validate that the collection's media type is compatible with the model
    if samples.media_type == fom.IMAGE:
        fov.validate_image_collection(samples)
    elif samples.media_type == fom.GROUP:
        raise fom.SelectGroupSlicesError((fom.IMAGE, fom.VIDEO))
    elif samples.media_type != fom.VIDEO:
        raise fom.MediaTypeError(
            "Unsupported media type '%s'" % samples.media_type
        )

    if model.media_type == "video" and samples.media_type != fom.VIDEO:
        raise ValueError(
            "Video models can only be applied to video collections"
        )

    if model.media_type not in ("image", "video"):
        raise ValueError(
            "Unsupported model `media_type=%s`. Supported values are "
            "('image', 'video')" % model.media_type
        )

    if store_logits and not model.has_logits:
        raise ValueError(
            "The provided model does not expose logits "
            "(model.has_logits = %s)" % model.has_logits
        )

    # Sets provide fast membership checks when filtering labels downstream
    if classes is not None:
        classes = set(classes)

    supports_get_item = isinstance(model, SupportsGetItem)
    needs_samples = isinstance(model, SamplesMixin)

    # Route any extra **kwargs into the appropriate model-specific dict(s):
    # `field_mapping` for models with custom GetItem loading, and/or
    # `needs_fields` for models that read fields from the samples themselves
    if supports_get_item and needs_samples:
        field_mapping = kwargs.pop("field_mapping", {})
        needs_fields = kwargs.pop("needs_fields", {})
        needs_fields.update(kwargs)
    elif supports_get_item:
        field_mapping = kwargs.pop("field_mapping", {})
        field_mapping.update(kwargs)
        needs_fields = None
    elif needs_samples:
        field_mapping = None
        needs_fields = kwargs.pop("needs_fields", {})
        needs_fields.update(kwargs)
    else:
        field_mapping = None
        needs_fields = None

    # Image models applied to videos are run frame-by-frame
    process_video_frames = (
        samples.media_type == fom.VIDEO and model.media_type == "image"
    )

    # Torch-based and GetItem-based models load inputs via a DataLoader,
    # except when iterating the frames of videos
    use_data_loader = (
        isinstance(model, (SupportsGetItem, TorchModelMixin))
        and not process_video_frames
    )

    if num_workers is not None and not use_data_loader:
        logger.warning("Ignoring unsupported `num_workers` parameter")

    # When an output directory is provided, label arrays (e.g. segmentation
    # masks) are written to disk rather than stored in the database
    if output_dir is not None:
        filename_maker = fou.UniqueFilenameMaker(
            output_dir=output_dir, rel_dir=rel_dir, idempotent=False
        )
    else:
        filename_maker = None

    with contextlib.ExitStack() as context:
        if confidence_thresh is not None and hasattr(
            model.config, "confidence_thresh"
        ):
            # Model directly supports confidence thresholding, so defer to it
            context.enter_context(
                fou.SetAttributes(
                    model.config, confidence_thresh=confidence_thresh
                )
            )
            confidence_thresh = None

        if classes is not None and hasattr(model.config, "filter_classes"):
            # Model directly supports class filtering, so defer to it
            context.enter_context(
                fou.SetAttributes(model.config, filter_classes=classes)
            )
            classes = None

        if store_logits:
            context.enter_context(fou.SetAttributes(model, store_logits=True))

        if use_data_loader:
            # The DataLoader applies the model's transforms itself
            context.enter_context(fou.SetAttributes(model, preprocess=False))

        if needs_samples:
            context.enter_context(
                fou.SetAttributes(model, needs_fields=needs_fields)
            )

        # Enter the model's context for the duration of inference
        context.enter_context(model)

        if model.media_type == "video":
            return _apply_video_model(
                samples,
                model,
                label_field,
                confidence_thresh,
                classes,
                skip_failures,
                filename_maker,
                progress,
            )

        batch_size = _parse_batch_size(batch_size, model, use_data_loader)

        if process_video_frames:
            # Frame labels are stored without the "frames." prefix internally
            label_field, _ = samples._handle_frame_field(label_field)

            if batch_size is not None:
                return _apply_image_model_to_frames_batch(
                    samples,
                    model,
                    label_field,
                    confidence_thresh,
                    classes,
                    batch_size,
                    skip_failures,
                    filename_maker,
                    progress,
                )

            return _apply_image_model_to_frames_single(
                samples,
                model,
                label_field,
                confidence_thresh,
                classes,
                skip_failures,
                filename_maker,
                progress,
            )

        if use_data_loader:
            return _apply_image_model_data_loader(
                samples,
                model,
                label_field,
                confidence_thresh,
                classes,
                batch_size,
                num_workers,
                skip_failures,
                filename_maker,
                progress,
                field_mapping,
            )

        if batch_size is not None:
            return _apply_image_model_batch(
                samples,
                model,
                label_field,
                confidence_thresh,
                classes,
                batch_size,
                skip_failures,
                filename_maker,
                progress,
            )

        return _apply_image_model_single(
            samples,
            model,
            label_field,
            confidence_thresh,
            classes,
            skip_failures,
            filename_maker,
            progress,
        )


def _is_flash_model(model):
    """Whether ``model`` is a Lightning Flash task.

    Detection is performed by scanning the model's MRO for the Flash ``Task``
    base class by name, so ``flash`` need not be importable here.
    """
    target = "flash.core.model.Task"
    return any(
        etau.get_class_name(cls) == target
        for cls in inspect.getmro(type(model))
    )


def _is_transformers_model(model):
    return type(model).__module__.startswith("transformers.models.")


def _is_ultralytics_model(model):
    return type(model).__module__.startswith("ultralytics.")


def _is_super_gradients_models(model):
    return type(model).__module__.startswith("super_gradients.")


def _convert_model_if_necessary(model):
    """Converts supported third-party models (Transformers, Ultralytics,
    SuperGradients) into FiftyOne ``Model`` instances.

    Unrecognized models are returned unchanged.
    """
    if _is_transformers_model(model):
        model = foutr.convert_transformers_model(model)
    elif _is_ultralytics_model(model):
        model = fouu.convert_ultralytics_model(model)
    elif _is_super_gradients_models(model):
        model = fous.convert_super_gradients_model(model)

    return model


def _apply_image_model_single(
    samples,
    model,
    label_field,
    confidence_thresh,
    classes,
    skip_failures,
    filename_maker,
    progress,
):
    """Applies an image model to each sample one at a time (no batching).

    Failures are logged and skipped when ``skip_failures`` is True; otherwise
    the first error is raised.
    """
    # `SamplesMixin` models receive the sample itself at predict-time
    needs_samples = isinstance(model, SamplesMixin)

    # Only load the fields the model actually needs
    samples = _select_fields_for_inference(samples, model)

    with contextlib.ExitStack() as context:
        pb = context.enter_context(fou.ProgressBar(progress=progress))
        ctx = context.enter_context(foc.SaveContext(samples))

        for sample in pb(samples):
            try:
                img = foui.read(sample.filepath)

                if needs_samples:
                    labels = model.predict(img, sample=sample)
                else:
                    labels = model.predict(img)

                # Write any mask/map arrays to disk instead of the database
                if filename_maker is not None:
                    _export_arrays(labels, sample.filepath, filename_maker)

                sample.add_labels(
                    labels,
                    label_field=label_field,
                    confidence_thresh=confidence_thresh,
                    classes=classes,
                )
                ctx.save(sample)
            except Exception as e:
                if not skip_failures:
                    raise e

                logger.warning("Sample: %s\nError: %s\n", sample.id, e)

def _apply_image_model_batch(
    samples,
    model,
    label_field,
    confidence_thresh,
    classes,
    batch_size,
    skip_failures,
    filename_maker,
    progress,
):
    """Applies an image model to the samples in batches of ``batch_size`` via
    ``model.predict_all()``.

    A failure skips (or raises, per ``skip_failures``) the entire batch.
    """
    # `SamplesMixin` models receive the samples themselves at predict-time
    needs_samples = isinstance(model, SamplesMixin)

    # Only load the fields the model actually needs
    samples = _select_fields_for_inference(samples, model)

    with contextlib.ExitStack() as context:
        pb = context.enter_context(fou.ProgressBar(samples, progress=progress))
        ctx = context.enter_context(foc.SaveContext(samples))

        for sample_batch in fou.iter_batches(samples, batch_size):
            try:
                imgs = [foui.read(sample.filepath) for sample in sample_batch]

                if needs_samples:
                    labels_batch = model.predict_all(
                        imgs, samples=sample_batch
                    )
                else:
                    labels_batch = model.predict_all(imgs)

                for sample, labels in zip(sample_batch, labels_batch):
                    # Write any mask/map arrays to disk instead of the database
                    if filename_maker is not None:
                        _export_arrays(labels, sample.filepath, filename_maker)

                    sample.add_labels(
                        labels,
                        label_field=label_field,
                        confidence_thresh=confidence_thresh,
                        classes=classes,
                    )
                    ctx.save(sample)

            except Exception as e:
                if not skip_failures:
                    raise e

                logger.warning(
                    "Batch: %s - %s\nError: %s\n",
                    sample_batch[0].id,
                    sample_batch[-1].id,
                    e,
                )

            pb.update(len(sample_batch))

def _apply_image_model_data_loader(
    samples,
    model,
    label_field,
    confidence_thresh,
    classes,
    batch_size,
    num_workers,
    skip_failures,
    filename_maker,
    progress,
    field_mapping,
):
    """Applies an image model using a Torch ``DataLoader`` to load and
    preprocess inputs.

    Batches of samples are iterated in lockstep with the loader's batches.
    Per-item load errors surface as ``Exception`` instances in place of a
    batch (see ``ErrorHandlingCollate``) and are re-raised here, then skipped
    or propagated per ``skip_failures``.
    """
    # `SamplesMixin` models receive the samples themselves at predict-time
    needs_samples = isinstance(model, SamplesMixin)

    data_loader = _make_data_loader(
        samples,
        model,
        batch_size,
        num_workers,
        skip_failures,
        field_mapping,
    )

    # Only load the fields the model actually needs
    samples = _select_fields_for_inference(samples, model)

    with contextlib.ExitStack() as context:
        pb = context.enter_context(fou.ProgressBar(samples, progress=progress))
        ctx = context.enter_context(foc.SaveContext(samples))

        for sample_batch, imgs in zip(
            fou.iter_batches(samples, batch_size),
            data_loader,
        ):
            try:
                # The collate function converts load failures into values
                if isinstance(imgs, Exception):
                    raise imgs

                if needs_samples:
                    labels_batch = model.predict_all(
                        imgs, samples=sample_batch
                    )
                else:
                    labels_batch = model.predict_all(imgs)

                for sample, labels in zip(sample_batch, labels_batch):
                    # Write any mask/map arrays to disk instead of the database
                    if filename_maker is not None:
                        _export_arrays(labels, sample.filepath, filename_maker)

                    sample.add_labels(
                        labels,
                        label_field=label_field,
                        confidence_thresh=confidence_thresh,
                        classes=classes,
                    )
                    ctx.save(sample)
            except Exception as e:
                if not skip_failures:
                    raise e

                logger.warning(
                    "Batch: %s - %s\nError: %s\n",
                    sample_batch[0].id,
                    sample_batch[-1].id,
                    e,
                    exc_info=True,
                )

            pb.update(len(sample_batch))

def _apply_image_model_to_frames_single(
    samples,
    model,
    label_field,
    confidence_thresh,
    classes,
    skip_failures,
    filename_maker,
    progress,
):
    """Applies an image model to each frame of each video, one frame at a
    time.

    For clips views, only the frames in each clip's ``support`` range are
    read. A failure skips (or raises, per ``skip_failures``) the remainder of
    that video.
    """
    # `SamplesMixin` models receive the frame documents at predict-time
    needs_samples = isinstance(model, SamplesMixin)
    frame_counts, total_frame_count = _get_frame_counts(samples)
    is_clips = samples._dataset._is_clips

    # Only load the fields the model actually needs
    samples = _select_fields_for_inference(samples, model)

    with contextlib.ExitStack() as context:
        pb = context.enter_context(
            fou.ProgressBar(total=total_frame_count, progress=progress)
        )
        ctx = context.enter_context(foc.SaveContext(samples))

        for idx, sample in enumerate(samples):
            if is_clips:
                # Restrict decoding to the clip's [first, last] frame range
                frames = etaf.FrameRange(*sample.support)
            else:
                frames = None

            try:
                with etav.FFmpegVideoReader(
                    sample.filepath, frames=frames
                ) as video_reader:
                    for img in video_reader:
                        if needs_samples:
                            frame = sample.frames[video_reader.frame_number]
                            labels = model.predict(img, sample=frame)
                        else:
                            labels = model.predict(img)

                        # Write mask/map arrays to disk, not the database
                        if filename_maker is not None:
                            _export_arrays(
                                labels, sample.filepath, filename_maker
                            )

                        sample.add_labels(
                            {video_reader.frame_number: labels},
                            label_field=label_field,
                            confidence_thresh=confidence_thresh,
                            classes=classes,
                        )
                        ctx.save(sample)

                        pb.update()

            except Exception as e:
                if not skip_failures:
                    raise e

                logger.warning("Sample: %s\nError: %s\n", sample.id, e)

            # Explicitly set in case actual # frames differed from expected #
            # (`frame_counts` holds cumulative totals through sample `idx`)
            pb.set_iteration(frame_counts[idx])

def _apply_image_model_to_frames_batch(
    samples,
    model,
    label_field,
    confidence_thresh,
    classes,
    batch_size,
    skip_failures,
    filename_maker,
    progress,
):
    """Applies an image model to the frames of each video in batches of
    ``batch_size`` frames via ``model.predict_all()``.

    For clips views, only the frames in each clip's ``support`` range are
    read. A failure skips (or raises, per ``skip_failures``) the remainder of
    that video.
    """
    # `SamplesMixin` models receive the frame documents at predict-time
    needs_samples = isinstance(model, SamplesMixin)
    frame_counts, total_frame_count = _get_frame_counts(samples)
    is_clips = samples._dataset._is_clips

    # Only load the fields the model actually needs
    samples = _select_fields_for_inference(samples, model)

    with contextlib.ExitStack() as context:
        pb = context.enter_context(
            fou.ProgressBar(total=total_frame_count, progress=progress)
        )
        ctx = context.enter_context(foc.SaveContext(samples))

        for idx, sample in enumerate(samples):
            if is_clips:
                # Restrict decoding to the clip's [first, last] frame range
                frames = etaf.FrameRange(*sample.support)
            else:
                frames = None

            try:
                with etav.FFmpegVideoReader(
                    sample.filepath, frames=frames
                ) as video_reader:
                    for fns, imgs in _iter_batches(video_reader, batch_size):
                        if needs_samples:
                            _frames = [sample.frames[fn] for fn in fns]
                            labels_batch = model.predict_all(
                                imgs, samples=_frames
                            )
                        else:
                            labels_batch = model.predict_all(imgs)

                        # Write mask/map arrays to disk, not the database
                        if filename_maker is not None:
                            for labels in labels_batch:
                                _export_arrays(
                                    labels, sample.filepath, filename_maker
                                )

                        sample.add_labels(
                            {
                                fn: labels
                                for fn, labels in zip(fns, labels_batch)
                            },
                            label_field=label_field,
                            confidence_thresh=confidence_thresh,
                            classes=classes,
                        )
                        ctx.save(sample)

                        pb.update(len(imgs))

            except Exception as e:
                if not skip_failures:
                    raise e

                logger.warning("Sample: %s\nError: %s\n", sample.id, e)

            # Explicitly set in case actual # frames differed from expected #
            # (`frame_counts` holds cumulative totals through sample `idx`)
            pb.set_iteration(frame_counts[idx])

def _apply_video_model(
    samples,
    model,
    label_field,
    confidence_thresh,
    classes,
    skip_failures,
    filename_maker,
    progress,
):
    """Applies a video model to each video sample, passing the entire video
    reader to ``model.predict()``.

    For clips views, only the frames in each clip's ``support`` range are
    read. Failures are logged and skipped when ``skip_failures`` is True;
    otherwise the first error is raised.
    """
    # `SamplesMixin` models receive the sample itself at predict-time
    needs_samples = isinstance(model, SamplesMixin)
    is_clips = samples._dataset._is_clips

    # Only load the fields the model actually needs
    samples = _select_fields_for_inference(samples, model)

    with contextlib.ExitStack() as context:
        pb = context.enter_context(fou.ProgressBar(progress=progress))
        ctx = context.enter_context(foc.SaveContext(samples))

        for sample in pb(samples):
            if is_clips:
                # Restrict decoding to the clip's [first, last] frame range
                frames = etaf.FrameRange(*sample.support)
            else:
                frames = None

            try:
                with etav.FFmpegVideoReader(
                    sample.filepath, frames=frames
                ) as video_reader:
                    if needs_samples:
                        labels = model.predict(video_reader, sample=sample)
                    else:
                        labels = model.predict(video_reader)

                # Write any mask/map arrays to disk instead of the database
                if filename_maker is not None:
                    _export_arrays(labels, sample.filepath, filename_maker)

                sample.add_labels(
                    labels,
                    label_field=label_field,
                    confidence_thresh=confidence_thresh,
                    classes=classes,
                )
                ctx.save(sample)
            except Exception as e:
                if not skip_failures:
                    raise e

                logger.warning("Sample: %s\nError: %s\n", sample.id, e)

def _select_fields_for_inference(samples, model):
    """Restricts ``samples`` to only the fields required for inference.

    ``SamplesMixin`` models declare the fields they read via ``needs_fields``;
    all other models get a view with only default fields selected.
    """
    if not isinstance(model, SamplesMixin):
        return samples.select_fields()

    required_fields = list(model.needs_fields.values())
    return samples.select_fields(required_fields)


def _export_arrays(label, input_path, filename_maker):
    """Exports the array data (masks/maps) of the given label(s) to disk.

    ``label`` may be a single label or a dict of labels (e.g. a multi-output
    prediction); each value is exported individually.
    """
    labels = label.values() if isinstance(label, dict) else (label,)
    for _label in labels:
        _do_export_array(_label, input_path, filename_maker)


def _do_export_array(label, input_path, filename_maker):
    """Writes a single label's array data (segmentation mask, heatmap, or
    instance masks) to disk as PNG(s) and updates the label to reference the
    exported path(s).

    Labels of other types are left untouched.
    """

    def _make_png_path():
        # Each exported array gets a unique output path derived from the
        # input image's path
        return filename_maker.get_output_path(input_path, output_ext=".png")

    if isinstance(label, fol.Segmentation):
        label.export_mask(_make_png_path(), update=True)
    elif isinstance(label, fol.Heatmap):
        label.export_map(_make_png_path(), update=True)
    elif isinstance(label, fol.Detection):
        if label.mask is not None:
            label.export_mask(_make_png_path(), update=True)
    elif isinstance(label, fol.Detections):
        for detection in label.detections:
            if detection.mask is not None:
                detection.export_mask(_make_png_path(), update=True)


def _get_frame_counts(samples):
    """Computes per-sample frame counts for the video collection.

    Returns a tuple ``(cumulative_counts, total)`` where
    ``cumulative_counts[i]`` is the total number of frames in samples
    ``0..i`` (inclusive) and ``total`` is the grand total.
    """
    if samples._dataset._is_clips:
        # Clips store their inclusive [first, last] frame range in `support`
        support = fo.ViewField("support")
        counts = samples.values(support[1] - support[0] + 1)
    else:
        # Frame counts come from metadata, which may need to be computed
        samples.compute_metadata()
        counts = samples.values("metadata.total_frame_count")

    # Missing counts are treated as zero frames
    cumulative_counts = np.cumsum([count or 0 for count in counts])
    total = cumulative_counts[-1] if cumulative_counts.size > 0 else 0

    return cumulative_counts, total


def _iter_batches(video_reader, batch_size):
    frame_numbers = []
    imgs = []
    for img in video_reader:
        imgs.append(img)
        frame_numbers.append(video_reader.frame_number)
        if len(imgs) >= batch_size:
            yield frame_numbers, imgs
            frame_numbers = []
            imgs = []

    if frame_numbers:
        yield frame_numbers, imgs


class ErrorHandlingCollate(object):
    """Collate function that surfaces per-item loading errors as values.

    Dataset items that are ``Exception`` instances (emitted when loading is
    configured with ``skip_failures=True``) are returned in place of the
    collated batch rather than being collated, so the consumer can decide
    whether to raise or log them. Collation failures themselves are likewise
    returned (or raised, when ``skip_failures`` is False).
    """

    def __init__(
        self, skip_failures, ragged_batches, use_numpy, user_collate_fn=None
    ):
        self.skip_failures = skip_failures

        # Choose the collation strategy up-front
        if ragged_batches:
            collate = self._ragged_batches
        elif use_numpy:
            collate = self._use_numpy
        else:
            collate = self._default

        self._collate_fn = collate

        # Fall back to Torch's default collation when none is provided
        if user_collate_fn is None:
            user_collate_fn = tud.dataloader.default_collate

        self.user_collate_fn = user_collate_fn

    def __call__(self, batch):
        return self._collate_fn(batch)

    @staticmethod
    def handle_errors(batch):
        """Returns an ``Exception`` summarizing any errors in ``batch``, or
        ``None`` if the batch contains no errors.
        """
        errors = [item for item in batch if isinstance(item, Exception)]

        if not errors:
            return None

        if len(errors) == 1:
            return errors[0]

        # Multiple failures; merge their messages into one exception
        return Exception("\n" + "\n".join(str(e) for e in errors))

    def _ragged_batches(self, batch):
        # Ragged batches are passed through uncollated
        error = self.handle_errors(batch)
        return batch if error is None else error

    def _use_numpy(self, batch):
        return self._collate_or_error(np.stack, batch)

    def _default(self, batch):
        return self._collate_or_error(self.user_collate_fn, batch)

    def _collate_or_error(self, collate, batch):
        # Surface any per-item load errors before attempting collation
        error = self.handle_errors(batch)
        if error is not None:
            return error

        try:
            return collate(batch)
        except Exception as e:
            if not self.skip_failures:
                raise e

            return e


def _make_data_loader(
    samples,
    model,
    batch_size,
    num_workers,
    skip_failures,
    field_mapping,
):
    """Builds a ``torch.utils.data.DataLoader`` for applying ``model`` to
    ``samples``.

    Per-sample loading failures are converted into ``Exception`` values
    within the emitted batches (via ``ErrorHandlingCollate``) so the consumer
    can decide whether to skip or raise them.
    """
    # This function supports DataLoaders that emit numpy arrays that can
    # therefore be used for non-Torch models; but we do not currently use this
    # functionality
    use_numpy = not isinstance(model, TorchModelMixin)

    num_workers = fout.recommend_num_workers(num_workers)

    # Prefer the model's own collate function, if it defines one
    if model.has_collate_fn:
        user_collate_fn = model.collate_fn
    else:
        user_collate_fn = None

    collate_fn = ErrorHandlingCollate(
        skip_failures,
        ragged_batches=model.ragged_batches,
        use_numpy=use_numpy,
        user_collate_fn=user_collate_fn,
    )

    if batch_size is None:
        batch_size = 1

    if isinstance(model, SupportsGetItem):
        # The model defines its own per-sample loading logic
        get_item = model.build_get_item(field_mapping=field_mapping)
        dataset = samples.to_torch(get_item, skip_failures=skip_failures)
        worker_init_fn = fout.FiftyOneTorchDataset.worker_init
    else:
        dataset = fout.TorchImageDataset(
            samples=samples,
            transform=model.transforms,
            use_numpy=use_numpy,
            force_rgb=True,
            skip_failures=skip_failures,
        )
        worker_init_fn = None

    # Pin host memory only when the model will move tensors to a GPU
    pin_memory = isinstance(model, fout.TorchImageModel) and model._using_gpu

    return tud.DataLoader(
        dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        collate_fn=collate_fn,
        pin_memory=pin_memory,
        persistent_workers=False,
        worker_init_fn=worker_init_fn,
    )


def compute_embeddings(
    samples,
    model,
    embeddings_field=None,
    batch_size=None,
    num_workers=None,
    skip_failures=True,
    progress=None,
    **kwargs,
):
    """Computes embeddings for the samples in the collection using the given
    model.

    This method supports all the following cases:

    -   Using an image model to compute embeddings for an image collection
    -   Using an image model to compute frame embeddings for a video collection
    -   Using a video model to compute embeddings for a video collection

    The ``model`` must expose embeddings, i.e., :meth:`Model.has_embeddings`
    must return ``True``.

    If an ``embeddings_field`` is provided, the embeddings are saved to the
    samples; otherwise, the embeddings are returned in-memory.

    Args:
        samples: a :class:`fiftyone.core.collections.SampleCollection`
        model: a :class:`Model`, Hugging Face Transformers model, Ultralytics
            model, SuperGradients model, or Lightning Flash model
        embeddings_field (None): the name of a field in which to store the
            embeddings. When computing video frame embeddings, the "frames."
            prefix is optional
        batch_size (None): an optional batch size to use, if the model supports
            batching
        num_workers (None): the number of workers to use when loading images.
            Only applicable for Torch-based models
        skip_failures (True): whether to gracefully continue without raising an
            error if embeddings cannot be generated for a sample. Only
            applicable to :class:`Model` instances
        progress (None): whether to render a progress bar (True/False), use the
            default value ``fiftyone.config.show_progress_bars`` (None), or a
            progress callback function to invoke instead
        **kwargs: optional model-specific keyword arguments passed through
            to the underlying inference implementation

    Returns:
        one of the following:

        -   ``None``, if an ``embeddings_field`` is provided
        -   a ``num_samples x num_dim`` array of embeddings, when computing
            embeddings for image/video collections with image/video models,
            respectively, and no ``embeddings_field`` is provided. If
            ``skip_failures`` is ``True`` and any errors are detected, a list
            of length ``num_samples`` is returned instead containing all
            successfully computed embedding vectors along with ``None`` entries
            for samples for which embeddings could not be computed
        -   a dictionary mapping sample IDs to ``num_frames x num_dim`` arrays
            of embeddings, when computing frame embeddings for video
            collections using an image model. If ``skip_failures`` is ``True``
            and any errors are detected, the values of this dictionary will
            contain arrays of embeddings for all frames 1, 2, ... until the
            error occurred, or ``None`` if no embeddings were computed at all
    """
    # Lightning Flash models have a dedicated embeddings implementation
    if _is_flash_model(model):
        return fouf.compute_flash_embeddings(
            samples,
            model,
            embeddings_field=embeddings_field,
            batch_size=batch_size,
            num_workers=num_workers,
            **kwargs,
        )

    # Wraps supported third-party models as `Model` instances, if necessary
    model = _convert_model_if_necessary(model)

    if not isinstance(model, Model):
        raise ValueError("Unsupported model type: %s" % type(model))

    if not model.has_embeddings:
        raise ValueError(
            "Model must expose embeddings; found model.has_embeddings = %s"
            % model.has_embeddings
        )

    # Only image and video collections are supported; grouped collections must
    # first select a slice of a supported media type
    if samples.media_type == fom.IMAGE:
        fov.validate_image_collection(samples)
    elif samples.media_type == fom.GROUP:
        raise fom.SelectGroupSlicesError((fom.IMAGE, fom.VIDEO))
    elif samples.media_type != fom.VIDEO:
        raise fom.MediaTypeError(
            "Unsupported media type '%s'" % samples.media_type
        )

    if model.media_type == "video" and samples.media_type != fom.VIDEO:
        raise ValueError(
            "Video models can only be applied to video collections"
        )

    if model.media_type not in ("image", "video"):
        raise ValueError(
            "Unsupported model `media_type=%s`. Supported values are "
            "('image', 'video')" % model.media_type
        )

    # `SupportsGetItem` models accept a mapping from model inputs to sample
    # fields; any extra kwargs are folded into that mapping
    if isinstance(model, SupportsGetItem):
        field_mapping = kwargs.pop("field_mapping", {})
        field_mapping.update(kwargs)
    else:
        field_mapping = None

    # Whether we must manually iterate over the frames of each video
    process_video_frames = (
        samples.media_type == fom.VIDEO and model.media_type == "image"
    )

    # Torch-based models can use a DataLoader for batched/parallel loading,
    # but not when we're manually iterating over video frames
    use_data_loader = (
        isinstance(model, (SupportsGetItem, TorchModelMixin))
        and not process_video_frames
    )

    if num_workers is not None and not use_data_loader:
        logger.warning("Ignoring unsupported `num_workers` parameter")

    if embeddings_field is not None:
        # Declare the output `VectorField` on the dataset up-front: on frames
        # when applying an image model to videos, and on samples otherwise
        dataset = samples._dataset
        embeddings_field, _is_frame_field = dataset._handle_frame_field(
            embeddings_field
        )

        if dataset.media_type == fom.VIDEO and model.media_type == "image":
            if not dataset.has_frame_field(embeddings_field):
                dataset.add_frame_field(embeddings_field, fof.VectorField)
        else:
            if not dataset.has_sample_field(embeddings_field):
                dataset.add_sample_field(embeddings_field, fof.VectorField)

    with contextlib.ExitStack() as context:
        # When a DataLoader is used, the dataset applies the model's
        # transforms itself, so disable the model's own preprocessing for the
        # duration of this call
        if use_data_loader:
            context.enter_context(fou.SetAttributes(model, preprocess=False))

        context.enter_context(model)

        if model.media_type == "video":
            return _compute_video_embeddings(
                samples, model, embeddings_field, skip_failures, progress
            )

        batch_size = _parse_batch_size(batch_size, model, use_data_loader)

        if process_video_frames:
            if batch_size is not None:
                return _compute_frame_embeddings_batch(
                    samples,
                    model,
                    embeddings_field,
                    batch_size,
                    skip_failures,
                    progress,
                )

            return _compute_frame_embeddings_single(
                samples, model, embeddings_field, skip_failures, progress
            )

        if use_data_loader:
            return _compute_image_embeddings_data_loader(
                samples,
                model,
                embeddings_field,
                batch_size,
                num_workers,
                skip_failures,
                progress,
                field_mapping,
            )

        if batch_size is not None:
            return _compute_image_embeddings_batch(
                samples,
                model,
                embeddings_field,
                batch_size,
                skip_failures,
                progress,
            )

        return _compute_image_embeddings_single(
            samples, model, embeddings_field, skip_failures, progress
        )


def _compute_image_embeddings_single(
    samples, model, embeddings_field, skip_failures, progress
):
    """Embeds each image one-at-a-time via ``model.embed()``.

    Returns ``None`` if an ``embeddings_field`` is provided; otherwise returns
    the in-memory embeddings.
    """
    samples = _select_fields_for_embeddings(samples, embeddings_field)

    save_embeddings = embeddings_field is not None
    all_embeddings = []
    saw_errors = False

    with contextlib.ExitStack() as context:
        pb = context.enter_context(fou.ProgressBar(progress=progress))
        if save_embeddings:
            ctx = context.enter_context(foc.SaveContext(samples))

        for sample in pb(samples):
            embedding = None

            try:
                embedding = model.embed(foui.read(sample.filepath))
            except Exception as e:
                if not skip_failures:
                    raise e

                saw_errors = True
                logger.warning("Sample: %s\nError: %s\n", sample.id, e)

            if save_embeddings:
                sample[embeddings_field] = embedding
                ctx.save(sample)
            else:
                all_embeddings.append(embedding)

    if save_embeddings:
        return None

    if saw_errors:
        # May contain None entries, so must be returned as a list
        return all_embeddings

    if not all_embeddings:
        return np.empty((0, 0), dtype=float)

    return np.stack(all_embeddings)


def _compute_image_embeddings_batch(
    samples, model, embeddings_field, batch_size, skip_failures, progress
):
    """Embeds images in batches of ``batch_size`` via ``model.embed_all()``.

    Returns ``None`` if an ``embeddings_field`` is provided; otherwise returns
    the in-memory embeddings.
    """
    samples = _select_fields_for_embeddings(samples, embeddings_field)

    save_embeddings = embeddings_field is not None
    all_embeddings = []
    saw_errors = False

    with contextlib.ExitStack() as context:
        pb = context.enter_context(fou.ProgressBar(samples, progress=progress))
        if save_embeddings:
            ctx = context.enter_context(foc.SaveContext(samples))

        for batch in fou.iter_batches(samples, batch_size):
            # Default to None placeholders so failed batches still record an
            # entry per sample
            batch_embeddings = [None] * len(batch)

            try:
                imgs = [foui.read(s.filepath) for s in batch]
                batch_embeddings = list(model.embed_all(imgs))  # list of 1D
            except Exception as e:
                if not skip_failures:
                    raise e

                saw_errors = True
                logger.warning(
                    "Batch: %s - %s\nError: %s\n",
                    batch[0].id,
                    batch[-1].id,
                    e,
                )

            if save_embeddings:
                for sample, embedding in zip(batch, batch_embeddings):
                    sample[embeddings_field] = embedding
                    ctx.save(sample)
            else:
                all_embeddings.extend(batch_embeddings)

            pb.update(len(batch))

    if save_embeddings:
        return None

    if saw_errors:
        # May contain None entries, so must be returned as a list
        return all_embeddings

    if not all_embeddings:
        return np.empty((0, 0), dtype=float)

    return np.stack(all_embeddings)


def _compute_image_embeddings_data_loader(
    samples,
    model,
    embeddings_field,
    batch_size,
    num_workers,
    skip_failures,
    progress,
    field_mapping,
):
    """Embeds images in batches loaded via a Torch ``DataLoader``.

    Returns ``None`` if an ``embeddings_field`` is provided; otherwise returns
    the in-memory embeddings.
    """
    data_loader = _make_data_loader(
        samples,
        model,
        batch_size,
        num_workers,
        skip_failures,
        field_mapping,
    )

    samples = _select_fields_for_embeddings(samples, embeddings_field)

    save_embeddings = embeddings_field is not None
    all_embeddings = []
    saw_errors = False

    with contextlib.ExitStack() as context:
        pb = context.enter_context(fou.ProgressBar(samples, progress=progress))
        ctx = (
            context.enter_context(foc.SaveContext(samples))
            if save_embeddings
            else None
        )

        for batch, imgs in zip(
            fou.iter_batches(samples, batch_size),
            data_loader,
        ):
            batch_embeddings = [None] * len(batch)

            try:
                # The error-handling collate function emits the exception
                # itself when a batch fails to load
                if isinstance(imgs, Exception):
                    raise imgs

                batch_embeddings = list(model.embed_all(imgs))  # list of 1D
            except Exception as e:
                if not skip_failures:
                    raise e

                saw_errors = True
                logger.warning(
                    "Batch: %s - %s\nError: %s\n",
                    batch[0].id,
                    batch[-1].id,
                    e,
                )

            if save_embeddings:
                try:
                    for sample, embedding in zip(batch, batch_embeddings):
                        sample[embeddings_field] = embedding
                        if ctx:
                            ctx.save(sample)
                except Exception as e:
                    if not skip_failures:
                        raise e

                    logger.warning(
                        "Batch: %s - %s\nError: %s\n",
                        batch[0].id,
                        batch[-1].id,
                        e,
                        exc_info=True,
                    )
            else:
                all_embeddings.extend(batch_embeddings)

            pb.update(len(batch))

    if save_embeddings:
        return None

    if saw_errors:
        # May contain None entries, so must be returned as a list
        return all_embeddings

    if not all_embeddings:
        return np.empty((0, 0), dtype=float)

    return np.stack(all_embeddings)


def _compute_frame_embeddings_single(
    samples, model, embeddings_field, skip_failures, progress
):
    """Embeds each frame of each video one-at-a-time with an image model.

    Returns ``None`` if an ``embeddings_field`` is provided; otherwise returns
    a dict mapping sample IDs to ``num_frames x num_dim`` arrays of
    embeddings, or ``None`` for samples whose frames could not be embedded.
    """
    # Progress is tracked per-frame, not per-sample
    frame_counts, total_frame_count = _get_frame_counts(samples)
    is_clips = samples._dataset._is_clips

    samples = _select_fields_for_embeddings(samples, embeddings_field)

    embeddings_dict = {}

    with contextlib.ExitStack() as context:
        pb = context.enter_context(
            fou.ProgressBar(total=total_frame_count, progress=progress)
        )
        if embeddings_field is not None:
            ctx = context.enter_context(foc.SaveContext(samples))

        for idx, sample in enumerate(samples):
            embeddings = []

            if is_clips:
                # Only decode the frames within the clip's support
                frames = etaf.FrameRange(*sample.support)
            else:
                frames = None

            try:
                with etav.FFmpegVideoReader(
                    sample.filepath, frames=frames
                ) as video_reader:
                    for img in video_reader:
                        embedding = model.embed(img)

                        if embeddings_field is not None:
                            # Store this frame's embedding on the sample,
                            # keyed by its frame number
                            sample.add_labels(
                                {video_reader.frame_number: embedding},
                                label_field=embeddings_field,
                            )
                            ctx.save(sample)
                        else:
                            embeddings.append(embedding)

                        pb.update()

            except Exception as e:
                if not skip_failures:
                    raise e

                # A mid-video failure leaves any already-computed frame
                # embeddings in place (see the docstring of
                # `compute_embeddings()`)
                logger.warning("Sample: %s\nError: %s\n", sample.id, e)

            if embeddings_field is None:
                if embeddings:
                    embeddings = np.stack(embeddings)
                else:
                    embeddings = None

                embeddings_dict[sample.id] = embeddings

            # Explicitly set in case actual # frames differed from expected #
            pb.set_iteration(frame_counts[idx])

    if embeddings_field is not None:
        return None

    return embeddings_dict


def _compute_frame_embeddings_batch(
    samples, model, embeddings_field, batch_size, skip_failures, progress
):
    """Embeds the frames of each video in batches of ``batch_size`` with an
    image model.

    Returns ``None`` if an ``embeddings_field`` is provided; otherwise returns
    a dict mapping sample IDs to ``num_frames x num_dim`` arrays of
    embeddings, or ``None`` for samples whose frames could not be embedded.
    """
    # Progress is tracked per-frame, not per-sample
    frame_counts, total_frame_count = _get_frame_counts(samples)
    is_clips = samples._dataset._is_clips

    samples = _select_fields_for_embeddings(samples, embeddings_field)

    embeddings_dict = {}

    with contextlib.ExitStack() as context:
        pb = context.enter_context(
            fou.ProgressBar(total=total_frame_count, progress=progress)
        )
        if embeddings_field is not None:
            ctx = context.enter_context(foc.SaveContext(samples))

        for idx, sample in enumerate(samples):
            embeddings = []

            if is_clips:
                # Only decode the frames within the clip's support
                frames = etaf.FrameRange(*sample.support)
            else:
                frames = None

            try:
                with etav.FFmpegVideoReader(
                    sample.filepath, frames=frames
                ) as video_reader:
                    # `_iter_batches` yields (frame numbers, images) pairs
                    for fns, imgs in _iter_batches(video_reader, batch_size):
                        embeddings_batch = list(model.embed_all(imgs))

                        if embeddings_field is not None:
                            # Store this batch's embeddings on the sample,
                            # keyed by frame number
                            sample.add_labels(
                                {
                                    fn: embedding
                                    for fn, embedding in zip(
                                        fns, embeddings_batch
                                    )
                                },
                                label_field=embeddings_field,
                            )
                            ctx.save(sample)
                        else:
                            embeddings.extend(embeddings_batch)

                        pb.update(len(imgs))

            except Exception as e:
                if not skip_failures:
                    raise e

                # A mid-video failure leaves any already-computed frame
                # embeddings in place (see the docstring of
                # `compute_embeddings()`)
                logger.warning("Sample: %s\nError: %s\n", sample.id, e)

            if embeddings_field is None:
                if embeddings:
                    embeddings = np.stack(embeddings)
                else:
                    embeddings = None

                embeddings_dict[sample.id] = embeddings

            # Explicitly set in case actual # frames differed from expected #
            pb.set_iteration(frame_counts[idx])

    if embeddings_field is not None:
        return None

    return embeddings_dict


def _compute_video_embeddings(
    samples, model, embeddings_field, skip_failures, progress
):
    """Computes whole-video embeddings for ``samples`` using a video model.

    Returns ``None`` if an ``embeddings_field`` is provided; otherwise returns
    the in-memory embeddings (a list containing ``None`` entries if any
    errors occurred and ``skip_failures`` is True).
    """
    is_clips = samples._dataset._is_clips

    samples = _select_fields_for_embeddings(samples, embeddings_field)

    embeddings = []
    errors = False

    with contextlib.ExitStack() as context:
        pb = context.enter_context(fou.ProgressBar(progress=progress))
        if embeddings_field is not None:
            ctx = context.enter_context(foc.SaveContext(samples))

        for sample in pb(samples):
            # Must reset each iteration; previously, when `model.embed()`
            # failed with `skip_failures=True`, the first sample raised a
            # `NameError` below and later samples silently reused the previous
            # sample's embedding
            embedding = None

            if is_clips:
                # Only decode the frames within the clip's support
                frames = etaf.FrameRange(*sample.support)
            else:
                frames = None

            try:
                with etav.FFmpegVideoReader(
                    sample.filepath, frames=frames
                ) as video_reader:
                    embedding = model.embed(video_reader)

            except Exception as e:
                if not skip_failures:
                    raise e

                errors = True
                logger.warning("Sample: %s\nError: %s\n", sample.id, e)

            if embeddings_field is not None:
                sample[embeddings_field] = embedding
                ctx.save(sample)
            else:
                embeddings.append(embedding)

    if embeddings_field is not None:
        return None

    if errors:
        return embeddings  # may contain None, must return as list

    if not embeddings:
        return np.empty((0, 0), dtype=float)

    return np.stack(embeddings)


def _select_fields_for_embeddings(samples, embeddings_field):
    if embeddings_field is not None and "." in embeddings_field:
        root_field = embeddings_field.split(".", 1)[0]
        return samples.select_fields(root_field)
    else:
        return samples.select_fields()


def compute_patch_embeddings(
    samples,
    model,
    patches_field,
    embeddings_field=None,
    force_square=False,
    alpha=None,
    handle_missing="skip",
    batch_size=None,
    num_workers=None,
    skip_failures=True,
    progress=None,
):
    """Computes embeddings for the image patches defined by ``patches_field``
    of the samples in the collection using the given model.

    This method supports all the following cases:

    -   Using an image model to compute patch embeddings for an image
        collection
    -   Using an image model to compute frame patch embeddings for a video
        collection

    The ``model`` must expose embeddings, i.e., :meth:`Model.has_embeddings`
    must return ``True``.

    If an ``embeddings_field`` is provided, the embeddings are saved to the
    samples; otherwise, the embeddings are returned in-memory.

    Args:
        samples: a :class:`fiftyone.core.collections.SampleCollection`
        model: a :class:`Model`, Hugging Face Transformers model, Ultralytics
            model, SuperGradients model, or Lightning Flash model
        patches_field: the name of the field defining the image patches in each
            sample to embed. Must be of type
            :class:`fiftyone.core.labels.Detection`,
            :class:`fiftyone.core.labels.Detections`,
            :class:`fiftyone.core.labels.Polyline`, or
            :class:`fiftyone.core.labels.Polylines`. When computing video frame
            embeddings, the "frames." prefix is optional
        embeddings_field (None): the name of a label attribute in which to
            store the embeddings
        force_square (False): whether to minimally manipulate the patch
            bounding boxes into squares prior to extraction
        alpha (None): an optional expansion/contraction to apply to the patches
            before extracting them, in ``[-1, inf)``. If provided, the length
            and width of the box are expanded (or contracted, when
            ``alpha < 0``) by ``(100 * alpha)%``. For example, set
            ``alpha = 0.1`` to expand the boxes by 10%, and set
            ``alpha = -0.1`` to contract the boxes by 10%
        handle_missing ("skip"): how to handle images with no patches.
            Supported values are:

            -   "skip": skip the image and assign its embedding as ``None``
            -   "image": use the whole image as a single patch
            -   "error": raise an error

        batch_size (None): an optional batch size to use, if the model supports
            batching
        num_workers (None): the number of workers to use when loading images.
            Only applicable for Torch models
        skip_failures (True): whether to gracefully continue without raising an
            error if embeddings cannot be generated for a sample
        progress (None): whether to render a progress bar (True/False), use the
            default value ``fiftyone.config.show_progress_bars`` (None), or a
            progress callback function to invoke instead

    Returns:
        one of the following:

        -   ``None``, if an ``embeddings_field`` is provided
        -   a dict mapping sample IDs to ``num_patches x num_dim`` arrays of
            patch embeddings, when computing patch embeddings for image
            collections and no ``embeddings_field`` is provided. If
            ``skip_failures`` is ``True`` and any errors are detected, this
            dictionary will contain ``None`` values for any samples for which
            embeddings could not be computed
        -   a dict of dicts mapping sample IDs to frame numbers to
            ``num_patches x num_dim`` arrays of patch embeddings, when
            computing patch embeddings for the frames of video collections and
            no ``embeddings_field`` is provided. If ``skip_failures`` is
            ``True`` and any errors are detected, this nested dict will contain
            missing or ``None`` values to indicate uncomputable embeddings
    """
    # Wraps supported third-party models as `Model` instances, if necessary
    model = _convert_model_if_necessary(model)

    if not isinstance(model, Model):
        raise ValueError("Unsupported model type: %s" % type(model))

    if not model.has_embeddings:
        raise ValueError(
            "Model must expose embeddings; found model.has_embeddings = %s"
            % model.has_embeddings
        )

    # Patches are always extracted from images, so only image models apply
    if model.media_type != "image":
        raise ValueError(
            "This method only supports image models; found "
            "model.media_type = %s" % model.media_type
        )

    _handle_missing_supported = {"skip", "image", "error"}
    if handle_missing not in _handle_missing_supported:
        raise ValueError(
            "Unsupported handle_missing = '%s'; supported values are %s"
            % (handle_missing, _handle_missing_supported)
        )

    # Whether we must manually iterate over the frames of each video
    process_video_frames = samples.media_type == fom.VIDEO

    # Torch-based models can use a DataLoader for batched/parallel loading,
    # but not when we're manually iterating over video frames
    use_data_loader = (
        isinstance(model, (SupportsGetItem, TorchModelMixin))
        and not process_video_frames
    )

    if num_workers is not None and not use_data_loader:
        logger.warning("Ignoring unsupported `num_workers` parameter")

    if samples.media_type == fom.IMAGE:
        fov.validate_image_collection(samples)
        fov.validate_collection_label_fields(
            samples, patches_field, _ALLOWED_PATCH_TYPES
        )
    elif samples.media_type == fom.VIDEO:
        # For videos, patches are defined on frames; strip any "frames."
        # prefix and validate against the frame-level field
        patches_field, _ = samples._handle_frame_field(patches_field)
        fov.validate_collection_label_fields(
            samples,
            samples._FRAMES_PREFIX + patches_field,
            _ALLOWED_PATCH_TYPES,
        )
    elif samples.media_type == fom.GROUP:
        raise fom.SelectGroupSlicesError((fom.IMAGE, fom.VIDEO))
    else:
        raise fom.MediaTypeError(
            "Unsupported media type '%s'" % samples.media_type
        )

    if embeddings_field is not None:
        # Embeddings are stored as an attribute of each patch label, so the
        # attribute name itself cannot be an embedded path
        if "." in embeddings_field:
            raise ValueError(
                "Invalid `embeddings_field=%s`. Expected a label attribute "
                "name that contains no '.'" % embeddings_field
            )

        # Declare the output `VectorField` on the dataset up-front, on frames
        # for video collections and on samples otherwise
        dataset = samples._dataset
        if dataset.media_type == fom.VIDEO:
            _, embeddings_path = dataset._get_label_field_path(
                dataset._FRAMES_PREFIX + patches_field, embeddings_field
            )
            embeddings_path, _ = dataset._handle_frame_field(embeddings_path)
            if not dataset.has_frame_field(embeddings_path):
                dataset.add_frame_field(embeddings_path, fof.VectorField)
        else:
            _, embeddings_path = dataset._get_label_field_path(
                patches_field, embeddings_field
            )
            if not dataset.has_sample_field(embeddings_path):
                dataset.add_sample_field(embeddings_path, fof.VectorField)

    with contextlib.ExitStack() as context:
        # When a DataLoader is used, the dataset applies the model's
        # transforms itself, so disable the model's own preprocessing for the
        # duration of this call
        if use_data_loader:
            context.enter_context(fou.SetAttributes(model, preprocess=False))

        context.enter_context(model)

        batch_size = _parse_batch_size(batch_size, model, use_data_loader)

        if process_video_frames:
            return _embed_frame_patches(
                samples,
                model,
                patches_field,
                embeddings_field,
                force_square,
                alpha,
                handle_missing,
                batch_size,
                skip_failures,
                progress,
            )

        if use_data_loader:
            return _embed_patches_data_loader(
                samples,
                model,
                patches_field,
                embeddings_field,
                force_square,
                alpha,
                handle_missing,
                batch_size,
                num_workers,
                skip_failures,
                progress,
            )

        return _embed_patches(
            samples,
            model,
            patches_field,
            embeddings_field,
            force_square,
            alpha,
            handle_missing,
            batch_size,
            skip_failures,
            progress,
        )


def _embed_patches(
    samples,
    model,
    patches_field,
    embeddings_field,
    force_square,
    alpha,
    handle_missing,
    batch_size,
    skip_failures,
    progress,
):
    """Embeds the patches of each image, reading each image directly.

    Returns ``None`` if an ``embeddings_field`` is provided; otherwise returns
    a dict mapping sample IDs to patch embeddings (or ``None`` on failure).
    """
    samples = _select_fields_for_patch_embeddings(samples, patches_field)

    save_embeddings = embeddings_field is not None
    if save_embeddings:
        label_parser = _make_label_parser(samples, patches_field)
    else:
        embeddings_dict = {}

    with contextlib.ExitStack() as context:
        pb = context.enter_context(fou.ProgressBar(progress=progress))
        if save_embeddings:
            ctx = context.enter_context(foc.SaveContext(samples))

        for sample in pb(samples):
            embeddings = None

            try:
                patches = foup.parse_patches(
                    sample, patches_field, handle_missing=handle_missing
                )

                if patches is not None:
                    img = foui.read(sample.filepath)

                    if batch_size is not None:
                        embeddings = _embed_patches_batch(
                            model,
                            img,
                            patches,
                            force_square,
                            alpha,
                            batch_size,
                        )
                    else:
                        embeddings = _embed_patches_single(
                            model, img, patches, force_square, alpha
                        )

            except Exception as e:
                if not skip_failures:
                    raise e

                logger.warning("Sample: %s\nError: %s\n", sample.id, e)

            if save_embeddings:
                if embeddings is not None:
                    # Attach each patch's embedding to its label
                    for label, embedding in zip(
                        label_parser(sample), embeddings
                    ):
                        label[embeddings_field] = embedding

                    ctx.save(sample)
            else:
                embeddings_dict[sample.id] = embeddings

    if save_embeddings:
        return None

    return embeddings_dict


def _embed_patches_single(model, img, detections, force_square, alpha):
    """Embeds each patch of ``img`` individually via ``model.embed()``.

    Returns a ``num_patches x num_dim`` array, or ``None`` if there are no
    patches.
    """
    embeddings = [
        model.embed(
            foup.extract_patch(
                img, detection, force_square=force_square, alpha=alpha
            )
        )
        for detection in detections.detections
    ]

    if not embeddings:
        return None

    return np.stack(embeddings)


def _embed_patches_batch(
    model, img, detections, force_square, alpha, batch_size
):
    """Embeds the patches of ``img`` in batches of ``batch_size`` via
    ``model.embed_all()``.

    Returns a ``num_patches x num_dim`` array, or ``None`` if there are no
    patches.
    """
    embeddings = []
    for detection_batch in fou.iter_batches(detections.detections, batch_size):
        patches = [
            foup.extract_patch(
                img, detection, force_square=force_square, alpha=alpha
            )
            for detection in detection_batch
        ]
        embeddings_batch = model.embed_all(patches)
        embeddings.append(embeddings_batch)

    # Mirror `_embed_patches_single()`: no patches -> `None`, rather than
    # letting `np.concatenate()` raise on an empty list
    if not embeddings:
        return None

    return np.concatenate(embeddings)


def _embed_patches_data_loader(
    samples,
    model,
    patches_field,
    embeddings_field,
    force_square,
    alpha,
    handle_missing,
    batch_size,
    num_workers,
    skip_failures,
    progress,
):
    """Embeds the patches of each image, loading patches via a Torch
    ``DataLoader``.

    Returns ``None`` if an ``embeddings_field`` is provided; otherwise returns
    a dict mapping sample IDs to patch embeddings (or ``None`` on failure).
    """
    # The data loader yields one entry of patches per sample, aligned with
    # the iteration order of `samples` below
    data_loader = _make_patch_data_loader(
        samples,
        model,
        patches_field,
        force_square,
        alpha,
        handle_missing,
        num_workers,
        skip_failures,
    )

    samples = _select_fields_for_patch_embeddings(samples, patches_field)

    if embeddings_field is not None:
        label_parser = _make_label_parser(samples, patches_field)
    else:
        embeddings_dict = {}

    with contextlib.ExitStack() as context:
        pb = context.enter_context(fou.ProgressBar(samples, progress=progress))
        if embeddings_field is not None:
            ctx = context.enter_context(foc.SaveContext(samples))

        for sample, patches in pb(zip(samples, data_loader)):
            embeddings = None

            try:
                # The data loader emits the error itself when a sample's
                # patches fail to load
                if isinstance(patches, Exception):
                    raise patches

                if patches is not None:
                    # Embed the sample's patches in slices of `batch_size`
                    embeddings = []
                    for patches_batch in fou.iter_slices(patches, batch_size):
                        embeddings_batch = model.embed_all(patches_batch)
                        embeddings.append(embeddings_batch)

                    embeddings = np.concatenate(embeddings)

            except Exception as e:
                if not skip_failures:
                    raise e

                logger.warning("Sample: %s\nError: %s\n", sample.id, e)

            if embeddings_field is not None:
                if embeddings is not None:
                    # Attach each patch's embedding to its label
                    labels = label_parser(sample)
                    for label, embedding in zip(labels, embeddings):
                        label[embeddings_field] = embedding

                    ctx.save(sample)
            else:
                embeddings_dict[sample.id] = embeddings

    if embeddings_field is not None:
        return None

    return embeddings_dict


def _embed_frame_patches(
    samples,
    model,
    patches_field,
    embeddings_field,
    force_square,
    alpha,
    handle_missing,
    batch_size,
    skip_failures,
    progress,
):
    """Embeds the patches defined by the frame-level ``patches_field`` of
    each video sample in ``samples``.

    If ``embeddings_field`` is not None, embeddings are written to that
    attribute of each patch label and ``None`` is returned; otherwise,
    returns a dict mapping sample IDs to dicts mapping frame numbers to
    embedding arrays (``None`` entries for frames with no patches or whose
    embedding failed).
    """
    frame_counts, total_frame_count = _get_frame_counts(samples)
    is_clips = samples._dataset._is_clips
    # Frame-level fields carry the "frames." prefix at the collection level
    _patches_field = samples._FRAMES_PREFIX + patches_field

    samples = _select_fields_for_patch_embeddings(samples, _patches_field)

    if embeddings_field is not None:
        label_parser = _make_label_parser(samples, _patches_field)
    else:
        embeddings_dict = {}

    with contextlib.ExitStack() as context:
        pb = context.enter_context(
            fou.ProgressBar(total=total_frame_count, progress=progress)
        )
        if embeddings_field is not None:
            # Context used to save modified samples back to the dataset
            ctx = context.enter_context(foc.SaveContext(samples))

        for idx, sample in enumerate(samples):
            if is_clips:
                # Only decode the frames within the clip's support
                frames = etaf.FrameRange(*sample.support)
            else:
                frames = None

            frame_embeddings_dict = {}

            try:
                with etav.FFmpegVideoReader(
                    sample.filepath, frames=frames
                ) as video_reader:
                    for img in video_reader:
                        frame_number = video_reader.frame_number
                        frame = sample.frames[frame_number]

                        embeddings = None

                        patches = foup.parse_patches(
                            frame, patches_field, handle_missing=handle_missing
                        )

                        if patches is not None:
                            # Embed one patch at a time when no batch size is
                            # provided; otherwise embed in batches
                            if batch_size is None:
                                embeddings = _embed_patches_single(
                                    model, img, patches, force_square, alpha
                                )
                            else:
                                embeddings = _embed_patches_batch(
                                    model,
                                    img,
                                    patches,
                                    force_square,
                                    alpha,
                                    batch_size,
                                )

                        if embeddings_field is not None:
                            if embeddings is not None:
                                # Store each embedding on its patch label
                                labels = label_parser(frame)
                                for label, embedding in zip(
                                    labels, embeddings
                                ):
                                    label[embeddings_field] = embedding
                        else:
                            frame_embeddings_dict[frame_number] = embeddings

                        pb.update()

            except Exception as e:
                if not skip_failures:
                    raise e

                logger.warning("Sample: %s\nError: %s\n", sample.id, e)

            if embeddings_field is not None:
                ctx.save(sample)
            else:
                embeddings_dict[sample.id] = frame_embeddings_dict

            # Explicitly set in case actual # frames differed from expected #
            pb.set_iteration(frame_counts[idx])

    if embeddings_field is not None:
        return None

    return embeddings_dict


def _make_patch_data_loader(
    samples,
    model,
    patches_field,
    force_square,
    alpha,
    handle_missing,
    num_workers,
    skip_failures,
):
    """Builds a DataLoader over the patches defined by ``patches_field``.

    Each element of the loader corresponds to a single sample, since
    ``batch_size=1`` is used together with a collate function that unwraps
    the singleton batch.
    """
    # The dataset can emit numpy arrays so that non-Torch models are
    # supported, although this functionality is not currently used
    emit_numpy = not isinstance(model, TorchModelMixin)

    workers = fout.recommend_num_workers(num_workers)

    dataset = fout.TorchImagePatchesDataset(
        samples=samples,
        patches_field=patches_field,
        handle_missing=handle_missing,
        transform=model.transforms,
        ragged_batches=model.ragged_batches,
        use_numpy=emit_numpy,
        force_rgb=True,
        force_square=force_square,
        alpha=alpha,
        skip_failures=skip_failures,
    )

    return tud.DataLoader(
        dataset,
        batch_size=1,
        num_workers=workers,
        collate_fn=_patch_collate_fn,
    )


def _patch_collate_fn(batch):
    return batch[0]  # return patches directly


def _parse_batch_size(batch_size, model, use_data_loader):
    if batch_size is None:
        batch_size = fo.config.default_batch_size

    if batch_size is not None and batch_size > 1 and model.ragged_batches:
        logger.warning("Model does not support batching")
        batch_size = None

    if use_data_loader and batch_size is None:
        batch_size = 1

    return batch_size


def _select_fields_for_patch_embeddings(samples, patches_field):
    return samples.select_fields(patches_field)


def _make_label_parser(samples, patches_field):
    """Returns a function that maps a sample/frame to the list of patch
    labels stored in ``patches_field``.

    The parser always returns a (possibly empty) list, regardless of whether
    the field holds a single label or a label-list type.
    """
    patches_attr, _ = samples._handle_frame_field(patches_field)
    label_type = samples._get_label_field_type(patches_field)

    if issubclass(label_type, fol._HasLabelList):
        # Label-list types (eg Detections) store their labels in a known
        # list attribute
        list_attr = label_type._LABEL_LIST_FIELD

        def parse(doc):
            labels = doc[patches_attr]
            return labels[list_attr] if labels is not None else []

    else:

        def parse(doc):
            label = doc[patches_attr]
            if label is None:
                return []

            return label if isinstance(label, list) else [label]

    return parse


def load_model(model_config_dict, model_path=None, **kwargs):
    """Loads the model specified by the given :class:`ModelConfig` dict.

    Args:
        model_config_dict: a :class:`ModelConfig` dict
        model_path (None): an optional model path to inject into the
            ``model_path`` field of the model's ``Config`` instance, which must
            implement the ``eta.core.learning.HasPublishedModel`` interface.
            This is useful when working with a model whose weights are stored
            locally and do not need to be downloaded
        **kwargs: optional keyword arguments to inject into the model's
            ``Config`` instance

    Returns:
        a :class:`Model` instance
    """
    # Inject any config args, unwrapping the extra config layer that ETA
    # models carry
    if kwargs:
        is_eta = model_config_dict["type"] == etau.get_class_name(
            foue.ETAModel
        )
        target = (
            model_config_dict["config"]["config"]
            if is_eta
            else model_config_dict["config"]
        )
        _merge_config(target, kwargs)

    config = ModelConfig.from_dict(model_config_dict)

    #
    # Inject the model path, which requires the model's config to either:
    #
    # (1) implement ``eta.core.learning.HasPublishedModel``, or
    #
    # (2) be an ``fiftyone.utils.eta.ETAModelConfig`` whose embedded config
    #     implements ``eta.core.learning.HasPublishedModel``
    #
    if model_path:
        inner = config.config
        if isinstance(inner, etal.HasPublishedModel):
            inner.model_name = None
            inner.model_path = model_path
        elif isinstance(inner, foue.ETAModelConfig) and isinstance(
            inner.config, etal.HasPublishedModel
        ):
            inner.config.model_name = None
            inner.config.model_path = model_path
        else:
            raise ValueError(
                "Model config must implement the %s interface"
                % etal.HasPublishedModel
            )

    return config.build()


def _merge_config(d, kwargs):
    for k, v in kwargs.items():
        if k in d and isinstance(d[k], dict):
            d[k].update(v)
        else:
            d[k] = v


class ModelConfig(etal.ModelConfig):
    """Base configuration class that encapsulates the name of a :class:`Model`
    and an instance of its associated Config class.

    Args:
        type: the fully-qualified class name of the :class:`Model` subclass
        config: an instance of the Config class associated with the model
    """

    # All parsing/building behavior is inherited from ``etal.ModelConfig``
    pass


class Model(etal.Model):
    """Abstract base class for models.

    This class declares the following conventions:

    (a)     :meth:`Model.__init__` should take a single ``config`` argument
            that is an instance of ``<Model>Config``

    (b)     Models implement the context manager interface. This means that
            models can optionally use context to perform any necessary setup
            and teardown, and so any code that builds a model should use the
            ``with`` syntax
    """

    def __enter__(self):
        # Subclasses may override to perform setup
        return self

    def __exit__(self, *args):
        # Subclasses may override to perform teardown
        pass

    @property
    def media_type(self):
        """The media type processed by the model.

        Supported values are "image" and "video".
        """
        raise NotImplementedError("subclasses must implement media_type")

    @property
    def has_logits(self):
        """Whether this model can generate logits for its predictions.

        Defaults to ``False``; models that can generate logits should
        override this via the :class:`LogitsMixin` interface.
        """
        return False

    @property
    def has_embeddings(self):
        """Whether this model can generate embeddings.

        Defaults to ``False``; models that can generate embeddings should
        override this via the :class:`EmbeddingsMixin` interface.
        """
        return False

    @property
    def can_embed_prompts(self):
        """Whether this model can generate prompt embeddings.

        Defaults to ``False``; models that can generate prompt embeddings
        should override this via the :class:`PromptMixin` interface.
        """
        return False

    @property
    def ragged_batches(self):
        """Whether :meth:`transforms` may return tensors of different sizes.

        If ``True``, then passing ragged lists of data to
        :meth:`predict_all` is not allowed.
        """
        raise NotImplementedError("subclasses must implement ragged_batches")

    @property
    def transforms(self):
        """The preprocessing function that will/must be applied to each input
        before prediction, or ``None`` if no preprocessing is performed.
        """
        raise NotImplementedError("subclasses must implement transforms")

    @property
    def preprocess(self):
        """Whether :meth:`transforms` is applied during inference (``True``)
        or assumed to have already been applied (``False``).
        """
        raise NotImplementedError("subclasses must implement preprocess")

    @preprocess.setter
    def preprocess(self, value):
        raise NotImplementedError("subclasses must implement preprocess")

    def predict(self, arg):
        """Performs prediction on the given data.

        Image models should support, at minimum, ``arg`` values that are
        uint8 numpy arrays (HWC).

        Video models should support, at minimum, ``arg`` values that are
        ``eta.core.video.VideoReader`` instances.

        Args:
            arg: the data

        Returns:
            a :class:`fiftyone.core.labels.Label` instance or dict of
            :class:`fiftyone.core.labels.Label` instances containing the
            predictions
        """
        raise NotImplementedError("subclasses must implement predict()")

    def predict_all(self, args):
        """Performs prediction on the given iterable of data.

        Image models should support, at minimum, ``args`` values that are
        either lists of uint8 numpy arrays (HWC) or numpy array tensors
        (NHWC).

        Video models should support, at minimum, ``args`` values that are
        lists of ``eta.core.video.VideoReader`` instances.

        By default this simply applies :meth:`predict` to each element;
        subclasses can override this method to increase efficiency.

        Args:
            args: an iterable of data

        Returns:
            a list of :class:`fiftyone.core.labels.Label` instances or a list
            of dicts of :class:`fiftyone.core.labels.Label` instances
            containing the predictions
        """
        return list(map(self.predict, args))


class LogitsMixin(object):
    """Mixin for :class:`Model` classes that can generate logits for their
    predictions.

    Whether a particular instance can actually generate logits is reported
    by its :meth:`has_logits` property, so only some instances of a class
    may support logits.
    """

    def __init__(self):
        # Logit storage is opt-in; off until explicitly enabled
        self._store_logits = False

    @property
    def store_logits(self):
        """Whether the model should store logits in its predictions."""
        return self._store_logits

    @store_logits.setter
    def store_logits(self, flag):
        if flag and not self.has_logits:
            raise ValueError("This model cannot generate logits to store")

        self._store_logits = bool(flag) if flag in (True, False) else flag

    @property
    def has_logits(self):
        """Whether this model can generate logits."""
        raise NotImplementedError("subclasses must implement has_logits")


class EmbeddingsMixin(object):
    """Mixin for :class:`Model` classes that can generate embeddings for
    their predictions.

    Whether a particular instance can actually generate embeddings is
    reported by its :meth:`has_embeddings` property, so only some instances
    of a class may support embeddings.
    """

    @property
    def has_embeddings(self):
        """Whether this model has embeddings."""
        raise NotImplementedError("subclasses must implement has_embeddings")

    def get_embeddings(self):
        """Returns the embeddings generated by the last forward pass of the
        model.

        By convention, this method should always return an array whose first
        axis represents batch size (which will always be 1 when :meth:`predict`
        was last used).

        Returns:
            a numpy array containing the embedding(s)
        """
        raise NotImplementedError("subclasses must implement get_embeddings()")

    def embed(self, arg):
        """Generates an embedding for the given data.

        The default implementation runs :meth:`predict` and then reads off
        :meth:`get_embeddings`; subclasses can override this method to
        increase efficiency.

        Args:
            arg: the data. See :meth:`predict` for details

        Returns:
            a numpy array containing the embedding
        """
        # pylint: disable=no-member
        self.predict(arg)
        return self.get_embeddings()

    def embed_all(self, args):
        """Generates embeddings for the given iterable of data.

        The default implementation applies :meth:`embed` to each element;
        subclasses can override this method to increase efficiency.

        Args:
            args: an iterable of data. See :meth:`predict_all` for details

        Returns:
            a numpy array containing the embeddings stacked along axis 0
        """
        embeddings = [self.embed(arg) for arg in args]
        return np.stack(embeddings)


class PromptMixin(object):
    """Mixin for :class:`Model` classes that can generate prompt embeddings.

    Whether a particular instance can actually embed prompts is reported by
    its :meth:`can_embed_prompts` property, so only some instances of a
    class may support prompt embeddings.
    """

    @property
    def can_embed_prompts(self):
        """Whether this model can generate prompt embeddings."""
        raise NotImplementedError(
            "subclasses must implement can_embed_prompts"
        )

    def embed_prompt(self, arg):
        """Generates an embedding for the given prompt.

        Args:
            arg: the prompt

        Returns:
            a numpy array containing the embedding
        """
        raise NotImplementedError("subclasses must implement embed_prompt")

    def embed_prompts(self, args):
        """Generates embeddings for the given prompts.

        The default implementation applies :meth:`embed_prompt` to each
        prompt; subclasses can override this method to increase efficiency.

        Args:
            args: an iterable of prompts

        Returns:
            a numpy array containing the embeddings stacked along axis 0
        """
        embeddings = [self.embed_prompt(prompt) for prompt in args]
        return np.stack(embeddings)


class SamplesMixin(object):
    """Mixin for :class:`Model` classes that need samples for prediction.

    Models can implement this mixin to declare that they require one or more
    fields of the current sample when performing inference on its media.

    The fields are get/set via :meth:`needs_fields`, which is a dict that maps
    model-specific keys to sample field names::

        model.needs_fields = {"key1": "field1", "key2": "field2", ...}
    """

    def __init__(self):
        # No fields are required by default
        self._fields = {}

    @property
    def needs_fields(self):
        """A dict mapping model-specific keys to sample field names."""
        return self._fields

    @needs_fields.setter
    def needs_fields(self, fields):
        self._fields = fields

    def predict(self, arg, sample=None):
        """Performs prediction on the given data.

        Image models should support, at minimum, ``arg`` values that are
        uint8 numpy arrays (HWC).

        Video models should support, at minimum, ``arg`` values that are
        ``eta.core.video.VideoReader`` instances.

        Args:
            arg: the data
            sample (None): the :class:`fiftyone.core.sample.Sample` associated
                with the data

        Returns:
            a :class:`fiftyone.core.labels.Label` instance or dict of
            :class:`fiftyone.core.labels.Label` instances containing the
            predictions
        """
        raise NotImplementedError("subclasses must implement predict()")

    def predict_all(self, args, samples=None):
        """Performs prediction on the given iterable of data.

        Image models should support, at minimum, ``args`` values that are
        either lists of uint8 numpy arrays (HWC) or numpy array tensors
        (NHWC).

        Video models should support, at minimum, ``args`` values that are
        lists of ``eta.core.video.VideoReader`` instances.

        By default this simply applies :meth:`predict` to each element;
        subclasses can override this method to increase efficiency.

        Args:
            args: an iterable of data
            samples (None): an iterable of :class:`fiftyone.core.sample.Sample`
                instances associated with the data

        Returns:
            a list of :class:`fiftyone.core.labels.Label` instances or a list
            of dicts of :class:`fiftyone.core.labels.Label` instances
            containing the predictions
        """
        predictions = []
        if samples is None:
            for arg in args:
                predictions.append(self.predict(arg))
        else:
            for arg, sample in zip(args, samples):
                predictions.append(self.predict(arg, sample=sample))

        return predictions


class TorchModelMixin(object):
    """Mixin for :class:`Model` classes that support feeding data for inference
    via a :class:`torch:torch.utils.data.DataLoader`.

    Models implementing this mixin must expose via their
    :meth:`Model.transforms` property the
    :mod:`torchvision:torchvision.transforms` function that will/must be
    applied to each input before prediction.
    """

    @property
    def has_collate_fn(self):
        """Whether this model has a custom collate function.

        Return ``True`` to opt in to using :meth:`collate_fn` during
        inference.
        """
        return False

    @staticmethod
    def collate_fn(batch):
        """The collate function to use when creating dataloaders for this
        model.

        This function is only used when :meth:`has_collate_fn` returns
        ``True``.

        By default, this is the identity function, but subclasses can
        override this method as necessary.

        Note that this function must be serializable so it is compatible
        with multiprocessing for dataloaders.

        Args:
            batch: a list of items to collate

        Returns:
            the collated batch, which will be fed directly to the model
        """
        # Identity collation: pass items through untouched
        return batch


class SupportsGetItem(object):
    """Mixin for models that support inference with
    :class:`fiftyone.utils.torch.FiftyOneTorchDataset`.

    Models that implement this mixin must implement
    :meth:`build_get_item` to build the :class:`fiftyone.utils.torch.GetItem`
    instance that defines how their data should be loaded by data loaders.
    """

    @property
    def required_keys(self):
        """The required keys that must be provided as parameters to methods
        like :func:`apply_model` and :func:`compute_embeddings` at runtime.
        """
        # Delegate to the model's GetItem instance
        get_item = self.build_get_item()
        return get_item.required_keys

    def build_get_item(self, field_mapping=None):
        """Builds the :class:`fiftyone.utils.torch.GetItem` instance that
        defines how the model's data should be loaded by data loaders.

        Args:
            field_mapping (None): a user-provided dict mapping required keys to
                dataset field names

        Returns:
            a :class:`fiftyone.utils.torch.GetItem` instance
        """
        raise NotImplementedError("subclasses must implement build_get_item()")


class ModelManagerConfig(etam.ModelManagerConfig):
    """Config settings for a :class:`ModelManager`.

    Args:
        url (None): the URL of the file
        google_drive_id (None): the ID of the file in Google Drive
        extract_archive (None): whether to extract the downloaded model, which
            is assumed to be an archive
        delete_archive (None): whether to delete the archive after extracting
            it, if applicable
    """

    def __init__(self, d):
        super().__init__(d)

        # Both download sources are optional; ModelManager decides which
        # one to use at download time
        self.google_drive_id = self.parse_string(
            d, "google_drive_id", default=None
        )
        self.url = self.parse_string(d, "url", default=None)


class ModelManager(etam.ModelManager):
    """Class for downloading FiftyOne models from the web."""

    @staticmethod
    def upload_model(model_path, *args, **kwargs):
        # Uploads are intentionally unsupported for FiftyOne models
        raise NotImplementedError("Uploading models via API is not supported")

    def _download_model(self, model_path):
        # Prefer Google Drive downloads when an ID is configured; otherwise
        # fall back to a direct URL, if any
        gid = self.config.google_drive_id
        url = self.config.url

        if gid:
            logger.info("Downloading model from Google Drive ID '%s'...", gid)
            etaw.download_google_drive_file(gid, path=model_path)
        elif url:
            logger.info("Downloading model from '%s'...", url)
            etaw.download_file(url, path=model_path)
        else:
            logger.info("This model's downloading is not managed by FiftyOne")

    def delete_model(self):
        # Deletions are intentionally unsupported for FiftyOne models
        raise NotImplementedError("Deleting models via API is not supported")
