<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1">
<meta name="generator" content="pdoc 0.10.0">
<title>silk.models.superpoint API documentation</title>
<meta name="description" content="The SuperPoint model, as subclassed from magicpoint.py.
SuperPoint adds a descriptor head to the MagicPoint model." />
<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/sanitize.min.css" integrity="sha256-PK9q560IAAa6WVRRh76LtCaI8pjTJ2z11v0miyNNjrs=" crossorigin>
<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/typography.min.css" integrity="sha256-7l/o7C8jubJiy74VsKTidCy1yBkRtiUGbVkYBylBqUg=" crossorigin>
<!-- TODO(review): add an SRI integrity hash here to match the other CDN links above; crossorigin without integrity gains nothing -->
<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/styles/github.min.css" crossorigin>
<style>:root{--highlight-color:#fe9}.flex{display:flex !important}body{line-height:1.5em}#content{padding:20px}#sidebar{padding:30px;overflow:hidden}#sidebar > *:last-child{margin-bottom:2cm}.http-server-breadcrumbs{font-size:130%;margin:0 0 15px 0}#footer{font-size:.75em;padding:5px 30px;border-top:1px solid #ddd;text-align:right}#footer p{margin:0 0 0 1em;display:inline-block}#footer p:last-child{margin-right:30px}h1,h2,h3,h4,h5{font-weight:300}h1{font-size:2.5em;line-height:1.1em}h2{font-size:1.75em;margin:1em 0 .50em 0}h3{font-size:1.4em;margin:25px 0 10px 0}h4{margin:0;font-size:105%}h1:target,h2:target,h3:target,h4:target,h5:target,h6:target{background:var(--highlight-color);padding:.2em 0}a{color:#058;text-decoration:none;transition:color .3s ease-in-out}a:hover{color:#e82}.title code{font-weight:bold}h2[id^="header-"]{margin-top:2em}.ident{color:#900}pre code{background:#f8f8f8;font-size:.8em;line-height:1.4em}code{background:#f2f2f1;padding:1px 4px;overflow-wrap:break-word}h1 code{background:transparent}pre{background:#f8f8f8;border:0;border-top:1px solid #ccc;border-bottom:1px solid #ccc;margin:1em 0;padding:1ex}#http-server-module-list{display:flex;flex-flow:column}#http-server-module-list div{display:flex}#http-server-module-list dt{min-width:10%}#http-server-module-list p{margin-top:0}.toc ul,#index{list-style-type:none;margin:0;padding:0}#index code{background:transparent}#index h3{border-bottom:1px solid #ddd}#index ul{padding:0}#index h4{margin-top:.6em;font-weight:bold}@media (min-width:200ex){#index .two-column{column-count:2}}@media (min-width:300ex){#index .two-column{column-count:3}}dl{margin-bottom:2em}dl dl:last-child{margin-bottom:4em}dd{margin:0 0 1em 3em}#header-classes + dl > dd{margin-bottom:3em}dd dd{margin-left:2em}dd p{margin:10px 0}.name{background:#eee;font-weight:bold;font-size:.85em;padding:5px 10px;display:inline-block;min-width:40%}.name:hover{background:#e0e0e0}dt:target .name{background:var(--highlight-color)}.name > 
span:first-child{white-space:nowrap}.name.class > span:nth-child(2){margin-left:.4em}.inherited{color:#999;border-left:5px solid #eee;padding-left:1em}.inheritance em{font-style:normal;font-weight:bold}.desc h2{font-weight:400;font-size:1.25em}.desc h3{font-size:1em}.desc dt code{background:inherit}.source summary,.git-link-div{color:#666;text-align:right;font-weight:400;font-size:.8em;text-transform:uppercase}.source summary > *{white-space:nowrap;cursor:pointer}.git-link{color:inherit;margin-left:1em}.source pre{max-height:500px;overflow:auto;margin:0}.source pre code{font-size:12px;overflow:visible}.hlist{list-style:none}.hlist li{display:inline}.hlist li:after{content:',\2002'}.hlist li:last-child:after{content:none}.hlist .hlist{display:inline;padding-left:1em}img{max-width:100%}td{padding:0 .5em}.admonition{padding:.1em .5em;margin-bottom:1em}.admonition-title{font-weight:bold}.admonition.note,.admonition.info,.admonition.important{background:#aef}.admonition.todo,.admonition.versionadded,.admonition.tip,.admonition.hint{background:#dfd}.admonition.warning,.admonition.versionchanged,.admonition.deprecated{background:#fd4}.admonition.error,.admonition.danger,.admonition.caution{background:lightpink}</style>
<style media="screen and (min-width: 700px)">@media screen and (min-width:700px){#sidebar{width:30%;height:100vh;overflow:auto;position:sticky;top:0}#content{width:70%;max-width:100ch;padding:3em 4em;border-left:1px solid #ddd}pre code{font-size:1em}.item .name{font-size:1em}main{display:flex;flex-direction:row-reverse;justify-content:flex-end}.toc ul ul,#index ul{padding-left:1.5em}.toc > ul > li{margin-top:.5em}}</style>
<style media="print">@media print{#sidebar h1{page-break-before:always}.source{display:none}}@media print{*{background:transparent !important;color:#000 !important;box-shadow:none !important;text-shadow:none !important}a[href]:after{content:" (" attr(href) ")";font-size:90%}a[href][title]:after{content:none}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,h3{orphans:3;widows:3}h1,h2,h3,h4,h5,h6{page-break-after:avoid}}</style>
<script defer src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/highlight.min.js" integrity="sha256-Uv3H6lx7dJmRfRvH8TH6kJD1TSK1aFcwgx+mdg3epi8=" crossorigin></script>
<script>window.addEventListener('DOMContentLoaded', () => hljs.initHighlighting())</script>
</head>
<body>
<main>
<article id="content">
<header>
<h1 class="title">Module <code>silk.models.superpoint</code></h1>
</header>
<section id="section-intro">
<p>The SuperPoint model, as subclassed from magicpoint.py.
SuperPoint adds a descriptor head to the MagicPoint model.</p>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python"># Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

&#34;&#34;&#34;
The SuperPoint model, as subclassed from magicpoint.py.
SuperPoint adds a descriptor head to the MagicPoint model.
&#34;&#34;&#34;

from typing import Any, Dict, Iterable, Union, Optional

import pytorch_lightning as pl
import torch
from silk.backbones.superpoint.utils import space_to_depth, positions_to_label_map
from silk.config.core import ensure_is_instance
from silk.config.optimizer import Spec
from silk.flow import AutoForward, Flow
from silk.losses.superpoint import build_similarity_mask, DescriptorLoss, KeypointLoss
from silk.models.abstract import OptimizersHandler, StateDictRedirect
from silk.models.magicpoint import HomographyAdaptation
from silk.transforms.abstract import NamedContext, Transform
from silk.transforms.cv.homography import RandomHomographicSampler


class SuperPoint(
    OptimizersHandler,
    AutoForward,
    StateDictRedirect,
    pl.LightningModule,
    HomographyAdaptation,
):
    DEFAULT_DESCRIPTOR_LOSS = DescriptorLoss()
    DEFAULT_KEYPOINT_LOSS = KeypointLoss()

    def __init__(
        self,
        model,
        optimizer_spec: Union[Spec, None] = None,
        image_aug_transform: Union[Transform, None] = None,
        warp_original: bool = False,
        descriptor_loss=DEFAULT_DESCRIPTOR_LOSS,
        detection_loss=DEFAULT_KEYPOINT_LOSS,
        lamdba_descriptor_loss: float = 0.0001,
        training_random_homography_kwargs: Union[Dict[str, Any], None] = None,
        random_homographic_adaptation_kwargs: Union[Dict[str, Any], None] = None,
        default_outputs: Union[str, Iterable[str]] = (&#34;coarse_descriptors&#34;, &#34;logits&#34;),
        **kwargs,
    ):
        &#34;&#34;&#34;Initialize the SuperPoint model.

        Assumes an RGB image with 1 color channel (grayscale image).

        Parameters
        ----------
        optimizer_spec : Spec
            Optimizer spec to use for training.
        image_aug_transform : Union[Transform, None], optional
            Transform to apply to every warped images used during training.
        warp_original : bool, optional
            Warps original image during training, by default False
        lamdba_descriptor_loss : float, optional
            Descriptor loss weight, by default 0.0001
        random_homographic_adaptation_kwargs : Union[Dict[str, Any], None]
            Parameters passed to `RandomHomographicSampler` (used during homographic adaptation)
        training_random_homography_kwargs: Union[Dict[str, Any], None]
            Parameters passed to `RandomHomographicSampler` (used during training)
        &#34;&#34;&#34;

        OptimizersHandler.__init__(self, optimizer_spec)
        pl.LightningModule.__init__(self, **kwargs)
        StateDictRedirect.__init__(self, model)
        AutoForward.__init__(self, Flow(&#34;batch&#34;, &#34;use_image_aug&#34;), default_outputs)
        HomographyAdaptation.__init__(
            self,
            random_homographic_adaptation_kwargs,
            self._get_scores,
            model.magicpoint._detection_threshold,
            model.magicpoint._nms_dist,
            model.magicpoint._border_dist,
        )

        self._model = model
        self._cell_size = self.model.magicpoint._cell_size
        self._detection_loss = detection_loss
        self._descriptor_loss = descriptor_loss

        self._lamdba_descriptor_loss = lamdba_descriptor_loss
        self._image_aug_transform = image_aug_transform

        # homographic sampler arguments
        self._training_random_homography_kwargs = (
            {}
            if training_random_homography_kwargs is None
            else training_random_homography_kwargs
        )

        self._warp_original = warp_original

        self.flow.define_transition(&#34;checked_batch&#34;, self._check_batch, &#34;batch&#34;)
        self.flow.define_transition(
            (&#34;images&#34;, &#34;image_shape&#34;),
            self._get_images,
            &#34;checked_batch&#34;,
        )
        self.flow.define_transition(&#34;positions&#34;, self._get_positions, &#34;checked_batch&#34;)
        self.flow.define_transition(
            (&#34;sampler&#34;, &#34;warped_images&#34;),
            self._warp_images,
            &#34;images&#34;,
        )
        self.flow.define_transition(
            &#34;augmented_images&#34;,
            self._aug_images,
            &#34;warped_images&#34;,
            &#34;use_image_aug&#34;,
        )
        self.flow.define_transition(
            &#34;warped_positions&#34;,
            self._warp_positions,
            &#34;sampler&#34;,
            &#34;positions&#34;,
            &#34;image_shape&#34;,
        )
        self.flow.define_transition(
            &#34;labels&#34;,
            self._get_labels,
            &#34;warped_positions&#34;,
            &#34;image_shape&#34;,
        )
        self.flow.define_transition(
            (&#34;descriptors&#34;, &#34;logits&#34;),
            self._model.forward_flow,
            outputs=Flow.Constant((&#34;coarse_descriptors&#34;, &#34;logits&#34;)),
            images=&#34;augmented_images&#34;,
        )
        self.flow.define_transition(
            (&#34;positions_0&#34;, &#34;positions_1&#34;),
            self._split_batch_dim,
            &#34;warped_positions&#34;,
        )
        self.flow.define_transition(
            (&#34;descriptors_0&#34;, &#34;descriptors_1&#34;),
            self._split_batch_dim,
            &#34;descriptors&#34;,
        )
        self.flow.define_transition(
            &#34;similarity_mask&#34;,
            build_similarity_mask,
            descriptors=&#34;descriptors&#34;,
            positions_0=&#34;positions_0&#34;,
            positions_1=&#34;positions_1&#34;,
            cell_size=Flow.Constant(self._cell_size),
        )

        # compute losses
        self.flow.define_transition(
            &#34;detection_loss&#34;,
            self._detection_loss,
            &#34;logits&#34;,
            &#34;labels&#34;,
        )
        self.flow.define_transition(
            &#34;descriptor_loss&#34;,
            self._descriptor_loss,
            &#34;descriptors_0&#34;,
            &#34;descriptors_1&#34;,
            &#34;similarity_mask&#34;,
        )

        self._batch_to_both_losses = self.flow.with_outputs(
            (&#34;detection_loss&#34;, &#34;descriptor_loss&#34;)
        )

    def _get_scores(self, images):
        images = images.to(self.device)
        return self._model.forward_flow(&#34;score&#34;, images)

    @property
    def model(self):
        return self._model

    def model_forward_flow(self, *args, **kwargs):
        return self._model.forward_flow(*args, **kwargs)

    def _check_batch(self, batch):
        # check batch
        ensure_is_instance(batch, NamedContext)

        if self.training:
            batch.ensure_exists(&#34;image&#34;, &#34;positions&#34;)
        else:
            batch.ensure_exists(&#34;image&#34;)

        def to_device(el):
            if isinstance(el, torch.Tensor):
                return el.to(self.device)
            elif isinstance(el, list):
                return [e.to(self.device) for e in el]
            raise RuntimeError(f&#34;type {type(el)} not handled&#34;)

        # send data to model&#39;s device
        batch = batch.map(to_device)

        return batch

    def _split_batch_dim(self, tensor):
        return tensor[0::2], tensor[1::2]

    def _get_images(self, batch):
        assert isinstance(batch[&#34;image&#34;], torch.Tensor)

        # check data shape
        shape = batch[&#34;image&#34;].shape
        assert len(shape) == 4
        assert shape[1] == 1
        assert shape[2] % self._cell_size == 0
        assert shape[3] % self._cell_size == 0

        return batch[&#34;image&#34;], shape

    def _get_labels(self, positions, shape):
        assert isinstance(positions, list)

        # get label map of sampled images
        label_map = positions_to_label_map(positions, shape[-2:])
        label_map = label_map.permute(0, 3, 1, 2)

        # get labels by adding dustbin and conver cells to depth
        label_map = space_to_depth(label_map, self._cell_size)

        return label_map

    def _get_positions(self, batch):
        # remove confidence value
        return [p[..., :2] for p in batch[&#34;positions&#34;]]

    def _warp_images(self, images):
        shape = images.shape

        # apply two homographic transforms to each input images
        sampler = RandomHomographicSampler(
            (2 if self._warp_original else 1) * shape[0],
            shape[-2:],
            device=images.device,
            **self._training_random_homography_kwargs,
        )

        warped_images = sampler.forward_sampling(images)

        if self._warp_original:
            images = warped_images
        else:
            images = torch.stack((images, warped_images), dim=1)
            images = images.view((-1,) + shape[1:])

        return sampler, images

    def _warp_positions(self, sampler, positions, images_shape):
        # transform label positions to transformed image space
        warped_positions = sampler.transform_points(
            positions,
            image_shape=images_shape[-2:],
            direction=&#34;forward&#34;,
            ordering=&#34;yx&#34;,
        )

        if self._warp_original:
            positions = warped_positions
        else:
            new_positions = []
            for p0, p1 in zip(positions, warped_positions):
                new_positions.extend((p0, p1))
            positions = new_positions

        return positions

    def _aug_images(self, images, use_image_aug):
        if use_image_aug:
            images = self._image_aug_transform(images)
        return images

    def _total_loss(
        self,
        mode,
        batch,
        use_image_aug,
    ):
        detection_loss, descriptor_loss = self._batch_to_both_losses(
            batch, use_image_aug
        )

        total_loss = detection_loss + self._lamdba_descriptor_loss * descriptor_loss

        self.log(f&#34;{mode}.detection.loss&#34;, detection_loss)
        self.log(f&#34;{mode}.descriptor.loss&#34;, descriptor_loss)
        self.log(f&#34;{mode}.total.loss&#34;, total_loss)

        return total_loss

    def training_step(self, batch, batch_idx):
        return self._total_loss(&#34;train&#34;, batch, use_image_aug=True)

    def validation_step(self, batch, batch_idx):
        return self._total_loss(&#34;val&#34;, batch, use_image_aug=False)

    def predict_step(
        self,
        batch: NamedContext,
        batch_idx: Optional[int] = None,
        dataloader_idx: Optional[int] = None,
    ) -&gt; Any:
        images = self.flow.with_outputs(&#34;images&#34;)(batch)
        points = self._model.flow.with_outputs(&#34;positions&#34;)(images)
        return batch.add(&#34;points&#34;, points)</code></pre>
</details>
</section>
<section>
</section>
<section>
</section>
<section>
</section>
<section>
<h2 class="section-title" id="header-classes">Classes</h2>
<dl>
<dt id="silk.models.superpoint.SuperPoint"><code class="flex name class">
<span>class <span class="ident">SuperPoint</span></span>
<span>(</span><span>model, optimizer_spec: Optional[<a title="silk.config.optimizer.Spec" href="../config/optimizer.html#silk.config.optimizer.Spec">Spec</a>] = None, image_aug_transform: Optional[<a title="silk.transforms.abstract.Transform" href="../transforms/abstract.html#silk.transforms.abstract.Transform">Transform</a>] = None, warp_original: bool = False, descriptor_loss=DescriptorLoss(), detection_loss=KeypointLoss(), lamdba_descriptor_loss: float = 0.0001, training_random_homography_kwargs: Optional[Dict[str, Any]] = None, random_homographic_adaptation_kwargs: Optional[Dict[str, Any]] = None, default_outputs: Union[str, Iterable[str]] = ('coarse_descriptors', 'logits'), **kwargs)</span>
</code></dt>
<dd>
<div class="desc"><p>Automate the most common pattern of optimizer creation.
This pattern consists of one optimizer per model.</p>
<h2 id="examples">Examples</h2>
<pre><code class="language-python">class MyCustomModel(OptimizersHandler, pl.LightningModule):
    def __init__(self, optimizer_spec, **kwargs):
        OptimizersHandler.__init__(self, optimizer_spec)
        pl.LightningModule.__init__(self, **kwargs)
        ...
</code></pre>
<p>This will automatically equip <code>MyCustomModel</code> with the <code>configure_optimizers</code> method required by Pytorch Lightning.
Notice how <code>OptimizersHandler</code> is before <code>pl.LightningModule</code> in the list of base classes.
This is necessary since <code>pl.LightningModule</code> checks if the current class has a method called <code>configure_optimizers</code>.</p>
<pre><code class="language-python">class MyCustomModel(OptimizersHandler, pl.LightningModule):
    def __init__(self, optimizer_spec_A, optimizer_spec_B, **kwargs):
        self.submodel_A = ModelA(...)
        self.submodel_B = ModelB(...)

        OptimizersHandler.__init__(self,
            MultiSpec(optimizer_spec_A, optimizer_spec_B),
            self.submodel_A, self.submodel_B
        )
        pl.LightningModule.__init__(self, **kwargs)
        ...
</code></pre>
<p>In this case, two optimizers will be automatically created and attached to their relative model.</p>
<p>Initialize the SuperPoint model.</p>
<p>Assumes an RGB image with 1 color channel (grayscale image).</p>
<h2 id="parameters">Parameters</h2>
<dl>
<dt><strong><code>optimizer_spec</code></strong> :&ensp;<code>Spec</code></dt>
<dd>Optimizer spec to use for training.</dd>
<dt><strong><code>image_aug_transform</code></strong> :&ensp;<code>Union[Transform, None]</code>, optional</dt>
<dd>Transform to apply to every warped image used during training.</dd>
<dt><strong><code>warp_original</code></strong> :&ensp;<code>bool</code>, optional</dt>
<dd>Warps original image during training, by default False</dd>
<dt><strong><code>lamdba_descriptor_loss</code></strong> :&ensp;<code>float</code>, optional</dt>
<dd>Descriptor loss weight, by default 0.0001</dd>
<dt><strong><code>random_homographic_adaptation_kwargs</code></strong> :&ensp;<code>Union[Dict[str, Any], None]</code></dt>
<dd>Parameters passed to <code>RandomHomographicSampler</code> (used during homographic adaptation)</dd>
<dt><strong><code>training_random_homography_kwargs</code></strong> :&ensp;<code>Union[Dict[str, Any], None]</code></dt>
<dd>Parameters passed to <code>RandomHomographicSampler</code> (used during training)</dd>
</dl></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">class SuperPoint(
    OptimizersHandler,
    AutoForward,
    StateDictRedirect,
    pl.LightningModule,
    HomographyAdaptation,
):
    DEFAULT_DESCRIPTOR_LOSS = DescriptorLoss()
    DEFAULT_KEYPOINT_LOSS = KeypointLoss()

    def __init__(
        self,
        model,
        optimizer_spec: Union[Spec, None] = None,
        image_aug_transform: Union[Transform, None] = None,
        warp_original: bool = False,
        descriptor_loss=DEFAULT_DESCRIPTOR_LOSS,
        detection_loss=DEFAULT_KEYPOINT_LOSS,
        lamdba_descriptor_loss: float = 0.0001,
        training_random_homography_kwargs: Union[Dict[str, Any], None] = None,
        random_homographic_adaptation_kwargs: Union[Dict[str, Any], None] = None,
        default_outputs: Union[str, Iterable[str]] = (&#34;coarse_descriptors&#34;, &#34;logits&#34;),
        **kwargs,
    ):
        &#34;&#34;&#34;Initialize the SuperPoint model.

        Assumes an RGB image with 1 color channel (grayscale image).

        Parameters
        ----------
        optimizer_spec : Spec
            Optimizer spec to use for training.
        image_aug_transform : Union[Transform, None], optional
            Transform to apply to every warped images used during training.
        warp_original : bool, optional
            Warps original image during training, by default False
        lamdba_descriptor_loss : float, optional
            Descriptor loss weight, by default 0.0001
        random_homographic_adaptation_kwargs : Union[Dict[str, Any], None]
            Parameters passed to `RandomHomographicSampler` (used during homographic adaptation)
        training_random_homography_kwargs: Union[Dict[str, Any], None]
            Parameters passed to `RandomHomographicSampler` (used during training)
        &#34;&#34;&#34;

        OptimizersHandler.__init__(self, optimizer_spec)
        pl.LightningModule.__init__(self, **kwargs)
        StateDictRedirect.__init__(self, model)
        AutoForward.__init__(self, Flow(&#34;batch&#34;, &#34;use_image_aug&#34;), default_outputs)
        HomographyAdaptation.__init__(
            self,
            random_homographic_adaptation_kwargs,
            self._get_scores,
            model.magicpoint._detection_threshold,
            model.magicpoint._nms_dist,
            model.magicpoint._border_dist,
        )

        self._model = model
        self._cell_size = self.model.magicpoint._cell_size
        self._detection_loss = detection_loss
        self._descriptor_loss = descriptor_loss

        self._lamdba_descriptor_loss = lamdba_descriptor_loss
        self._image_aug_transform = image_aug_transform

        # homographic sampler arguments
        self._training_random_homography_kwargs = (
            {}
            if training_random_homography_kwargs is None
            else training_random_homography_kwargs
        )

        self._warp_original = warp_original

        self.flow.define_transition(&#34;checked_batch&#34;, self._check_batch, &#34;batch&#34;)
        self.flow.define_transition(
            (&#34;images&#34;, &#34;image_shape&#34;),
            self._get_images,
            &#34;checked_batch&#34;,
        )
        self.flow.define_transition(&#34;positions&#34;, self._get_positions, &#34;checked_batch&#34;)
        self.flow.define_transition(
            (&#34;sampler&#34;, &#34;warped_images&#34;),
            self._warp_images,
            &#34;images&#34;,
        )
        self.flow.define_transition(
            &#34;augmented_images&#34;,
            self._aug_images,
            &#34;warped_images&#34;,
            &#34;use_image_aug&#34;,
        )
        self.flow.define_transition(
            &#34;warped_positions&#34;,
            self._warp_positions,
            &#34;sampler&#34;,
            &#34;positions&#34;,
            &#34;image_shape&#34;,
        )
        self.flow.define_transition(
            &#34;labels&#34;,
            self._get_labels,
            &#34;warped_positions&#34;,
            &#34;image_shape&#34;,
        )
        self.flow.define_transition(
            (&#34;descriptors&#34;, &#34;logits&#34;),
            self._model.forward_flow,
            outputs=Flow.Constant((&#34;coarse_descriptors&#34;, &#34;logits&#34;)),
            images=&#34;augmented_images&#34;,
        )
        self.flow.define_transition(
            (&#34;positions_0&#34;, &#34;positions_1&#34;),
            self._split_batch_dim,
            &#34;warped_positions&#34;,
        )
        self.flow.define_transition(
            (&#34;descriptors_0&#34;, &#34;descriptors_1&#34;),
            self._split_batch_dim,
            &#34;descriptors&#34;,
        )
        self.flow.define_transition(
            &#34;similarity_mask&#34;,
            build_similarity_mask,
            descriptors=&#34;descriptors&#34;,
            positions_0=&#34;positions_0&#34;,
            positions_1=&#34;positions_1&#34;,
            cell_size=Flow.Constant(self._cell_size),
        )

        # compute losses
        self.flow.define_transition(
            &#34;detection_loss&#34;,
            self._detection_loss,
            &#34;logits&#34;,
            &#34;labels&#34;,
        )
        self.flow.define_transition(
            &#34;descriptor_loss&#34;,
            self._descriptor_loss,
            &#34;descriptors_0&#34;,
            &#34;descriptors_1&#34;,
            &#34;similarity_mask&#34;,
        )

        self._batch_to_both_losses = self.flow.with_outputs(
            (&#34;detection_loss&#34;, &#34;descriptor_loss&#34;)
        )

    def _get_scores(self, images):
        images = images.to(self.device)
        return self._model.forward_flow(&#34;score&#34;, images)

    @property
    def model(self):
        return self._model

    def model_forward_flow(self, *args, **kwargs):
        return self._model.forward_flow(*args, **kwargs)

    def _check_batch(self, batch):
        # check batch
        ensure_is_instance(batch, NamedContext)

        if self.training:
            batch.ensure_exists(&#34;image&#34;, &#34;positions&#34;)
        else:
            batch.ensure_exists(&#34;image&#34;)

        def to_device(el):
            if isinstance(el, torch.Tensor):
                return el.to(self.device)
            elif isinstance(el, list):
                return [e.to(self.device) for e in el]
            raise RuntimeError(f&#34;type {type(el)} not handled&#34;)

        # send data to model&#39;s device
        batch = batch.map(to_device)

        return batch

    def _split_batch_dim(self, tensor):
        return tensor[0::2], tensor[1::2]

    def _get_images(self, batch):
        assert isinstance(batch[&#34;image&#34;], torch.Tensor)

        # check data shape
        shape = batch[&#34;image&#34;].shape
        assert len(shape) == 4
        assert shape[1] == 1
        assert shape[2] % self._cell_size == 0
        assert shape[3] % self._cell_size == 0

        return batch[&#34;image&#34;], shape

    def _get_labels(self, positions, shape):
        assert isinstance(positions, list)

        # get label map of sampled images
        label_map = positions_to_label_map(positions, shape[-2:])
        label_map = label_map.permute(0, 3, 1, 2)

        # get labels by adding dustbin and conver cells to depth
        label_map = space_to_depth(label_map, self._cell_size)

        return label_map

    def _get_positions(self, batch):
        # remove confidence value
        return [p[..., :2] for p in batch[&#34;positions&#34;]]

    def _warp_images(self, images):
        shape = images.shape

        # apply two homographic transforms to each input images
        sampler = RandomHomographicSampler(
            (2 if self._warp_original else 1) * shape[0],
            shape[-2:],
            device=images.device,
            **self._training_random_homography_kwargs,
        )

        warped_images = sampler.forward_sampling(images)

        if self._warp_original:
            images = warped_images
        else:
            images = torch.stack((images, warped_images), dim=1)
            images = images.view((-1,) + shape[1:])

        return sampler, images

    def _warp_positions(self, sampler, positions, images_shape):
        # transform label positions to transformed image space
        warped_positions = sampler.transform_points(
            positions,
            image_shape=images_shape[-2:],
            direction=&#34;forward&#34;,
            ordering=&#34;yx&#34;,
        )

        if self._warp_original:
            positions = warped_positions
        else:
            new_positions = []
            for p0, p1 in zip(positions, warped_positions):
                new_positions.extend((p0, p1))
            positions = new_positions

        return positions

    def _aug_images(self, images, use_image_aug):
        if use_image_aug:
            images = self._image_aug_transform(images)
        return images

    def _total_loss(
        self,
        mode,
        batch,
        use_image_aug,
    ):
        detection_loss, descriptor_loss = self._batch_to_both_losses(
            batch, use_image_aug
        )

        total_loss = detection_loss + self._lamdba_descriptor_loss * descriptor_loss

        self.log(f&#34;{mode}.detection.loss&#34;, detection_loss)
        self.log(f&#34;{mode}.descriptor.loss&#34;, descriptor_loss)
        self.log(f&#34;{mode}.total.loss&#34;, total_loss)

        return total_loss

    def training_step(self, batch, batch_idx):
        return self._total_loss(&#34;train&#34;, batch, use_image_aug=True)

    def validation_step(self, batch, batch_idx):
        return self._total_loss(&#34;val&#34;, batch, use_image_aug=False)

    def predict_step(
        self,
        batch: NamedContext,
        batch_idx: Optional[int] = None,
        dataloader_idx: Optional[int] = None,
    ) -&gt; Any:
        images = self.flow.with_outputs(&#34;images&#34;)(batch)
        points = self._model.flow.with_outputs(&#34;positions&#34;)(images)
        return batch.add(&#34;points&#34;, points)</code></pre>
</details>
<h3>Ancestors</h3>
<ul class="hlist">
<li><a title="silk.models.abstract.OptimizersHandler" href="abstract.html#silk.models.abstract.OptimizersHandler">OptimizersHandler</a></li>
<li><a title="silk.flow.AutoForward" href="../flow.html#silk.flow.AutoForward">AutoForward</a></li>
<li><a title="silk.models.abstract.StateDictRedirect" href="abstract.html#silk.models.abstract.StateDictRedirect">StateDictRedirect</a></li>
<li>pytorch_lightning.core.lightning.LightningModule</li>
<li>pytorch_lightning.core.mixins.device_dtype_mixin.DeviceDtypeModuleMixin</li>
<li>pytorch_lightning.core.mixins.hparams_mixin.HyperparametersMixin</li>
<li>pytorch_lightning.core.saving.ModelIO</li>
<li>pytorch_lightning.core.hooks.ModelHooks</li>
<li>pytorch_lightning.core.hooks.DataHooks</li>
<li>pytorch_lightning.core.hooks.CheckpointHooks</li>
<li>torch.nn.modules.module.Module</li>
<li><a title="silk.models.magicpoint.HomographyAdaptation" href="magicpoint.html#silk.models.magicpoint.HomographyAdaptation">HomographyAdaptation</a></li>
</ul>
<h3>Class variables</h3>
<dl>
<dt id="silk.models.superpoint.SuperPoint.DEFAULT_DESCRIPTOR_LOSS"><code class="name">var <span class="ident">DEFAULT_DESCRIPTOR_LOSS</span></code></dt>
<dd>
<div class="desc"></div>
</dd>
<dt id="silk.models.superpoint.SuperPoint.DEFAULT_KEYPOINT_LOSS"><code class="name">var <span class="ident">DEFAULT_KEYPOINT_LOSS</span></code></dt>
<dd>
<div class="desc"></div>
</dd>
<dt id="silk.models.superpoint.SuperPoint.dump_patches"><code class="name">var <span class="ident">dump_patches</span> : bool</code></dt>
<dd>
<div class="desc"></div>
</dd>
<dt id="silk.models.superpoint.SuperPoint.training"><code class="name">var <span class="ident">training</span> : bool</code></dt>
<dd>
<div class="desc"></div>
</dd>
</dl>
<h3>Instance variables</h3>
<dl>
<dt id="silk.models.superpoint.SuperPoint.model"><code class="name">var <span class="ident">model</span></code></dt>
<dd>
<div class="desc"></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">@property
def model(self):
    return self._model</code></pre>
</details>
</dd>
</dl>
<h3>Methods</h3>
<dl>
<dt id="silk.models.superpoint.SuperPoint.forward"><code class="name flex">
<span>def <span class="ident">forward</span></span>(<span>self, *args, **kwargs) ‑> Callable[..., Any]</span>
</code></dt>
<dd>
<div class="desc"></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def forward(self, *args, **kwargs):
    if self._forward_flow is None:
        self._forward_flow = self._flow.with_outputs(self._default_outputs)
    return self._forward_flow(*args, **kwargs)</code></pre>
</details>
</dd>
<dt id="silk.models.superpoint.SuperPoint.model_forward_flow"><code class="name flex">
<span>def <span class="ident">model_forward_flow</span></span>(<span>self, *args, **kwargs)</span>
</code></dt>
<dd>
<div class="desc"></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def model_forward_flow(self, *args, **kwargs):
    return self._model.forward_flow(*args, **kwargs)</code></pre>
</details>
</dd>
<dt id="silk.models.superpoint.SuperPoint.predict_step"><code class="name flex">
<span>def <span class="ident">predict_step</span></span>(<span>self, batch: <a title="silk.transforms.abstract.NamedContext" href="../transforms/abstract.html#silk.transforms.abstract.NamedContext">NamedContext</a>, batch_idx: Optional[int] = None, dataloader_idx: Optional[int] = None) ‑> Any</span>
</code></dt>
<dd>
<div class="desc"><p>Step function called during :meth:<code>~pytorch_lightning.trainer.trainer.Trainer.predict</code>. By default, it
calls :meth:<code>~pytorch_lightning.core.lightning.LightningModule.forward</code>. Override to add any processing
logic.</p>
<p>The :meth:<code>~pytorch_lightning.core.lightning.LightningModule.predict_step</code> is used
to scale inference on multi-devices.</p>
<p>To prevent an OOM error, it is possible to use :class:<code>~pytorch_lightning.callbacks.BasePredictionWriter</code>
callback to write the predictions to disk or database after each batch or on epoch end.</p>
<p>The :class:<code>~pytorch_lightning.callbacks.BasePredictionWriter</code> should be used while using a spawn
based accelerator. This happens for <code>Trainer(strategy="ddp_spawn")</code>
or training on 8 TPU cores with <code>Trainer(tpu_cores=8)</code> as predictions won't be returned.</p>
<p>Example ::</p>
<pre><code>class MyModel(LightningModule):

    def predict_step(self, batch, batch_idx, dataloader_idx):
        return self(batch)

dm = ...
model = MyModel()
trainer = Trainer(gpus=2)
predictions = trainer.predict(model, dm)
</code></pre>
<h2 id="args">Args</h2>
<dl>
<dt><strong><code>batch</code></strong></dt>
<dd>Current batch</dd>
<dt><strong><code>batch_idx</code></strong></dt>
<dd>Index of current batch</dd>
<dt><strong><code>dataloader_idx</code></strong></dt>
<dd>Index of the current dataloader</dd>
</dl>
<h2 id="return">Return</h2>
<p>Predicted output</p></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def predict_step(
    self,
    batch: NamedContext,
    batch_idx: Optional[int] = None,
    dataloader_idx: Optional[int] = None,
) -&gt; Any:
    images = self.flow.with_outputs(&#34;images&#34;)(batch)
    points = self._model.flow.with_outputs(&#34;positions&#34;)(images)
    return batch.add(&#34;points&#34;, points)</code></pre>
</details>
</dd>
<dt id="silk.models.superpoint.SuperPoint.training_step"><code class="name flex">
<span>def <span class="ident">training_step</span></span>(<span>self, batch, batch_idx)</span>
</code></dt>
<dd>
<div class="desc"><p>Here you compute and return the training loss and some additional metrics for e.g.
the progress bar or logger.</p>
<h2 id="args">Args</h2>
<p>batch (:class:<code>~torch.Tensor</code> | (:class:<code>~torch.Tensor</code>, &hellip;) | [:class:<code>~torch.Tensor</code>, &hellip;]):
The output of your :class:<code>~torch.utils.data.DataLoader</code>. A tensor, tuple or list.
batch_idx (<code>int</code>): Integer displaying index of this batch
optimizer_idx (<code>int</code>): When using multiple optimizers, this argument will also be present.
hiddens (<code>Any</code>): Passed in if
:paramref:<code>~pytorch_lightning.core.lightning.LightningModule.truncated_bptt_steps</code> &gt; 0.</p>
<h2 id="return">Return</h2>
<p>Any of:</p>
<ul>
<li>:class:<code>~torch.Tensor</code> - The loss tensor</li>
<li><code>dict</code> - A dictionary. Can include any keys, but must include the key <code>'loss'</code></li>
<li><code>None</code> - Training will skip to the next batch. This is only for automatic optimization.
This is not supported for multi-GPU, TPU, IPU, or DeepSpeed.</li>
</ul>
<p>In this step you'd normally do the forward pass and calculate the loss for a batch.
You can also do fancier things like multiple forward passes or something model specific.</p>
<p>Example::</p>
<pre><code>def training_step(self, batch, batch_idx):
    x, y, z = batch
    out = self.encoder(x)
    loss = self.loss(out, x)
    return loss
</code></pre>
<p>If you define multiple optimizers, this step will be called with an additional
<code>optimizer_idx</code> parameter.</p>
<p>.. code-block:: python</p>
<pre><code># Multiple optimizers (e.g.: GANs)
def training_step(self, batch, batch_idx, optimizer_idx):
    if optimizer_idx == 0:
        # do training_step with encoder
        ...
    if optimizer_idx == 1:
        # do training_step with decoder
        ...
</code></pre>
<p>If you add truncated back propagation through time you will also get an additional
argument with the hidden states of the previous step.</p>
<p>.. code-block:: python</p>
<pre><code># Truncated back-propagation through time
def training_step(self, batch, batch_idx, hiddens):
    # hiddens are the hidden states from the previous truncated backprop step
    out, hiddens = self.lstm(data, hiddens)
    loss = ...
    return {"loss": loss, "hiddens": hiddens}
</code></pre>
<h2 id="note">Note</h2>
<p>The loss value shown in the progress bar is smoothed (averaged) over the last values,
so it differs from the actual loss returned in train/validation step.</p></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def training_step(self, batch, batch_idx):
    return self._total_loss(&#34;train&#34;, batch, use_image_aug=True)</code></pre>
</details>
</dd>
<dt id="silk.models.superpoint.SuperPoint.validation_step"><code class="name flex">
<span>def <span class="ident">validation_step</span></span>(<span>self, batch, batch_idx)</span>
</code></dt>
<dd>
<div class="desc"><p>Operates on a single batch of data from the validation set.
In this step you might generate examples or calculate anything of interest like accuracy.</p>
<p>.. code-block:: python</p>
<pre><code># the pseudocode for these calls
val_outs = []
for val_batch in val_data:
    out = validation_step(val_batch)
    val_outs.append(out)
validation_epoch_end(val_outs)
</code></pre>
<h2 id="args">Args</h2>
<dl>
<dt>batch (:class:<code>~torch.Tensor</code> | (:class:<code>~torch.Tensor</code>, &hellip;) | [:class:<code>~torch.Tensor</code>, &hellip;]):</dt>
<dt>The output of your :class:<code>~torch.utils.data.DataLoader</code>. A tensor, tuple or list.</dt>
<dt><strong><code>batch_idx</code></strong> :&ensp;<code>int</code></dt>
<dd>The index of this batch</dd>
<dt><strong><code>dataloader_idx</code></strong> :&ensp;<code>int</code></dt>
<dd>The index of the dataloader that produced this batch
(only if multiple val dataloaders used)</dd>
</dl>
<h2 id="return">Return</h2>
<ul>
<li>Any object or value</li>
<li><code>None</code> - Validation will skip to the next batch</li>
</ul>
<p>.. code-block:: python</p>
<pre><code># pseudocode of order
val_outs = []
for val_batch in val_data:
    out = validation_step(val_batch)
    if defined("validation_step_end"):
        out = validation_step_end(out)
    val_outs.append(out)
val_outs = validation_epoch_end(val_outs)
</code></pre>
<p>.. code-block:: python</p>
<pre><code># if you have one val dataloader:
def validation_step(self, batch, batch_idx):
    ...


# if you have multiple val dataloaders:
def validation_step(self, batch, batch_idx, dataloader_idx):
    ...
</code></pre>
<p>Examples::</p>
<pre><code># CASE 1: A single validation dataset
def validation_step(self, batch, batch_idx):
    x, y = batch

    # implement your own
    out = self(x)
    loss = self.loss(out, y)

    # log 6 example images
    # or generated text... or whatever
    sample_imgs = x[:6]
    grid = torchvision.utils.make_grid(sample_imgs)
    self.logger.experiment.add_image('example_images', grid, 0)

    # calculate acc
    labels_hat = torch.argmax(out, dim=1)
    val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)

    # log the outputs!
    self.log_dict({'val_loss': loss, 'val_acc': val_acc})
</code></pre>
<p>If you pass in multiple val dataloaders, :meth:<code>validation_step</code> will have an additional argument.</p>
<p>.. code-block:: python</p>
<pre><code># CASE 2: multiple validation dataloaders
def validation_step(self, batch, batch_idx, dataloader_idx):
    # dataloader_idx tells you which dataset this is.
    ...
</code></pre>
<h2 id="note">Note</h2>
<p>If you don't need to validate you don't need to implement this method.</p>
<h2 id="note_1">Note</h2>
<p>When the :meth:<code>validation_step</code> is called, the model has been put in eval mode
and PyTorch gradients have been disabled. At the end of validation,
the model goes back to training mode and gradients are enabled.</p></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def validation_step(self, batch, batch_idx):
    return self._total_loss(&#34;val&#34;, batch, use_image_aug=False)</code></pre>
</details>
</dd>
</dl>
<h3>Inherited members</h3>
<ul class="hlist">
<li><code><b><a title="silk.models.magicpoint.HomographyAdaptation" href="magicpoint.html#silk.models.magicpoint.HomographyAdaptation">HomographyAdaptation</a></b></code>:
<ul class="hlist">
<li><code><a title="silk.models.magicpoint.HomographyAdaptation.homographic_adaptation_prediction" href="magicpoint.html#silk.models.magicpoint.HomographyAdaptation.homographic_adaptation_prediction">homographic_adaptation_prediction</a></code></li>
</ul>
</li>
</ul>
</dd>
</dl>
</section>
</article>
<nav id="sidebar">
<h1>Index</h1>
<div class="toc">
<ul></ul>
</div>
<ul id="index">
<li><h3>Super-module</h3>
<ul>
<li><code><a title="silk.models" href="index.html">silk.models</a></code></li>
</ul>
</li>
<li><h3><a href="#header-classes">Classes</a></h3>
<ul>
<li>
<h4><code><a title="silk.models.superpoint.SuperPoint" href="#silk.models.superpoint.SuperPoint">SuperPoint</a></code></h4>
<ul class="">
<li><code><a title="silk.models.superpoint.SuperPoint.DEFAULT_DESCRIPTOR_LOSS" href="#silk.models.superpoint.SuperPoint.DEFAULT_DESCRIPTOR_LOSS">DEFAULT_DESCRIPTOR_LOSS</a></code></li>
<li><code><a title="silk.models.superpoint.SuperPoint.DEFAULT_KEYPOINT_LOSS" href="#silk.models.superpoint.SuperPoint.DEFAULT_KEYPOINT_LOSS">DEFAULT_KEYPOINT_LOSS</a></code></li>
<li><code><a title="silk.models.superpoint.SuperPoint.dump_patches" href="#silk.models.superpoint.SuperPoint.dump_patches">dump_patches</a></code></li>
<li><code><a title="silk.models.superpoint.SuperPoint.forward" href="#silk.models.superpoint.SuperPoint.forward">forward</a></code></li>
<li><code><a title="silk.models.superpoint.SuperPoint.model" href="#silk.models.superpoint.SuperPoint.model">model</a></code></li>
<li><code><a title="silk.models.superpoint.SuperPoint.model_forward_flow" href="#silk.models.superpoint.SuperPoint.model_forward_flow">model_forward_flow</a></code></li>
<li><code><a title="silk.models.superpoint.SuperPoint.predict_step" href="#silk.models.superpoint.SuperPoint.predict_step">predict_step</a></code></li>
<li><code><a title="silk.models.superpoint.SuperPoint.training" href="#silk.models.superpoint.SuperPoint.training">training</a></code></li>
<li><code><a title="silk.models.superpoint.SuperPoint.training_step" href="#silk.models.superpoint.SuperPoint.training_step">training_step</a></code></li>
<li><code><a title="silk.models.superpoint.SuperPoint.validation_step" href="#silk.models.superpoint.SuperPoint.validation_step">validation_step</a></code></li>
</ul>
</li>
</ul>
</li>
</ul>
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.10.0</a>.</p>
</footer>
</body>
</html>