<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.10.0" />
<title>silk.cv.homography API documentation</title>
<meta name="description" content="" />
<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/sanitize.min.css" integrity="sha256-PK9q560IAAa6WVRRh76LtCaI8pjTJ2z11v0miyNNjrs=" crossorigin>
<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/typography.min.css" integrity="sha256-7l/o7C8jubJiy74VsKTidCy1yBkRtiUGbVkYBylBqUg=" crossorigin>
<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/styles/github.min.css" crossorigin>
<style>:root{--highlight-color:#fe9}.flex{display:flex !important}body{line-height:1.5em}#content{padding:20px}#sidebar{padding:30px;overflow:hidden}#sidebar > *:last-child{margin-bottom:2cm}.http-server-breadcrumbs{font-size:130%;margin:0 0 15px 0}#footer{font-size:.75em;padding:5px 30px;border-top:1px solid #ddd;text-align:right}#footer p{margin:0 0 0 1em;display:inline-block}#footer p:last-child{margin-right:30px}h1,h2,h3,h4,h5{font-weight:300}h1{font-size:2.5em;line-height:1.1em}h2{font-size:1.75em;margin:1em 0 .50em 0}h3{font-size:1.4em;margin:25px 0 10px 0}h4{margin:0;font-size:105%}h1:target,h2:target,h3:target,h4:target,h5:target,h6:target{background:var(--highlight-color);padding:.2em 0}a{color:#058;text-decoration:none;transition:color .3s ease-in-out}a:hover{color:#e82}.title code{font-weight:bold}h2[id^="header-"]{margin-top:2em}.ident{color:#900}pre code{background:#f8f8f8;font-size:.8em;line-height:1.4em}code{background:#f2f2f1;padding:1px 4px;overflow-wrap:break-word}h1 code{background:transparent}pre{background:#f8f8f8;border:0;border-top:1px solid #ccc;border-bottom:1px solid #ccc;margin:1em 0;padding:1ex}#http-server-module-list{display:flex;flex-flow:column}#http-server-module-list div{display:flex}#http-server-module-list dt{min-width:10%}#http-server-module-list p{margin-top:0}.toc ul,#index{list-style-type:none;margin:0;padding:0}#index code{background:transparent}#index h3{border-bottom:1px solid #ddd}#index ul{padding:0}#index h4{margin-top:.6em;font-weight:bold}@media (min-width:200ex){#index .two-column{column-count:2}}@media (min-width:300ex){#index .two-column{column-count:3}}dl{margin-bottom:2em}dl dl:last-child{margin-bottom:4em}dd{margin:0 0 1em 3em}#header-classes + dl > dd{margin-bottom:3em}dd dd{margin-left:2em}dd p{margin:10px 0}.name{background:#eee;font-weight:bold;font-size:.85em;padding:5px 10px;display:inline-block;min-width:40%}.name:hover{background:#e0e0e0}dt:target .name{background:var(--highlight-color)}.name > 
span:first-child{white-space:nowrap}.name.class > span:nth-child(2){margin-left:.4em}.inherited{color:#999;border-left:5px solid #eee;padding-left:1em}.inheritance em{font-style:normal;font-weight:bold}.desc h2{font-weight:400;font-size:1.25em}.desc h3{font-size:1em}.desc dt code{background:inherit}.source summary,.git-link-div{color:#666;text-align:right;font-weight:400;font-size:.8em;text-transform:uppercase}.source summary > *{white-space:nowrap;cursor:pointer}.git-link{color:inherit;margin-left:1em}.source pre{max-height:500px;overflow:auto;margin:0}.source pre code{font-size:12px;overflow:visible}.hlist{list-style:none}.hlist li{display:inline}.hlist li:after{content:',\2002'}.hlist li:last-child:after{content:none}.hlist .hlist{display:inline;padding-left:1em}img{max-width:100%}td{padding:0 .5em}.admonition{padding:.1em .5em;margin-bottom:1em}.admonition-title{font-weight:bold}.admonition.note,.admonition.info,.admonition.important{background:#aef}.admonition.todo,.admonition.versionadded,.admonition.tip,.admonition.hint{background:#dfd}.admonition.warning,.admonition.versionchanged,.admonition.deprecated{background:#fd4}.admonition.error,.admonition.danger,.admonition.caution{background:lightpink}</style>
<style media="screen and (min-width: 700px)">@media screen and (min-width:700px){#sidebar{width:30%;height:100vh;overflow:auto;position:sticky;top:0}#content{width:70%;max-width:100ch;padding:3em 4em;border-left:1px solid #ddd}pre code{font-size:1em}.item .name{font-size:1em}main{display:flex;flex-direction:row-reverse;justify-content:flex-end}.toc ul ul,#index ul{padding-left:1.5em}.toc > ul > li{margin-top:.5em}}</style>
<style media="print">@media print{#sidebar h1{page-break-before:always}.source{display:none}}@media print{*{background:transparent !important;color:#000 !important;box-shadow:none !important;text-shadow:none !important}a[href]:after{content:" (" attr(href) ")";font-size:90%}a[href][title]:after{content:none}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,h3{orphans:3;widows:3}h1,h2,h3,h4,h5,h6{page-break-after:avoid}}</style>
<script defer src="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/highlight.min.js" integrity="sha256-Uv3H6lx7dJmRfRvH8TH6kJD1TSK1aFcwgx+mdg3epi8=" crossorigin></script>
<script>window.addEventListener('DOMContentLoaded', () => hljs.initHighlighting())</script>
</head>
<body>
<main>
<article id="content">
<header>
<h1 class="title">Module <code>silk.cv.homography</code></h1>
</header>
<section id="section-intro">
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python"># Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import itertools
from typing import List, Tuple, Union, Optional

import torch
from torch.nn.functional import grid_sample
from torch.nn.utils.rnn import pad_sequence


def resize_homography(
    homography: torch.Tensor,
    original_image_shape: Tuple[int, int],
    new_original_image_shape: Tuple[int, int],
    warped_image_shape: Optional[Tuple[int, int]] = None,
    new_warped_image_shape: Optional[Tuple[int, int]] = None,
) -> torch.Tensor:
    """Change homography matrix when image sizes change.

    Parameters
    ----------
    homography : torch.Tensor
        Homography matrix as a 3x3 Tensor.
    original_image_shape : Tuple[int, int]
        Size of the original image the current homography applies to.
    new_original_image_shape : Tuple[int, int]
        Size of the new original image the resized homography should apply to.
    warped_image_shape : Tuple[int, int], optional
        Size of the warped image the current homography applies to, by default None. Set to `original_image_shape` when None.
    new_warped_image_shape : Tuple[int, int], optional
        Size of the new warped image the resized homography should apply to, by default None. Set to `new_original_image_shape` when None.

    Returns
    -------
    torch.Tensor
        New homography operating on provided image sizes.
    """
    warped_image_shape = (
        original_image_shape if warped_image_shape is None else warped_image_shape
    )
    new_warped_image_shape = (
        new_original_image_shape
        if new_warped_image_shape is None
        else new_warped_image_shape
    )

    # compute resizing factors (shapes are (height, width))
    oh_factor = original_image_shape[0] / new_original_image_shape[0]
    ow_factor = original_image_shape[1] / new_original_image_shape[1]

    wh_factor = new_warped_image_shape[0] / warped_image_shape[0]
    ww_factor = new_warped_image_shape[1] / warped_image_shape[1]

    # build resizing diagonal matrices (x/width first, y/height second)
    up_scale = torch.diag(
        torch.tensor(
            [ow_factor, oh_factor, 1.0],
            dtype=homography.dtype,
            device=homography.device,
        )
    )
    down_scale = torch.diag(
        torch.tensor(
            [ww_factor, wh_factor, 1.0],
            dtype=homography.dtype,
            device=homography.device,
        )
    )

    # apply resizing to homography: rescale input coords, apply H, rescale output coords
    homography = down_scale @ homography @ up_scale

    return homography


class HomographicSampler:
    """Samples multiple homographic crops from multiple batched images.

    This sampler makes it very easy to sample homographic crops from multiple images by manipulating a virtual crop initially centered on the entire image.
    Applying successive simple transformations (xyz-rotation, shift, scale) will modify the position and shape of that virtual crop.
    Transformations operate on normalized coordinates independent of an image shape.
    The initial virtual crop has its top-left position at (-1, -1), and bottom-right position at (+1, +1).
    Thus the center being at position (0, 0).

    Examples
    --------

    ```python
    hc = HomographicSampler(2, "cpu") # homographic sampler with 2 virtual crops

    hc.scale(0.5) # reduce all virtual crops size by half
    hc.shift(((-0.25, -0.25), (+0.25, +0.25))) # shift first virtual crop to top-left part, second virtual crop to bottom-right part
    hc.rotate(3.14/4., axis="x", clockwise=True, local_center=True) # rotate both virtual crops locally by 45 degrees clockwise (around x-axis)

    crops = hc.extract_crop(image, (100, 100)) # extract two homographic crops defined earlier as (100, 100) images
    ```

    """

    # corners of the initial virtual crop, in normalized coordinates,
    # ordered as (top-left, top-right, bottom-left, bottom-right)
    _DEST_COORD = torch.tensor(
        [
            [-1.0, -1.0],  # top-left
            [+1.0, -1.0],  # top-right
            [-1.0, +1.0],  # bottom-left
            [+1.0, +1.0],  # bottom-right
        ],
        dtype=torch.double,
    )

    _VALID_AXIS = {"x", "y", "z"}
    _VALID_DIRECTIONS = {"forward", "backward"}
    _VALID_ORDERING = {"xy", "yx"}

    def __init__(self, batch_size: int, device: str):
        """

        Parameters
        ----------
        batch_size : int
            Number of virtual crops to handle.
        device : str
            Device on which operations will be done.
        """
        self.reset(batch_size, device)

    @staticmethod
    def _convert_points_from_homogeneous(
        points: torch.Tensor, eps: float = 1e-8
    ) -> torch.Tensor:
        """Function that converts points from homogeneous to Euclidean space."""

        # we check for points at max_val
        z_vec: torch.Tensor = points[..., -1:]

        # set the results of division by zero/near-zero to 1.0
        # follow the convention of opencv:
        # https://github.com/opencv/opencv/pull/14411/files
        mask: torch.Tensor = torch.abs(z_vec) > eps
        scale = torch.where(mask, 1.0 / (z_vec + eps), torch.ones_like(z_vec))

        return scale * points[..., :-1]

    @staticmethod
    def _convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor:
        """Function that converts points from Euclidean to homogeneous space."""

        return torch.nn.functional.pad(points, [0, 1], "constant", 1.0)

    @staticmethod
    def _transform_points(
        trans_01: torch.Tensor, points_1: torch.Tensor
    ) -> torch.Tensor:
        """Function that applies a linear transformations to a set of points."""

        points_1 = points_1.to(trans_01.device)
        points_1 = points_1.to(trans_01.dtype)

        # We reshape to BxNxD in case we get more dimensions, e.g., MxBxNxD
        shape_inp = points_1.shape
        points_1 = points_1.reshape(-1, points_1.shape[-2], points_1.shape[-1])
        trans_01 = trans_01.reshape(-1, trans_01.shape[-2], trans_01.shape[-1])
        # We expand trans_01 to match the dimensions needed for bmm
        trans_01 = torch.repeat_interleave(
            trans_01, repeats=points_1.shape[0] // trans_01.shape[0], dim=0
        )
        # to homogeneous
        points_1_h = HomographicSampler._convert_points_to_homogeneous(
            points_1
        )  # BxNxD+1
        # transform coordinates
        points_0_h = torch.bmm(points_1_h, trans_01.permute(0, 2, 1))
        points_0_h = torch.squeeze(points_0_h, dim=-1)
        # to euclidean
        points_0 = HomographicSampler._convert_points_from_homogeneous(
            points_0_h
        )  # BxNxD
        # reshape to the input shape
        points_0 = points_0.reshape(shape_inp)
        return points_0

    @staticmethod
    def _create_meshgrid(
        height: int,
        width: int,
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
        normalized: bool = True,
    ) -> torch.Tensor:
        """Generate a coordinate grid for an image."""
        if normalized:
            min_x = -1.0
            max_x = +1.0
            min_y = -1.0
            max_y = +1.0
        else:
            # pixel-center coordinates
            min_x = 0.5
            max_x = width - 0.5
            min_y = 0.5
            max_y = height - 0.5

        xs: torch.Tensor = torch.linspace(
            min_x,
            max_x,
            width,
            device=device,
            dtype=dtype,
        )
        ys: torch.Tensor = torch.linspace(
            min_y,
            max_y,
            height,
            device=device,
            dtype=dtype,
        )

        # generate grid by stacking coordinates
        base_grid: torch.Tensor = torch.stack(
            torch.meshgrid([xs, ys], indexing="ij"), dim=-1
        )  # WxHx2
        return base_grid.permute(1, 0, 2).unsqueeze(0)  # 1xHxWx2

    @staticmethod
    def _build_perspective_param(
        p: torch.Tensor, q: torch.Tensor, axis: str
    ) -> torch.Tensor:
        # builds one row of the 8x8 DLT system for one point correspondence
        ones = torch.ones_like(p)[..., 0:1]
        zeros = torch.zeros_like(p)[..., 0:1]
        if axis == "x":
            return torch.cat(
                [
                    p[:, 0:1],
                    p[:, 1:2],
                    ones,
                    zeros,
                    zeros,
                    zeros,
                    -p[:, 0:1] * q[:, 0:1],
                    -p[:, 1:2] * q[:, 0:1],
                ],
                dim=1,
            )

        if axis == "y":
            return torch.cat(
                [
                    zeros,
                    zeros,
                    zeros,
                    p[:, 0:1],
                    p[:, 1:2],
                    ones,
                    -p[:, 0:1] * q[:, 1:2],
                    -p[:, 1:2] * q[:, 1:2],
                ],
                dim=1,
            )

        raise NotImplementedError(
            f"perspective params for axis `{axis}` is not implemented."
        )

    @staticmethod
    def _get_perspective_transform(src, dst):
        r"""Calculate a perspective transform from four pairs of the corresponding
        points.

        The function calculates the matrix of a perspective transform so that:

        .. math ::

            \begin{bmatrix}
            t_{i}x_{i}^{'} \\
            t_{i}y_{i}^{'} \\
            t_{i} \\
            \end{bmatrix}
            =
            \textbf{map_matrix} \cdot
            \begin{bmatrix}
            x_{i} \\
            y_{i} \\
            1 \\
            \end{bmatrix}

        where

        .. math ::
            dst(i) = (x_{i}^{'},y_{i}^{'}), src(i) = (x_{i}, y_{i}), i = 0,1,2,3

        Args:
            src: coordinates of quadrangle vertices in the source image with shape :math:`(B, 4, 2)`.
            dst: coordinates of the corresponding quadrangle vertices in
                the destination image with shape :math:`(B, 4, 2)`.

        Returns:
            the perspective transformation with shape :math:`(B, 3, 3)`.
        """

        # we build matrix A by using only 4 point correspondence. The linear
        # system is solved with the least square method, so here
        # we could even pass more correspondence
        p = []
        for i in [0, 1, 2, 3]:
            p.append(
                HomographicSampler._build_perspective_param(src[:, i], dst[:, i], "x")
            )
            p.append(
                HomographicSampler._build_perspective_param(src[:, i], dst[:, i], "y")
            )

        # A is Bx8x8
        A = torch.stack(p, dim=1)

        # b is a Bx8x1
        b = torch.stack(
            [
                dst[:, 0:1, 0],
                dst[:, 0:1, 1],
                dst[:, 1:2, 0],
                dst[:, 1:2, 1],
                dst[:, 2:3, 0],
                dst[:, 2:3, 1],
                dst[:, 3:4, 0],
                dst[:, 3:4, 1],
            ],
            dim=1,
        )

        # solve the system Ax = b
        X = torch.linalg.solve(A, b)

        # create variable to return
        batch_size = src.shape[0]
        M = torch.ones(batch_size, 9, device=src.device, dtype=src.dtype)
        M[..., :8] = torch.squeeze(X, dim=-1)

        return M.view(-1, 3, 3)  # Bx3x3

    def reset(self, batch_size: Optional[int] = None, device: Optional[str] = None):
        """Resets all the crops to their initial position and sizes.

        Parameters
        ----------
        batch_size : int, optional
            Number of virtual crops to handle, by default None.
        device : str, optional
            Device on which operations will be done, by default None.
        """
        batch_size = self.batch_size if batch_size is None else batch_size
        device = self.device if device is None else device

        self._dest_coords = HomographicSampler._DEST_COORD.to(device)
        self._dest_coords = self._dest_coords.unsqueeze(0)
        self._dest_coords = self._dest_coords.expand(batch_size, -1, -1)

        self._homog_src_coords = HomographicSampler._convert_points_to_homogeneous(
            self._dest_coords
        )

        self._clear_cache()

    def _clear_cache(self):
        """Intermediate data are cached such that the same homographic sampler can efficiently be called several times using the same homographic transforms."""
        self._src_coords = None
        self._forward_matrices = None
        self._backward_matrices = None

    def _to(self, device, name):
        # move one (possibly None) cached tensor attribute to `device`
        attr = getattr(self, name)
        if attr is not None:
            setattr(self, name, attr.to(device))

    def to(self, device: str):
        """Moves all operations to new device.

        Parameters
        ----------
        device : str
            Pytorch device.
        """
        if device != self.device:
            self._to(device, "_dest_coords")
            self._to(device, "_src_coords")
            self._to(device, "_homog_src_coords")
            self._to(device, "_forward_matrices")
            self._to(device, "_backward_matrices")
        return self

    @property
    def batch_size(self):
        return self._homog_src_coords.shape[0]

    @property
    def device(self):
        return self._homog_src_coords.device

    @property
    def dtype(self):
        return self._homog_src_coords.dtype

    @property
    def src_coords(self) -> torch.Tensor:
        """Coordinates of the homographic crop corners in the virtual image coordinate reference system.
        Those four points are ordered as : (top-left, top-right, bottom-left, bottom-right)

        Returns
        -------
        torch.Tensor
            :math:`(B, 4, 2)` tensor containing the homographic crop four corners coordinates.
        """
        if self._src_coords is None:
            self._src_coords = HomographicSampler._convert_points_from_homogeneous(
                self._homog_src_coords
            )
        return self._src_coords

    @property
    def dest_coords(self) -> torch.Tensor:
        return self._dest_coords

    def _auto_expand(self, input, outer_dim_size=None, **kwargs):
        """Auto-expand scalar or iterables to be batched."""
        if not isinstance(input, torch.Tensor):
            input = torch.tensor(input, **kwargs)

        # scalar
        if len(input.shape) == 0:
            input = input.unsqueeze(0)
            if outer_dim_size is None:
                outer_dim_size = 1
            else:
                input = input.expand(outer_dim_size)

        # vector
        if len(input.shape) == 1:
            if outer_dim_size is None:
                outer_dim_size = input.shape[0]
            elif outer_dim_size != input.shape[0]:
                raise RuntimeError(
                    f"provided outer dim size {outer_dim_size} doesn't match input shape {input.shape}"
                )

            input = input.unsqueeze(0)
            input = input.expand(self.batch_size, -1)

        if len(input.shape) != 2:
            raise RuntimeError(f"input should have size BxD (shape is {input.shape})")

        input = input.type(self.dtype)
        input = input.to(self.device)

        return input

    def rotate(
        self,
        angles: Union[float, torch.Tensor],
        clockwise: bool = False,
        axis: str = "z",
        local_center: bool = False,
    ):
        """Rotate virtual crops.

        Parameters
        ----------
        angles : Union[float, torch.Tensor]
            Angles of rotation. If scalar, applied to all crops. If :math:`(B, 1)` tensor, applied to each crop independently.
        clockwise : bool, optional
            Rotational direction, by default False
        axis : str, optional
            Axis of rotation, by default "z". Valid values are "x", "y" and "z". "z" is in-plane rotation. "x" and "y" are out-of-plane rotations.
        local_center : bool, optional
            Rotate on the center of the crop, by default False. If False, use global center of rotation (i.e. initial crop center). This option is only relevant after a shift has been used.

        Raises
        ------
        RuntimeError
            Raised if provided axis is invalid.
        """
        if axis not in HomographicSampler._VALID_AXIS:
            raise RuntimeError(
                f'provided axis "{axis}" isn\'t valid, should be one of {HomographicSampler._VALID_AXIS}'
            )

        angles = self._auto_expand(angles, outer_dim_size=1)

        if clockwise:
            angles = -angles

        cos_a = torch.cos(angles)
        sin_a = torch.sin(angles)

        _1 = torch.ones_like(cos_a)
        _0 = torch.zeros_like(cos_a)

        if axis == "z":
            flatmat = [+cos_a, -sin_a, _0, +sin_a, +cos_a, _0, _0, _0, _1]
        elif axis == "y":
            flatmat = [+cos_a, _0, -sin_a, _0, _1, _0, +sin_a, _0, +cos_a]
        elif axis == "x":
            flatmat = [_1, _0, _0, _0, +cos_a, +sin_a, _0, -sin_a, +cos_a]

        rot_matrix = torch.cat(flatmat, dim=-1)
        rot_matrix = rot_matrix.view(self.batch_size, 3, 3)

        self._clear_cache()

        if local_center:
            center = torch.mean(self._homog_src_coords, dim=1, keepdim=True)

            self._homog_src_coords -= center
            self._homog_src_coords = self._homog_src_coords @ rot_matrix
            self._homog_src_coords += center
        else:
            # out-of-plane rotations pivot around the z=1 plane
            if axis != "z":
                self._homog_src_coords[..., -1] -= 1.0
            self._homog_src_coords = self._homog_src_coords @ rot_matrix
            if axis != "z":
                self._homog_src_coords[..., -1] += 1.0

    def shift(self, delta: Union[float, Tuple[float, float], torch.Tensor]):
        """Shift virtual crops.

        Parameters
        ----------
        delta : Union[float, Tuple[float, float], torch.Tensor]
            Shift values. Scalar or Tuple will be applied to all crops. :math:`(B, 2)` tensors will be applied to each crop independently.
        """

        delta = self._auto_expand(delta, outer_dim_size=2)
        delta = delta.unsqueeze(1)
        # scale shift by the homogeneous coordinate so it applies in euclidean space
        delta = delta * self._homog_src_coords[..., -1].unsqueeze(-1)

        self._clear_cache()
        self._homog_src_coords[..., :2] += delta

    def scale(
        self,
        factors: Union[float, Tuple[float, float], torch.Tensor],
        local_center: bool = False,
    ):
        """Scale the virtual crops.

        Parameters
        ----------
        factors : Union[float, Tuple[float, float], torch.Tensor]
            Scaling factors. Scalar or Tuple will be applied to all crops. :math:`(B, 2)` tensors will be applied to each crop independently.
        local_center : bool, optional
            Scale on the center of the crop, by default False. If False, use global center of rotation (i.e. initial crop center). This option is only relevant after a shift has been used.
        """
        factors = self._auto_expand(factors, outer_dim_size=2)
        factors = factors.unsqueeze(1)

        self._clear_cache()

        if local_center:
            center = torch.mean(self._homog_src_coords, dim=1, keepdim=True)

            self._homog_src_coords -= center
            self._homog_src_coords[..., :2] *= factors
            self._homog_src_coords += center
        else:
            self._homog_src_coords[..., :2] *= factors

    @property
    def forward_matrices(self):
        if self._forward_matrices is None:
            self._forward_matrices = HomographicSampler._get_perspective_transform(
                self.dest_coords,
                self.src_coords,
            )
        return self._forward_matrices

    @property
    def backward_matrices(self):
        if self._backward_matrices is None:
            self._backward_matrices = HomographicSampler._get_perspective_transform(
                self.src_coords,
                self.dest_coords,
            )
        return self._backward_matrices

    def extract_crop(
        self,
        images: torch.Tensor,
        sampling_size: Tuple[int, int],
        mode="bilinear",
        padding_mode="zeros",
        direction="forward",
    ) -> torch.Tensor:
        """Extract all crops from a set of images.

        It can handle one-image-to-many-crops and many-images-to-many-crops.
        If the number of images is smaller than the number of crops, a number of n crops will be assigned to each image such that :math:`n_crops = n * n_images`.

        Parameters
        ----------
        images : torch.Tensor
            Tensor containing all images (valid shapes are :math:`(B,C,H,W)` and :math:`(C,H,W)`).
        sampling_size : Tuple[int, int]
            Spatial shape of the output crops.
        mode : str, optional
            Sampling mode passed to `grid_sample`, by default "bilinear".
        padding_mode : str, optional
            Padding mode passed to `grid_sample`, by default "zeros".
        direction : str, optional
            Direction of the crop sampling (`src -> dest` or `dest -> src`), by default "forward". Valid are "forward" and "backward".

        Returns
        -------
        torch.Tensor
            Sampled crops using transformed virtual crops.

        Raises
        ------
        RuntimeError
            Raised if `images` shape is invalid.
        RuntimeError
            Raised if `images` batch size isn't a multiple of the number of virtual crops.
        RuntimeError
            Raised if `direction` is invalid.
        """
        if images.dim() == 3:
            images = images.unsqueeze(0)
        elif images.dim() != 4:
            raise RuntimeError("provided image(s) should be of shape BxCxHxW or CxHxW")

        if self.batch_size % images.shape[0] != 0:
            raise RuntimeError(
                f"the sampler batch size ({self.batch_size}) should be a multiple of the image batch size (found {images.shape[0]})"
            )

        if direction not in HomographicSampler._VALID_DIRECTIONS:
            raise RuntimeError(
                f'invalid direction "{direction}" found, should be one of {self._VALID_DIRECTIONS}'
            )

        # reshape images to handle multiple crops
        crop_per_image = self.batch_size // images.shape[0]
        images = images.unsqueeze(1)
        images = images.expand(-1, crop_per_image, -1, -1, -1)
        images = images.reshape(self.batch_size, *images.shape[2:])

        # select homography matrix
        if direction == "forward":
            matrix = self.forward_matrices
        else:
            matrix = self.backward_matrices

        # create grid of coordinates used for image sampling
        grid = HomographicSampler._create_meshgrid(
            sampling_size[0],
            sampling_size[1],
            device=matrix.device,
            dtype=matrix.dtype,
        )
        grid = grid.expand(self.batch_size, -1, -1, -1)
        grid = HomographicSampler._transform_points(matrix[:, None, None], grid)
        grid = grid.type_as(images)

        # sample pixels using transformed grid coordinates
        return grid_sample(
            images,
            grid,
            mode=mode,
            padding_mode=padding_mode,
            align_corners=False,
        )

    def transform_points(
        self,
        points: Union[torch.Tensor, List[torch.Tensor]],
        image_shape: Optional[Tuple[int, int]] = None,
        direction: str = "forward",
        ordering: str = "xy",
    ) -> Union[torch.Tensor, List[torch.Tensor]]:
        """Apply homography to a set of points.

        Parameters
        ----------
        points : Union[torch.Tensor, List[torch.Tensor]]
            BxNx2 tensor or list of Nx2 tensors containing the coordinates to transform.
        image_shape : Optional[Tuple[int, int]], optional
            Shape of the tensor the coordinates references, as in (height, width), by default None.
            If not provided, the coordinates are assumed to be already normalized between [-1, +1].
        direction : str, optional
            Direction of the homography, by default "forward".
        ordering : str, optional
            Specify the order in which the x,y coordinates are stored in "points", by default "xy".

        Returns
        -------
        Union[torch.Tensor, List[torch.Tensor]]
            Transformed coordinates.

        Raises
        ------
        RuntimeError
            If the provided direction is invalid.
        RuntimeError
            If the provided ordering is invalid.
        """
        # check arguments
        if direction not in HomographicSampler._VALID_DIRECTIONS:
            raise RuntimeError(
                f'invalid direction "{direction}" found, should be one of {self._VALID_DIRECTIONS}'
            )
        if ordering not in HomographicSampler._VALID_ORDERING:
            raise RuntimeError(
                f'invalid ordering "{ordering}" found, should be one of {self._VALID_ORDERING}'
            )

        # select homography matrices
        if direction == "forward":
            matrix = self.backward_matrices
        else:
            matrix = self.forward_matrices

        # pad input if using variable length
        lengths = None
        if not isinstance(points, torch.Tensor):
            lengths = [p.shape[0] for p in points]
            points = pad_sequence(points, batch_first=True)

        # convert to "xy" ordering
        if ordering == "yx":
            points = points[..., [1, 0]]

        # bring coordinates to [-1, +1] range
        if image_shape is not None:
            image_shape = torch.tensor(
                [image_shape[1], image_shape[0]],
                dtype=points.dtype,
                device=points.device,
            )
            image_shape = image_shape[None, None, ...]
            image_shape_half = image_shape / 2.0
            pixel_shift = 0.5 / image_shape
            points = (points - image_shape_half) / image_shape_half + pixel_shift

        # reshape points to handle multiple transforms
        transform_per_points = self.batch_size // points.shape[0]
        points = points.unsqueeze(1)
        points = points.expand(-1, transform_per_points, -1, -1)
        points = points.reshape(self.batch_size, *points.shape[2:])

        # change lengths size accordingly
        # (only relevant when input was a list of variable-length point sets;
        # `lengths` is None for plain tensor inputs)
        if lengths is not None and transform_per_points != 1:
            lengths = list(
                itertools.chain.from_iterable(
                    itertools.repeat(s, transform_per_points) for s in lengths
                )
            )

        # apply homography to point coordinates
        transformed_points = HomographicSampler._transform_points(
            matrix[:, None, None], points
        )

        # bring coordinates to original range
        if image_shape is not None:
            transformed_points = (
                (transformed_points - pixel_shift) * image_shape_half
            ) + image_shape_half

        # convert back to initial ordering
        if ordering == "yx":
            transformed_points = transformed_points[..., [1, 0]]

        # remove padded results if input was variable length
        if lengths is not None:
            transformed_points = [
                transformed_points[i, :s] for i, s in enumerate(lengths)
            ]

        return transformed_points</code></pre>
</details>
</section>
<section>
</section>
<section>
</section>
<section>
<h2 class="section-title" id="header-functions">Functions</h2>
<dl>
<dt id="silk.cv.homography.resize_homography"><code class="name flex">
<span>def <span class="ident">resize_homography</span></span>(<span>homography: torch.Tensor, original_image_shape: Tuple[int, int], new_original_image_shape, warped_image_shape=None, new_warped_image_shape=None) ‑> torch.Tensor</span>
</code></dt>
<dd>
<div class="desc"><p>Change homography matrix when image sizes change.</p>
<h2 id="parameters">Parameters</h2>
<dl>
<dt><strong><code>homography</code></strong> :&ensp;<code>torch.Tensor</code></dt>
<dd>Homography matrix as a 3x3 Tensor.</dd>
<dt><strong><code>original_image_shape</code></strong> :&ensp;<code>Tuple[int, int]</code></dt>
<dd>Size of the original image the current homography applies to.</dd>
<dt><strong><code>new_original_image_shape</code></strong> :&ensp;<code>Tuple[int, int]</code></dt>
<dd>Size of the new original image the resized homography should apply to.</dd>
<dt><strong><code>warped_image_shape</code></strong> :&ensp;<code>Tuple[int, int]</code>, optional</dt>
<dd>Size of the warped image the current homography applies to, by default None. Set to <code>original_image_shape</code> when None.</dd>
<dt><strong><code>new_warped_image_shape</code></strong> :&ensp;<code>Tuple[int, int]</code>, optional</dt>
<dd>Size of the new warped image the resized homography should apply to, by default None. Set to <code>new_original_image_shape</code> when None.</dd>
</dl>
<h2 id="returns">Returns</h2>
<dl>
<dt><code>torch.Tensor</code></dt>
<dd>New homography operating on provided image sizes.</dd>
</dl></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def resize_homography(
    homography: torch.Tensor,
    original_image_shape: Tuple[int, int],
    new_original_image_shape,
    warped_image_shape=None,
    new_warped_image_shape=None,
) -&gt; torch.Tensor:
    &#34;&#34;&#34;Change homography matrix when image sizes change.

    Parameters
    ----------
    homography : torch.Tensor
        Homography matrix as a 3x3 Tensor.
    original_image_shape : Tuple[int, int]
        Size of the original image the current homography applies to.
    new_original_image_shape : Tuple[int, int]
        Size of the new original image the resized homography should apply to.
    warped_image_shape : Tuple[int, int], optional
        Size of the warped image the current homography applies to, by default None. Set to `original_image_shape` when None.
    new_warped_image_shape : Tuple[int, int], optional
        Size of the new warped image the resized homography should apply to, by default None. Set to `new_original_image_shape` when None.

    Returns
    -------
    torch.Tensor
        New homography operating on provided image sizes.
    &#34;&#34;&#34;
    warped_image_shape = (
        original_image_shape if warped_image_shape is None else warped_image_shape
    )
    new_warped_image_shape = (
        new_original_image_shape
        if new_warped_image_shape is None
        else new_warped_image_shape
    )

    # compute resizing factors
    oh_factor = original_image_shape[0] / new_original_image_shape[0]
    ow_factor = original_image_shape[1] / new_original_image_shape[1]

    wh_factor = new_warped_image_shape[0] / warped_image_shape[0]
    ww_factor = new_warped_image_shape[1] / warped_image_shape[1]

    # build resizing diagonal matrices
    up_scale = torch.diag(
        torch.tensor(
            [ow_factor, oh_factor, 1.0],
            dtype=homography.dtype,
            device=homography.device,
        )
    )
    down_scale = torch.diag(
        torch.tensor(
            [ww_factor, wh_factor, 1.0],
            dtype=homography.dtype,
            device=homography.device,
        )
    )

    # apply resizing to homography
    homography = down_scale @ homography @ up_scale

    return homography</code></pre>
</details>
</dd>
</dl>
</section>
<section>
<h2 class="section-title" id="header-classes">Classes</h2>
<dl>
<dt id="silk.cv.homography.HomographicSampler"><code class="flex name class">
<span>class <span class="ident">HomographicSampler</span></span>
<span>(</span><span>batch_size: int, device: str)</span>
</code></dt>
<dd>
<div class="desc"><p>Samples multiple homographic crops from multiple batched images.</p>
<p>This sampler makes it very easy to sample homographic crops from multiple images by manipulating a virtual crop initially centered on the entire image.
Applying successive simple transformations (xyz-rotation, shift, scale) will modify the position and shape of that virtual crop.
Transformations operate on normalized coordinates independent of an image shape.
The initial virtual crop has its top-left position at (-1, -1), and bottom-right position at (+1, +1).
Thus the center being at position (0, 0).</p>
<h2 id="examples">Examples</h2>
<pre><code class="language-python">hc = HomographicSampler(2, &quot;cpu&quot;) # homographic sampler with 2 virtual crops

hc.scale(0.5) # reduce all virtual crops size by half
hc.shift(((-0.25, -0.25), (+0.25, +0.25))) # shift first virtual crop to top-left part, second virtual crop to bottom-right part
hc.rotate(3.14/4., axis=&quot;x&quot;, clockwise=True, local_center=True) # rotate both virtual crops locally by 45 degrees clockwise (around x-axis)

crops = hc.extract_crop(image, (100, 100)) # extract two homographic crops defined earlier as (100, 100) images
</code></pre>
<h2 id="parameters">Parameters</h2>
<dl>
<dt><strong><code>batch_size</code></strong> :&ensp;<code>int</code></dt>
<dd>Number of virtual crops to handle.</dd>
<dt><strong><code>device</code></strong> :&ensp;<code>str</code></dt>
<dd>Device on which operations will be done.</dd>
</dl></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">class HomographicSampler:
    &#34;&#34;&#34;Samples multiple homographic crops from multiple batched images.

    This sampler makes it very easy to sample homographic crops from multiple images by manipulating a virtual crop initially centered on the entire image.
    Applying successive simple transformations (xyz-rotation, shift, scale) will modify the position and shape of that virtual crop.
    Transformations operate on normalized coordinates independent of an image shape.
    The initial virtual crop has its top-left position at (-1, -1), and bottom-right position at (+1, +1).
    Thus the center being at position (0, 0).

    Examples
    --------

    ```python
    hc = HomographicSampler(2, &#34;cpu&#34;) # homographic sampler with 2 virtual crops

    hc.scale(0.5) # reduce all virtual crops size by half
    hc.shift(((-0.25, -0.25), (+0.25, +0.25))) # shift first virtual crop to top-left part, second virtual crop to bottom-right part
    hc.rotate(3.14/4., axis=&#34;x&#34;, clockwise=True, local_center=True) # rotate both virtual crops locally by 45 degrees clockwise (around x-axis)

    crops = hc.extract_crop(image, (100, 100)) # extract two homographic crops defined earlier as (100, 100) images
    ```

    &#34;&#34;&#34;

    _DEST_COORD = torch.tensor(
        [
            [-1.0, -1.0],  # top-left
            [+1.0, -1.0],  # top-right
            [-1.0, +1.0],  # bottom-left
            [+1.0, +1.0],  # bottom-right
        ],
        dtype=torch.double,
    )

    _VALID_AXIS = {&#34;x&#34;, &#34;y&#34;, &#34;z&#34;}
    _VALID_DIRECTIONS = {&#34;forward&#34;, &#34;backward&#34;}
    _VALID_ORDERING = {&#34;xy&#34;, &#34;yx&#34;}

    def __init__(self, batch_size: int, device: str):
        &#34;&#34;&#34;

        Parameters
        ----------
        batch_size : int
            Number of virtual crops to handle.
        device : str
            Device on which operations will be done.
        &#34;&#34;&#34;
        self.reset(batch_size, device)

    @staticmethod
    def _convert_points_from_homogeneous(
        points: torch.Tensor, eps: float = 1e-8
    ) -&gt; torch.Tensor:
        &#34;&#34;&#34;Function that converts points from homogeneous to Euclidean space.&#34;&#34;&#34;

        # we check for points at max_val
        z_vec: torch.Tensor = points[..., -1:]

        # set the results of division by zero/near-zero to 1.0
        # follow the convention of opencv:
        # https://github.com/opencv/opencv/pull/14411/files
        mask: torch.Tensor = torch.abs(z_vec) &gt; eps
        scale = torch.where(mask, 1.0 / (z_vec + eps), torch.ones_like(z_vec))

        return scale * points[..., :-1]

    @staticmethod
    def _convert_points_to_homogeneous(points: torch.Tensor) -&gt; torch.Tensor:
        &#34;&#34;&#34;Function that converts points from Euclidean to homogeneous space.&#34;&#34;&#34;

        return torch.nn.functional.pad(points, [0, 1], &#34;constant&#34;, 1.0)

    @staticmethod
    def _transform_points(
        trans_01: torch.Tensor, points_1: torch.Tensor
    ) -&gt; torch.Tensor:
        &#34;&#34;&#34;Function that applies a linear transformations to a set of points.&#34;&#34;&#34;

        points_1 = points_1.to(trans_01.device)
        points_1 = points_1.to(trans_01.dtype)

        # We reshape to BxNxD in case we get more dimensions, e.g., MxBxNxD
        shape_inp = points_1.shape
        points_1 = points_1.reshape(-1, points_1.shape[-2], points_1.shape[-1])
        trans_01 = trans_01.reshape(-1, trans_01.shape[-2], trans_01.shape[-1])
        # We expand trans_01 to match the dimensions needed for bmm
        trans_01 = torch.repeat_interleave(
            trans_01, repeats=points_1.shape[0] // trans_01.shape[0], dim=0
        )
        # to homogeneous
        points_1_h = HomographicSampler._convert_points_to_homogeneous(
            points_1
        )  # BxNxD+1
        # transform coordinates
        points_0_h = torch.bmm(points_1_h, trans_01.permute(0, 2, 1))
        points_0_h = torch.squeeze(points_0_h, dim=-1)
        # to euclidean
        points_0 = HomographicSampler._convert_points_from_homogeneous(
            points_0_h
        )  # BxNxD
        # reshape to the input shape
        points_0 = points_0.reshape(shape_inp)
        return points_0

    @staticmethod
    def _create_meshgrid(
        height: int,
        width: int,
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
        normalized: bool = True,
    ) -&gt; torch.Tensor:
        &#34;&#34;&#34;Generate a coordinate grid for an image.&#34;&#34;&#34;
        if normalized:
            min_x = -1.0
            max_x = +1.0
            min_y = -1.0
            max_y = +1.0
        else:
            min_x = 0.5
            max_x = width - 0.5
            min_y = 0.5
            max_y = height - 0.5

        xs: torch.Tensor = torch.linspace(
            min_x,
            max_x,
            width,
            device=device,
            dtype=dtype,
        )
        ys: torch.Tensor = torch.linspace(
            min_y,
            max_y,
            height,
            device=device,
            dtype=dtype,
        )

        # generate grid by stacking coordinates
        base_grid: torch.Tensor = torch.stack(
            torch.meshgrid([xs, ys], indexing=&#34;ij&#34;), dim=-1
        )  # WxHx2
        return base_grid.permute(1, 0, 2).unsqueeze(0)  # 1xHxWx2

    @staticmethod
    def _build_perspective_param(
        p: torch.Tensor, q: torch.Tensor, axis: str
    ) -&gt; torch.Tensor:
        ones = torch.ones_like(p)[..., 0:1]
        zeros = torch.zeros_like(p)[..., 0:1]
        if axis == &#34;x&#34;:
            return torch.cat(
                [
                    p[:, 0:1],
                    p[:, 1:2],
                    ones,
                    zeros,
                    zeros,
                    zeros,
                    -p[:, 0:1] * q[:, 0:1],
                    -p[:, 1:2] * q[:, 0:1],
                ],
                dim=1,
            )

        if axis == &#34;y&#34;:
            return torch.cat(
                [
                    zeros,
                    zeros,
                    zeros,
                    p[:, 0:1],
                    p[:, 1:2],
                    ones,
                    -p[:, 0:1] * q[:, 1:2],
                    -p[:, 1:2] * q[:, 1:2],
                ],
                dim=1,
            )

        raise NotImplementedError(
            f&#34;perspective params for axis `{axis}` is not implemented.&#34;
        )

    @staticmethod
    def _get_perspective_transform(src, dst):
        r&#34;&#34;&#34;Calculate a perspective transform from four pairs of the corresponding
        points.

        The function calculates the matrix of a perspective transform so that:

        .. math ::

            \begin{bmatrix}
            t_{i}x_{i}^{&#39;} \\
            t_{i}y_{i}^{&#39;} \\
            t_{i} \\
            \end{bmatrix}
            =
            \textbf{map_matrix} \cdot
            \begin{bmatrix}
            x_{i} \\
            y_{i} \\
            1 \\
            \end{bmatrix}

        where

        .. math ::
            dst(i) = (x_{i}^{&#39;},y_{i}^{&#39;}), src(i) = (x_{i}, y_{i}), i = 0,1,2,3

        Args:
            src: coordinates of quadrangle vertices in the source image with shape :math:`(B, 4, 2)`.
            dst: coordinates of the corresponding quadrangle vertices in
                the destination image with shape :math:`(B, 4, 2)`.

        Returns:
            the perspective transformation with shape :math:`(B, 3, 3)`.
        &#34;&#34;&#34;

        # we build matrix A by using only 4 point correspondence. The linear
        # system is solved with the least square method, so here
        # we could even pass more correspondence
        p = []
        for i in [0, 1, 2, 3]:
            p.append(
                HomographicSampler._build_perspective_param(src[:, i], dst[:, i], &#34;x&#34;)
            )
            p.append(
                HomographicSampler._build_perspective_param(src[:, i], dst[:, i], &#34;y&#34;)
            )

        # A is Bx8x8
        A = torch.stack(p, dim=1)

        # b is a Bx8x1
        b = torch.stack(
            [
                dst[:, 0:1, 0],
                dst[:, 0:1, 1],
                dst[:, 1:2, 0],
                dst[:, 1:2, 1],
                dst[:, 2:3, 0],
                dst[:, 2:3, 1],
                dst[:, 3:4, 0],
                dst[:, 3:4, 1],
            ],
            dim=1,
        )

        # solve the system Ax = b
        X = torch.linalg.solve(A, b)

        # create variable to return
        batch_size = src.shape[0]
        M = torch.ones(batch_size, 9, device=src.device, dtype=src.dtype)
        M[..., :8] = torch.squeeze(X, dim=-1)

        return M.view(-1, 3, 3)  # Bx3x3

    def reset(self, batch_size: Optional[int] = None, device: Optional[str] = None):
        &#34;&#34;&#34;Resets all the crops to their initial position and sizes.

        Parameters
        ----------
        batch_size : int, optional
            Number of virtual crops to handle, by default None.
        device : str, optional
            Device on which operations will be done, by default None.
        &#34;&#34;&#34;
        batch_size = self.batch_size if batch_size is None else batch_size
        device = self.device if device is None else device

        self._dest_coords = HomographicSampler._DEST_COORD.to(device)
        self._dest_coords = self._dest_coords.unsqueeze(0)
        self._dest_coords = self._dest_coords.expand(batch_size, -1, -1)

        self._homog_src_coords = HomographicSampler._convert_points_to_homogeneous(
            self._dest_coords
        )

        self._clear_cache()

    def _clear_cache(self):
        &#34;&#34;&#34;Intermediate data are cached such that the same homographic sampler can efficiently be called several times using the same homographic transforms.&#34;&#34;&#34;
        self._src_coords = None
        self._forward_matrices = None
        self._backward_matrices = None

    def _to(self, device, name):
        attr = getattr(self, name)
        if attr is not None:
            setattr(self, name, attr.to(device))

    def to(self, device: str):
        &#34;&#34;&#34;Moves all operations to new device.

        Parameters
        ----------
        device : str
            Pytorch device.
        &#34;&#34;&#34;
        if device != self.device:
            self._to(device, &#34;_dest_coords&#34;)
            self._to(device, &#34;_src_coords&#34;)
            self._to(device, &#34;_homog_src_coords&#34;)
            self._to(device, &#34;_forward_matrices&#34;)
            self._to(device, &#34;_backward_matrices&#34;)
        return self

    @property
    def batch_size(self):
        return self._homog_src_coords.shape[0]

    @property
    def device(self):
        return self._homog_src_coords.device

    @property
    def dtype(self):
        return self._homog_src_coords.dtype

    @property
    def src_coords(self) -&gt; torch.Tensor:
        &#34;&#34;&#34;Coordinates of the homographic crop corners in the virtual image coordinate reference system.
        Those four points are ordered as : (top-left, top-right, bottom-left, bottom-right)

        Returns
        -------
        torch.Tensor
            :math:`(B, 4, 2)` tensor containing the homographic crop four corner coordinates.
        &#34;&#34;&#34;
        if self._src_coords is None:
            self._src_coords = HomographicSampler._convert_points_from_homogeneous(
                self._homog_src_coords
            )
        return self._src_coords

    @property
    def dest_coords(self) -&gt; torch.Tensor:
        return self._dest_coords

    def _auto_expand(self, input, outer_dim_size=None, **kwargs):
        &#34;&#34;&#34;Auto-expand scalar or iterables to be batched.&#34;&#34;&#34;
        if not isinstance(input, torch.Tensor):
            input = torch.tensor(input, **kwargs)

        # scalar
        if len(input.shape) == 0:
            input = input.unsqueeze(0)
            if outer_dim_size is None:
                outer_dim_size = 1
            else:
                input = input.expand(outer_dim_size)

        # vector
        if len(input.shape) == 1:
            if outer_dim_size is None:
                outer_dim_size = input.shape[0]
            elif outer_dim_size != input.shape[0]:
                raise RuntimeError(
                    f&#34;provided outer dim size {outer_dim_size} doesn&#39;t match input shape {input.shape}&#34;
                )

            input = input.unsqueeze(0)
            input = input.expand(self.batch_size, -1)

        if len(input.shape) != 2:
            raise RuntimeError(f&#34;input should have size BxD (shape is {input.shape}&#34;)

        input = input.type(self.dtype)
        input = input.to(self.device)

        return input

    def rotate(
        self,
        angles: Union[float, torch.Tensor],
        clockwise: bool = False,
        axis: str = &#34;z&#34;,
        local_center: bool = False,
    ):
        &#34;&#34;&#34;Rotate virtual crops.

        Parameters
        ----------
        angles : Union[float, torch.Tensor]
            Angles of rotation. If scalar, applied to all crops. If :math:`(B, 1)` tensor, applied to each crop independently.
        clockwise : bool, optional
            Rotational direction, by default False
        axis : str, optional
            Axis of rotation, by default &#34;z&#34;. Valid values are &#34;x&#34;, &#34;y&#34; and &#34;z&#34;. &#34;z&#34; is in-plane rotation. &#34;x&#34; and &#34;y&#34; are out-of-plane rotations.
        local_center : bool, optional
            Rotate on the center of the crop, by default False. If False, use global center of rotation (i.e. initial crop center). This option is only relevant after a shift has been used.

        Raises
        ------
        RuntimeError
            Raised if provided axis is invalid.
        &#34;&#34;&#34;
        if axis not in HomographicSampler._VALID_AXIS:
            raise RuntimeError(
                f&#39;provided axis &#34;{axis}&#34; isn\&#39;t valid, should be one of {HomographicSampler._VALID_AXIS}&#39;
            )

        angles = self._auto_expand(angles, outer_dim_size=1)

        if clockwise:
            angles = -angles

        cos_a = torch.cos(angles)
        sin_a = torch.sin(angles)

        _1 = torch.ones_like(cos_a)
        _0 = torch.zeros_like(cos_a)

        if axis == &#34;z&#34;:
            flatmat = [+cos_a, -sin_a, _0, +sin_a, +cos_a, _0, _0, _0, _1]
        elif axis == &#34;y&#34;:
            flatmat = [+cos_a, _0, -sin_a, _0, _1, _0, +sin_a, _0, +cos_a]
        elif axis == &#34;x&#34;:
            flatmat = [_1, _0, _0, _0, +cos_a, +sin_a, _0, -sin_a, +cos_a]

        rot_matrix = torch.cat(flatmat, dim=-1)
        rot_matrix = rot_matrix.view(self.batch_size, 3, 3)

        self._clear_cache()

        if local_center:
            center = torch.mean(self._homog_src_coords, dim=1, keepdim=True)

            self._homog_src_coords -= center
            self._homog_src_coords = self._homog_src_coords @ rot_matrix
            self._homog_src_coords += center
        else:
            if axis != &#34;z&#34;:
                self._homog_src_coords[..., -1] -= 1.0
            self._homog_src_coords = self._homog_src_coords @ rot_matrix
            if axis != &#34;z&#34;:
                self._homog_src_coords[..., -1] += 1.0

    def shift(self, delta: Union[float, Tuple[float, float], torch.Tensor]):
        &#34;&#34;&#34;Shift virtual crops.

        Parameters
        ----------
        delta : Union[float, Tuple[float, float], torch.Tensor]
            Shift values. Scalar or Tuple will be applied to all crops. :math:`(B, 2)` tensors will be applied to each crop independently.
        &#34;&#34;&#34;

        delta = self._auto_expand(delta, outer_dim_size=2)
        delta = delta.unsqueeze(1)
        delta = delta * self._homog_src_coords[..., -1].unsqueeze(-1)

        self._clear_cache()
        self._homog_src_coords[..., :2] += delta

    def scale(
        self,
        factors: Union[float, Tuple[float, float], torch.Tensor],
        local_center: bool = False,
    ):
        &#34;&#34;&#34;Scale the virtual crops.

        Parameters
        ----------
        factors : Union[float, Tuple[float, float], torch.Tensor]
            Scaling factors. Scalar or Tuple will be applied to all crops. :math:`(B, 2)` tensors will be applied to each crop independently.
        local_center : bool, optional
            Scale on the center of the crop, by default False. If False, use global center of rotation (i.e. initial crop center). This option is only relevant after a shift has been used.
        &#34;&#34;&#34;
        factors = self._auto_expand(factors, outer_dim_size=2)
        factors = factors.unsqueeze(1)

        self._clear_cache()

        if local_center:
            center = torch.mean(self._homog_src_coords, dim=1, keepdim=True)

            self._homog_src_coords -= center
            self._homog_src_coords[..., :2] *= factors
            self._homog_src_coords += center
        else:
            self._homog_src_coords[..., :2] *= factors

    @property
    def forward_matrices(self):
        if self._forward_matrices is None:
            self._forward_matrices = HomographicSampler._get_perspective_transform(
                self.dest_coords,
                self.src_coords,
            )
        return self._forward_matrices

    @property
    def backward_matrices(self):
        if self._backward_matrices is None:
            self._backward_matrices = HomographicSampler._get_perspective_transform(
                self.src_coords,
                self.dest_coords,
            )
        return self._backward_matrices

    def extract_crop(
        self,
        images: torch.Tensor,
        sampling_size: Tuple[int, int],
        mode=&#34;bilinear&#34;,
        padding_mode=&#34;zeros&#34;,
        direction=&#34;forward&#34;,
    ) -&gt; torch.Tensor:
        &#34;&#34;&#34;Extract all crops from a set of images.

        It can handle one-image-to-many-crops and many-images-to-many-crops.
        If the number of images is smaller than the number of crops, a number of n crops will be assigned to each image such that :math:`n_crops = n * n_images`.

        Parameters
        ----------
        images : torch.Tensor
            Tensor containing all images (valid shapes are :math:`(B,C,H,W)` and :math:`(C,H,W)`).
        sampling_size : Tuple[int, int]
            Spatial shape of the output crops.
        mode : str, optional
            Sampling mode passed to `grid_sample`, by default &#34;bilinear&#34;.
        padding_mode : str, optional
            Padding mode passed to `grid_sample`, by default &#34;zeros&#34;.
        direction : str, optional
            Direction of the crop sampling (`src -&gt; dest` or `dest -&gt; src`), by default &#34;forward&#34;. Valid are &#34;forward&#34; and &#34;backward&#34;.

        Returns
        -------
        torch.Tensor
            Sampled crops using transformed virtual crops.

        Raises
        ------
        RuntimeError
            Raised if `images` shape is invalid.
        RuntimeError
            Raised if `images` batch size isn&#39;t a multiple of the number of virtual crops.
        RuntimeError
            Raised if `direction` is invalid.
        &#34;&#34;&#34;
        if images.dim() == 3:
            images = images.unsqueeze(0)
        elif images.dim() != 4:
            raise RuntimeError(&#34;provided image(s) should be of shape BxCxHxW or CxHxW&#34;)

        if self.batch_size % images.shape[0] != 0:
            raise RuntimeError(
                f&#34;the sampler batch size ({self.batch_size}) should be a multiple of the image batch size (found {images.shape[0]})&#34;
            )

        if direction not in HomographicSampler._VALID_DIRECTIONS:
            raise RuntimeError(
                f&#39;invalid direction &#34;{direction}&#34; found, should be one of {self._VALID_DIRECTIONS}&#39;
            )

        # reshape images to handle multiple crops
        crop_per_image = self.batch_size // images.shape[0]
        images = images.unsqueeze(1)
        images = images.expand(-1, crop_per_image, -1, -1, -1)
        images = images.reshape(self.batch_size, *images.shape[2:])

        # select homography matrix
        if direction == &#34;forward&#34;:
            matrix = self.forward_matrices
        else:
            matrix = self.backward_matrices

        # create grid of coordinates used for image sampling
        grid = HomographicSampler._create_meshgrid(
            sampling_size[0],
            sampling_size[1],
            device=matrix.device,
            dtype=matrix.dtype,
        )
        grid = grid.expand(self.batch_size, -1, -1, -1)
        grid = HomographicSampler._transform_points(matrix[:, None, None], grid)
        grid = grid.type_as(images)

        # sample pixels using transformed grid coordinates
        return grid_sample(
            images,
            grid,
            mode=mode,
            padding_mode=padding_mode,
            align_corners=False,
        )

    def transform_points(
        self,
        points: Union[torch.Tensor, List[torch.Tensor]],
        image_shape: Optional[Tuple[int, int]] = None,
        direction: str = &#34;forward&#34;,
        ordering: str = &#34;xy&#34;,
    ) -&gt; Union[torch.Tensor, List[torch.Tensor]]:
        &#34;&#34;&#34;Apply homography to a set of points.

        Parameters
        ----------
        points : Union[torch.Tensor, List[torch.Tensor]]
            BxNx2 tensor or list of Nx2 tensors containing the coordinates to transform.
        image_shape : Optional[Tuple[int, int]], optional
            Shape of the tensor the coordinates reference, as in (height, width), by default None.
            If not provided, the coordinates are assumed to be already normalized between [-1, +1].
        direction : str, optional
            Direction of the homography, by default &#34;forward&#34;.
        ordering : str, optional
            Specify the order in which the x,y coordinates are stored in &#34;points&#34;, by default &#34;xy&#34;.

        Returns
        -------
        Union[torch.Tensor, List[torch.Tensor]]
            Transformed coordinates.

        Raises
        ------
        RuntimeError
            If the provided direction is invalid.
        RuntimeError
            If the provided ordering is invalid.
        &#34;&#34;&#34;
        # check arguments
        if direction not in HomographicSampler._VALID_DIRECTIONS:
            raise RuntimeError(
                f&#39;invalid direction &#34;{direction}&#34; found, should be one of {self._VALID_DIRECTIONS}&#39;
            )
        if ordering not in HomographicSampler._VALID_ORDERING:
            raise RuntimeError(
                f&#39;invalid ordering &#34;{ordering}&#34; found, should be one of {self._VALID_ORDERING}&#39;
            )

        # select homography matrices
        if direction == &#34;forward&#34;:
            matrix = self.backward_matrices
        else:
            matrix = self.forward_matrices

        # pad input if using variable length
        lengths = None
        if not isinstance(points, torch.Tensor):
            lengths = [p.shape[0] for p in points]
            points = pad_sequence(points, batch_first=True)

        # convert to &#34;xy&#34; ordering
        if ordering == &#34;yx&#34;:
            points = points[..., [1, 0]]

        # bring coordinates to [-1, +1] range
        if image_shape is not None:
            image_shape = torch.tensor(
                [image_shape[1], image_shape[0]],
                dtype=points.dtype,
                device=points.device,
            )
            image_shape = image_shape[None, None, ...]
            image_shape_half = image_shape / 2.0
            pixel_shift = 0.5 / image_shape
            points = (points - image_shape_half) / image_shape_half + pixel_shift

        # reshape points to handle multiple transforms
        transform_per_points = self.batch_size // points.shape[0]
        points = points.unsqueeze(1)
        points = points.expand(-1, transform_per_points, -1, -1)
        points = points.reshape(self.batch_size, *points.shape[2:])

        # change lengths size accordingly
        if transform_per_points != 1:
            lengths = list(
                itertools.chain.from_iterable(
                    itertools.repeat(s, transform_per_points) for s in lengths
                )
            )

        # apply homography to point coordinates
        transformed_points = HomographicSampler._transform_points(
            matrix[:, None, None], points
        )

        # bring coordinates to original range
        if image_shape is not None:
            transformed_points = (
                (transformed_points - pixel_shift) * image_shape_half
            ) + image_shape_half

        # convert back to initial ordering
        if ordering == &#34;yx&#34;:
            transformed_points = transformed_points[..., [1, 0]]

        # remove padded results if input was variable length
        if lengths is not None:
            transformed_points = [
                transformed_points[i, :s] for i, s in enumerate(lengths)
            ]

        return transformed_points</code></pre>
</details>
<h3>Subclasses</h3>
<ul class="hlist">
<li><a title="silk.transforms.cv.homography.RandomHomographicSampler" href="../transforms/cv/homography.html#silk.transforms.cv.homography.RandomHomographicSampler">RandomHomographicSampler</a></li>
</ul>
<h3>Instance variables</h3>
<dl>
<dt id="silk.cv.homography.HomographicSampler.backward_matrices"><code class="name">var <span class="ident">backward_matrices</span></code></dt>
<dd>
<div class="desc"></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">@property
def backward_matrices(self):
    if self._backward_matrices is None:
        self._backward_matrices = HomographicSampler._get_perspective_transform(
            self.src_coords,
            self.dest_coords,
        )
    return self._backward_matrices</code></pre>
</details>
</dd>
<dt id="silk.cv.homography.HomographicSampler.batch_size"><code class="name">var <span class="ident">batch_size</span></code></dt>
<dd>
<div class="desc"></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">@property
def batch_size(self):
    return self._homog_src_coords.shape[0]</code></pre>
</details>
</dd>
<dt id="silk.cv.homography.HomographicSampler.dest_coords"><code class="name">var <span class="ident">dest_coords</span> : torch.Tensor</code></dt>
<dd>
<div class="desc"></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">@property
def dest_coords(self) -&gt; torch.Tensor:
    return self._dest_coords</code></pre>
</details>
</dd>
<dt id="silk.cv.homography.HomographicSampler.device"><code class="name">var <span class="ident">device</span></code></dt>
<dd>
<div class="desc"></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">@property
def device(self):
    return self._homog_src_coords.device</code></pre>
</details>
</dd>
<dt id="silk.cv.homography.HomographicSampler.dtype"><code class="name">var <span class="ident">dtype</span></code></dt>
<dd>
<div class="desc"></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">@property
def dtype(self):
    return self._homog_src_coords.dtype</code></pre>
</details>
</dd>
<dt id="silk.cv.homography.HomographicSampler.forward_matrices"><code class="name">var <span class="ident">forward_matrices</span></code></dt>
<dd>
<div class="desc"></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">@property
def forward_matrices(self):
    if self._forward_matrices is None:
        self._forward_matrices = HomographicSampler._get_perspective_transform(
            self.dest_coords,
            self.src_coords,
        )
    return self._forward_matrices</code></pre>
</details>
</dd>
<dt id="silk.cv.homography.HomographicSampler.src_coords"><code class="name">var <span class="ident">src_coords</span> : torch.Tensor</code></dt>
<dd>
<div class="desc"><p>Coordinates of the homographic crop corners in the virtual image coordinate reference system.
Those four points are ordered as : (top-left, top-right, bottom-left, bottom-right)</p>
<h2 id="returns">Returns</h2>
<dl>
<dt><code>torch.Tensor</code></dt>
<dd>:math:<code>(B, 4, 2)</code> tensor containing the four corner coordinates of the homographic crop.</dd>
</dl></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">@property
def src_coords(self) -&gt; torch.Tensor:
    &#34;&#34;&#34;Coordinates of the homographic crop corners in the virtual image coordinate reference system.
    Those four points are ordered as : (top-left, top-right, bottom-left, bottom-right)

    Returns
    -------
    torch.Tensor
    :math:`(B, 4, 2)` tensor containing the four corner coordinates of the homographic crop.
    &#34;&#34;&#34;
    if self._src_coords is None:
        self._src_coords = HomographicSampler._convert_points_from_homogeneous(
            self._homog_src_coords
        )
    return self._src_coords</code></pre>
</details>
</dd>
</dl>
<h3>Methods</h3>
<dl>
<dt id="silk.cv.homography.HomographicSampler.extract_crop"><code class="name flex">
<span>def <span class="ident">extract_crop</span></span>(<span>self, images: torch.Tensor, sampling_size: Tuple[int, int], mode='bilinear', padding_mode='zeros', direction='forward') ‑> torch.Tensor</span>
</code></dt>
<dd>
<div class="desc"><p>Extract all crops from a set of images.</p>
<p>It can handle one-image-to-many-crops and many-images-to-many-crops.
If the number of images is smaller than the number of crops, a number of n crops will be assigned to each image such that :math:<code>n_crops = n * n_images</code>.</p>
<h2 id="parameters">Parameters</h2>
<dl>
<dt><strong><code>images</code></strong> :&ensp;<code>torch.Tensor</code></dt>
<dd>Tensor containing all images (valid shapes are :math:<code>(B,C,H,W)</code> and :math:<code>(C,H,W)</code>).</dd>
<dt><strong><code>sampling_size</code></strong> :&ensp;<code>Tuple[int, int]</code></dt>
<dd>Spatial shape of the output crops.</dd>
<dt><strong><code>mode</code></strong> :&ensp;<code>str</code>, optional</dt>
<dd>Sampling mode passed to <code>grid_sample</code>, by default "bilinear".</dd>
<dt><strong><code>padding_mode</code></strong> :&ensp;<code>str</code>, optional</dt>
<dd>Padding mode passed to <code>grid_sample</code>, by default "zeros".</dd>
<dt><strong><code>direction</code></strong> :&ensp;<code>str</code>, optional</dt>
<dd>Direction of the crop sampling (<code>src -&gt; dest</code> or <code>dest -&gt; src</code>), by default "forward". Valid are "forward" and "backward".</dd>
</dl>
<h2 id="returns">Returns</h2>
<dl>
<dt><code>torch.Tensor</code></dt>
<dd>Sampled crops using transformed virtual crops.</dd>
</dl>
<h2 id="raises">Raises</h2>
<dl>
<dt><code>RuntimeError</code></dt>
<dd>Raised if <code>images</code> shape is invalid.</dd>
<dt><code>RuntimeError</code></dt>
<dd>Raised if <code>images</code> batch size isn't a multiple of the number of virtual crops.</dd>
<dt><code>RuntimeError</code></dt>
<dd>Raised if <code>direction</code> is invalid.</dd>
</dl></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def extract_crop(
    self,
    images: torch.Tensor,
    sampling_size: Tuple[int, int],
    mode=&#34;bilinear&#34;,
    padding_mode=&#34;zeros&#34;,
    direction=&#34;forward&#34;,
) -&gt; torch.Tensor:
    &#34;&#34;&#34;Extract all crops from a set of images.

    It can handle one-image-to-many-crops and many-images-to-many-crops.
    If the number of images is smaller than the number of crops, a number of n crops will be asssigned to each image such that :math:`n_crops = n * n_images`.

    Parameters
    ----------
    images : torch.Tensor
        Tensor containing all images (valid shapes are :math:`(B,C,H,W)` and :math:`(C,H,W)`).
    sampling_size : Tuple[int, int]
        Spatial shape of the output crops.
    mode : str, optional
        Sampling mode passed to `grid_sample`, by default &#34;bilinear&#34;.
    padding_mode : str, optional
        Padding mode passed to `grid_sample`, by default &#34;zeros&#34;.
    direction : str, optional
        Direction of the crop sampling (`src -&gt; dest` or `dest -&gt; src`), by default &#34;forward&#34;. Valid are &#34;forward&#34; and &#34;backward&#34;.

    Returns
    -------
    torch.Tensor
        Sampled crops using transformed virtual crops.

    Raises
    ------
    RuntimeError
        Raised if `images` shape is invalid.
    RuntimeError
        Raised if `images` batch size isn&#39;t a multiple of the number of virtual crops.
    RuntimeError
        Raised if `direction` is invalid.
    &#34;&#34;&#34;
    if images.dim() == 3:
        images = images.unsqueeze(0)
    elif images.dim() != 4:
        raise RuntimeError(&#34;provided image(s) should be of shape BxCxHxW or CxHxW&#34;)

    if self.batch_size % images.shape[0] != 0:
        raise RuntimeError(
            f&#34;the sampler batch size ({self.batch_size}) should be a multiple of the image batch size (found {images.shape[0]})&#34;
        )

    if direction not in HomographicSampler._VALID_DIRECTIONS:
        raise RuntimeError(
            f&#39;invalid direction &#34;{direction}&#34; found, should be one of {self._VALID_DIRECTIONS}&#39;
        )

    # reshape images to handle multiple crops
    crop_per_image = self.batch_size // images.shape[0]
    images = images.unsqueeze(1)
    images = images.expand(-1, crop_per_image, -1, -1, -1)
    images = images.reshape(self.batch_size, *images.shape[2:])

    # select homography matrix
    if direction == &#34;forward&#34;:
        matrix = self.forward_matrices
    else:
        matrix = self.backward_matrices

    # create grid of coordinates used for image sampling
    grid = HomographicSampler._create_meshgrid(
        sampling_size[0],
        sampling_size[1],
        device=matrix.device,
        dtype=matrix.dtype,
    )
    grid = grid.expand(self.batch_size, -1, -1, -1)
    grid = HomographicSampler._transform_points(matrix[:, None, None], grid)
    grid = grid.type_as(images)

    # sample pixels using transformed grid coordinates
    return grid_sample(
        images,
        grid,
        mode=mode,
        padding_mode=padding_mode,
        align_corners=False,
    )</code></pre>
</details>
</dd>
<dt id="silk.cv.homography.HomographicSampler.reset"><code class="name flex">
<span>def <span class="ident">reset</span></span>(<span>self, batch_size: Optional[int] = None, device: Optional[str] = None)</span>
</code></dt>
<dd>
<div class="desc"><p>Resets all the crops to their initial position and sizes.</p>
<h2 id="parameters">Parameters</h2>
<dl>
<dt><strong><code>batch_size</code></strong> :&ensp;<code>int</code>, optional</dt>
<dd>Number of virtual crops to handle, by default None.</dd>
<dt><strong><code>device</code></strong> :&ensp;<code>str</code>, optional</dt>
<dd>Device on which operations will be done, by default None.</dd>
</dl></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def reset(self, batch_size: Optional[int] = None, device: Optional[str] = None):
    &#34;&#34;&#34;Resets all the crops to their initial position and sizes.

    Parameters
    ----------
    batch_size : int, optional
        Number of virtual crops to handle, by default None.
    device : str, optional
        Device on which operations will be done, by default None.
    &#34;&#34;&#34;
    batch_size = self.batch_size if batch_size is None else batch_size
    device = self.device if device is None else device

    self._dest_coords = HomographicSampler._DEST_COORD.to(device)
    self._dest_coords = self._dest_coords.unsqueeze(0)
    self._dest_coords = self._dest_coords.expand(batch_size, -1, -1)

    self._homog_src_coords = HomographicSampler._convert_points_to_homogeneous(
        self._dest_coords
    )

    self._clear_cache()</code></pre>
</details>
</dd>
<dt id="silk.cv.homography.HomographicSampler.rotate"><code class="name flex">
<span>def <span class="ident">rotate</span></span>(<span>self, angles: Union[float, torch.Tensor], clockwise: bool = False, axis: str = 'z', local_center: bool = False)</span>
</code></dt>
<dd>
<div class="desc"><p>Rotate virtual crops.</p>
<h2 id="parameters">Parameters</h2>
<dl>
<dt><strong><code>angles</code></strong> :&ensp;<code>Union[float, torch.Tensor]</code></dt>
<dd>Angles of rotation. If scalar, applied to all crops. If :math:<code>(B, 1)</code> tensor, applied to each crop independently.</dd>
<dt><strong><code>clockwise</code></strong> :&ensp;<code>bool</code>, optional</dt>
<dd>Rotational direction, by default False</dd>
<dt><strong><code>axis</code></strong> :&ensp;<code>str</code>, optional</dt>
<dd>Axis of rotation, by default "z". Valid values are "x", "y" and "z". "z" is in-plane rotation. "x" and "y" are out-of-plane rotations.</dd>
<dt><strong><code>local_center</code></strong> :&ensp;<code>bool</code>, optional</dt>
<dd>Rotate on the center of the crop, by default False. If False, use global center of rotation (i.e. initial crop center). This option is only relevant after a shift has been used.</dd>
</dl>
<h2 id="raises">Raises</h2>
<dl>
<dt><code>RuntimeError</code></dt>
<dd>Raised if provided axis is invalid.</dd>
</dl></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def rotate(
    self,
    angles: Union[float, torch.Tensor],
    clockwise: bool = False,
    axis: str = &#34;z&#34;,
    local_center: bool = False,
):
    &#34;&#34;&#34;Rotate virtual crops.

    Parameters
    ----------
    angles : Union[float, torch.Tensor]
        Angles of rotation. If scalar, applied to all crops. If :math:`(B, 1)` tensor, applied to each crop independently.
    clockwise : bool, optional
        Rotational direction, by default False
    axis : str, optional
        Axis of rotation, by default &#34;z&#34;. Valid values are &#34;x&#34;, &#34;y&#34; and &#34;z&#34;. &#34;z&#34; is in-plane rotation. &#34;x&#34; and &#34;y&#34; are out-of-plane rotations.
    local_center : bool, optional
        Rotate on the center of the crop, by default False. If False, use global center of rotation (i.e. initial crop center). This option is only relevant after a shift has been used.

    Raises
    ------
    RuntimeError
        Raised if provided axis is invalid.
    &#34;&#34;&#34;
    if axis not in HomographicSampler._VALID_AXIS:
        raise RuntimeError(
            f&#39;provided axis &#34;{axis}&#34; isn\&#39;t valid, should be one of {HomographicSampler._VALID_AXIS}&#39;
        )

    angles = self._auto_expand(angles, outer_dim_size=1)

    if clockwise:
        angles = -angles

    cos_a = torch.cos(angles)
    sin_a = torch.sin(angles)

    _1 = torch.ones_like(cos_a)
    _0 = torch.zeros_like(cos_a)

    if axis == &#34;z&#34;:
        flatmat = [+cos_a, -sin_a, _0, +sin_a, +cos_a, _0, _0, _0, _1]
    elif axis == &#34;y&#34;:
        flatmat = [+cos_a, _0, -sin_a, _0, _1, _0, +sin_a, _0, +cos_a]
    elif axis == &#34;x&#34;:
        flatmat = [_1, _0, _0, _0, +cos_a, +sin_a, _0, -sin_a, +cos_a]

    rot_matrix = torch.cat(flatmat, dim=-1)
    rot_matrix = rot_matrix.view(self.batch_size, 3, 3)

    self._clear_cache()

    if local_center:
        center = torch.mean(self._homog_src_coords, dim=1, keepdim=True)

        self._homog_src_coords -= center
        self._homog_src_coords = self._homog_src_coords @ rot_matrix
        self._homog_src_coords += center
    else:
        if axis != &#34;z&#34;:
            self._homog_src_coords[..., -1] -= 1.0
        self._homog_src_coords = self._homog_src_coords @ rot_matrix
        if axis != &#34;z&#34;:
            self._homog_src_coords[..., -1] += 1.0</code></pre>
</details>
</dd>
<dt id="silk.cv.homography.HomographicSampler.scale"><code class="name flex">
<span>def <span class="ident">scale</span></span>(<span>self, factors: Union[float, Tuple[float, float], torch.Tensor], local_center: bool = False)</span>
</code></dt>
<dd>
<div class="desc"><p>Scale the virtual crops.</p>
<h2 id="parameters">Parameters</h2>
<dl>
<dt><strong><code>factors</code></strong> :&ensp;<code>Union[float, Tuple[float, float], torch.Tensor]</code></dt>
<dd>Scaling factors. Scalar or Tuple will be applied to all crops. :math:<code>(B, 2)</code> tensors will be applied to each crop independently.</dd>
<dt><strong><code>local_center</code></strong> :&ensp;<code>bool</code>, optional</dt>
<dd>Scale on the center of the crop, by default False. If False, use global center of rotation (i.e. initial crop center). This option is only relevant after a shift has been used.</dd>
</dl></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def scale(
    self,
    factors: Union[float, Tuple[float, float], torch.Tensor],
    local_center: bool = False,
):
    &#34;&#34;&#34;Scale the virtual crops.

    Parameters
    ----------
    factors : Union[float, Tuple[float, float], torch.Tensor]
        Scaling factors. Scalar or Tuple will be applied to all crops. :math:`(B, 2)` tensors will be applied to each crop independently.
    local_center : bool, optional
        Scale on the center of the crop, by default False. If False, use global center of rotation (i.e. initial crop center). This option is only relevant after a shift has been used.
    &#34;&#34;&#34;
    factors = self._auto_expand(factors, outer_dim_size=2)
    factors = factors.unsqueeze(1)

    self._clear_cache()

    if local_center:
        center = torch.mean(self._homog_src_coords, dim=1, keepdim=True)

        self._homog_src_coords -= center
        self._homog_src_coords[..., :2] *= factors
        self._homog_src_coords += center
    else:
        self._homog_src_coords[..., :2] *= factors</code></pre>
</details>
</dd>
<dt id="silk.cv.homography.HomographicSampler.shift"><code class="name flex">
<span>def <span class="ident">shift</span></span>(<span>self, delta: Union[float, Tuple[float, float], torch.Tensor])</span>
</code></dt>
<dd>
<div class="desc"><p>Shift virtual crops.</p>
<h2 id="parameters">Parameters</h2>
<dl>
<dt><strong><code>delta</code></strong> :&ensp;<code>Union[float, Tuple[float, float], torch.Tensor]</code></dt>
<dd>Shift values. Scalar or Tuple will be applied to all crops. :math:<code>(B, 2)</code> tensors will be applied to each crop independently.</dd>
</dl></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def shift(self, delta: Union[float, Tuple[float, float], torch.Tensor]):
    &#34;&#34;&#34;Shift virtual crops.

    Parameters
    ----------
    delta : Union[float, Tuple[float, float], torch.Tensor]
        Shift values. Scalar or Tuple will be applied to all crops. :math:`(B, 2)` tensors will be applied to each crop independently.
    &#34;&#34;&#34;

    delta = self._auto_expand(delta, outer_dim_size=2)
    delta = delta.unsqueeze(1)
    delta = delta * self._homog_src_coords[..., -1].unsqueeze(-1)

    self._clear_cache()
    self._homog_src_coords[..., :2] += delta</code></pre>
</details>
</dd>
<dt id="silk.cv.homography.HomographicSampler.to"><code class="name flex">
<span>def <span class="ident">to</span></span>(<span>self, device: str)</span>
</code></dt>
<dd>
<div class="desc"><p>Moves all operations to new device.</p>
<h2 id="parameters">Parameters</h2>
<dl>
<dt><strong><code>device</code></strong> :&ensp;<code>str</code></dt>
<dd>Pytorch device.</dd>
</dl></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def to(self, device: str):
    &#34;&#34;&#34;Moves all operations to new device.

    Parameters
    ----------
    device : str
        Pytorch device.
    &#34;&#34;&#34;
    if device != self.device:
        self._to(device, &#34;_dest_coords&#34;)
        self._to(device, &#34;_src_coords&#34;)
        self._to(device, &#34;_homog_src_coords&#34;)
        self._to(device, &#34;_forward_matrices&#34;)
        self._to(device, &#34;_backward_matrices&#34;)
    return self</code></pre>
</details>
</dd>
<dt id="silk.cv.homography.HomographicSampler.transform_points"><code class="name flex">
<span>def <span class="ident">transform_points</span></span>(<span>self, points: Union[torch.Tensor, List[torch.Tensor]], image_shape: Optional[Tuple[int, int]] = None, direction: str = 'forward', ordering: str = 'xy') ‑> Union[torch.Tensor, List[torch.Tensor]]</span>
</code></dt>
<dd>
<div class="desc"><p>Apply homography to a set of points.</p>
<h2 id="parameters">Parameters</h2>
<dl>
<dt><strong><code>points</code></strong> :&ensp;<code>Union[torch.Tensor, List[torch.Tensor]]</code></dt>
<dd>BxNx2 tensor or list of Nx2 tensors containing the coordinates to transform.</dd>
<dt><strong><code>image_shape</code></strong> :&ensp;<code>Optional[Tuple[int, int]]</code>, optional</dt>
<dd>Shape of the tensor the coordinates reference, as in (height, width), by default None.
If not provided, the coordinates are assumed to be already normalized between [-1, +1].</dd>
<dt><strong><code>direction</code></strong> :&ensp;<code>str</code>, optional</dt>
<dd>Direction of the homography, by default "forward".</dd>
<dt><strong><code>ordering</code></strong> :&ensp;<code>str</code>, optional</dt>
<dd>Specify the order in which the x,y coordinates are stored in "points", by default "xy".</dd>
</dl>
<h2 id="returns">Returns</h2>
<dl>
<dt><code>Union[torch.Tensor, List[torch.Tensor]]</code></dt>
<dd>Transformed coordinates.</dd>
</dl>
<h2 id="raises">Raises</h2>
<dl>
<dt><code>RuntimeError</code></dt>
<dd>If the provided direction is invalid.</dd>
<dt><code>RuntimeError</code></dt>
<dd>If the provided ordering is invalid.</dd>
</dl></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def transform_points(
    self,
    points: Union[torch.Tensor, List[torch.Tensor]],
    image_shape: Optional[Tuple[int, int]] = None,
    direction: str = &#34;forward&#34;,
    ordering: str = &#34;xy&#34;,
) -&gt; Union[torch.Tensor, List[torch.Tensor]]:
    &#34;&#34;&#34;Apply homography to a set of points.

    Parameters
    ----------
    points : Union[torch.Tensor, List[torch.Tensor]]
        BxNx2 tensor or list of Nx2 tensors containing the coordinates to transform.
    image_shape : Optional[Tuple[int, int]], optional
        Shape of the tensor the coordinates reference, as in (height, width), by default None.
        If not provided, the coordinates are assumed to be already normalized between [-1, +1].
    direction : str, optional
        Direction of the homography, by default &#34;forward&#34;.
    ordering : str, optional
        Specify the order in which the x,y coordinates are stored in &#34;points&#34;, by default &#34;xy&#34;.

    Returns
    -------
    Union[torch.Tensor, List[torch.Tensor]]
        Transformed coordinates.

    Raises
    ------
    RuntimeError
        If the provided direction is invalid.
    RuntimeError
        If the provided ordering is invalid.
    &#34;&#34;&#34;
    # check arguments
    if direction not in HomographicSampler._VALID_DIRECTIONS:
        raise RuntimeError(
            f&#39;invalid direction &#34;{direction}&#34; found, should be one of {self._VALID_DIRECTIONS}&#39;
        )
    if ordering not in HomographicSampler._VALID_ORDERING:
        raise RuntimeError(
            f&#39;invalid ordering &#34;{ordering}&#34; found, should be one of {self._VALID_ORDERING}&#39;
        )

    # select homography matrices
    if direction == &#34;forward&#34;:
        matrix = self.backward_matrices
    else:
        matrix = self.forward_matrices

    # pad input if using variable length
    lengths = None
    if not isinstance(points, torch.Tensor):
        lengths = [p.shape[0] for p in points]
        points = pad_sequence(points, batch_first=True)

    # convert to &#34;xy&#34; ordering
    if ordering == &#34;yx&#34;:
        points = points[..., [1, 0]]

    # bring coordinates to [-1, +1] range
    if image_shape is not None:
        image_shape = torch.tensor(
            [image_shape[1], image_shape[0]],
            dtype=points.dtype,
            device=points.device,
        )
        image_shape = image_shape[None, None, ...]
        image_shape_half = image_shape / 2.0
        pixel_shift = 0.5 / image_shape
        points = (points - image_shape_half) / image_shape_half + pixel_shift

    # reshape points to handle multiple transforms
    transform_per_points = self.batch_size // points.shape[0]
    points = points.unsqueeze(1)
    points = points.expand(-1, transform_per_points, -1, -1)
    points = points.reshape(self.batch_size, *points.shape[2:])

    # change lengths size accordingly
    if transform_per_points != 1:
        lengths = list(
            itertools.chain.from_iterable(
                itertools.repeat(s, transform_per_points) for s in lengths
            )
        )

    # apply homography to point coordinates
    transformed_points = HomographicSampler._transform_points(
        matrix[:, None, None], points
    )

    # bring coordinates to original range
    if image_shape is not None:
        transformed_points = (
            (transformed_points - pixel_shift) * image_shape_half
        ) + image_shape_half

    # convert back to initial ordering
    if ordering == &#34;yx&#34;:
        transformed_points = transformed_points[..., [1, 0]]

    # remove padded results if input was variable length
    if lengths is not None:
        transformed_points = [
            transformed_points[i, :s] for i, s in enumerate(lengths)
        ]

    return transformed_points</code></pre>
</details>
</dd>
</dl>
</dd>
</dl>
</section>
</article>
<nav id="sidebar">
<h1>Index</h1>
<div class="toc">
<ul></ul>
</div>
<ul id="index">
<li><h3>Super-module</h3>
<ul>
<li><code><a title="silk.cv" href="index.html">silk.cv</a></code></li>
</ul>
</li>
<li><h3><a href="#header-functions">Functions</a></h3>
<ul class="">
<li><code><a title="silk.cv.homography.resize_homography" href="#silk.cv.homography.resize_homography">resize_homography</a></code></li>
</ul>
</li>
<li><h3><a href="#header-classes">Classes</a></h3>
<ul>
<li>
<h4><code><a title="silk.cv.homography.HomographicSampler" href="#silk.cv.homography.HomographicSampler">HomographicSampler</a></code></h4>
<ul class="two-column">
<li><code><a title="silk.cv.homography.HomographicSampler.backward_matrices" href="#silk.cv.homography.HomographicSampler.backward_matrices">backward_matrices</a></code></li>
<li><code><a title="silk.cv.homography.HomographicSampler.batch_size" href="#silk.cv.homography.HomographicSampler.batch_size">batch_size</a></code></li>
<li><code><a title="silk.cv.homography.HomographicSampler.dest_coords" href="#silk.cv.homography.HomographicSampler.dest_coords">dest_coords</a></code></li>
<li><code><a title="silk.cv.homography.HomographicSampler.device" href="#silk.cv.homography.HomographicSampler.device">device</a></code></li>
<li><code><a title="silk.cv.homography.HomographicSampler.dtype" href="#silk.cv.homography.HomographicSampler.dtype">dtype</a></code></li>
<li><code><a title="silk.cv.homography.HomographicSampler.extract_crop" href="#silk.cv.homography.HomographicSampler.extract_crop">extract_crop</a></code></li>
<li><code><a title="silk.cv.homography.HomographicSampler.forward_matrices" href="#silk.cv.homography.HomographicSampler.forward_matrices">forward_matrices</a></code></li>
<li><code><a title="silk.cv.homography.HomographicSampler.reset" href="#silk.cv.homography.HomographicSampler.reset">reset</a></code></li>
<li><code><a title="silk.cv.homography.HomographicSampler.rotate" href="#silk.cv.homography.HomographicSampler.rotate">rotate</a></code></li>
<li><code><a title="silk.cv.homography.HomographicSampler.scale" href="#silk.cv.homography.HomographicSampler.scale">scale</a></code></li>
<li><code><a title="silk.cv.homography.HomographicSampler.shift" href="#silk.cv.homography.HomographicSampler.shift">shift</a></code></li>
<li><code><a title="silk.cv.homography.HomographicSampler.src_coords" href="#silk.cv.homography.HomographicSampler.src_coords">src_coords</a></code></li>
<li><code><a title="silk.cv.homography.HomographicSampler.to" href="#silk.cv.homography.HomographicSampler.to">to</a></code></li>
<li><code><a title="silk.cv.homography.HomographicSampler.transform_points" href="#silk.cv.homography.HomographicSampler.transform_points">transform_points</a></code></li>
</ul>
</li>
</ul>
</li>
</ul>
</nav>
</main>
<footer id="footer">
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.10.0</a>.</p>
</footer>
</body>
</html>