// Copyright (C) 2021 NVIDIA CORPORATION & AFFILIATES.  All rights reserved.
//
// This work is made available under the Nvidia Source Code License-NC.
// To view a copy of this license, check out LICENSE.md
#ifndef VOXLIB_COMMON_H
#define VOXLIB_COMMON_H

// TORCH_CHECK and the at::Tensor API used below come from PyTorch's C++
// extension headers.
#include <torch/extension.h>

#define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x)                                                    \
  TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x)                                                         \
  CHECK_CUDA(x);                                                               \
  CHECK_CONTIGUOUS(x)
#define CHECK_CPU(x)                                                           \
  TORCH_CHECK(x.device().is_cpu(), #x " must be a CPU tensor")
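
// Usage sketch (hypothetical extension entry point, not part of the
// original header): the CHECK_* macros are intended to guard tensor
// arguments at the top of each exported op.
//
//   torch::Tensor scale(torch::Tensor x, float s) {
//     CHECK_INPUT(x);  // throws unless x is a contiguous CUDA tensor
//     return x * s;
//   }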

#include <cuda.h>
#include <cuda_runtime.h>
// Scalar and vector math helpers shared by host and device code.

// Integer division that rounds toward negative infinity (floor division),
// unlike C++'s built-in `/`, which truncates toward zero.
__host__ __device__ __forceinline__ int floor_div(int a, int b) {
  int c = a / b;

  if (c * b > a) {
    c--;
  }

  return c;
}
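
// Worked example of the difference from truncating division:
//   floor_div(-3, 2) == -2   (while -3 / 2 == -1)
//   floor_div( 3, 2) ==  1   (same as  3 / 2)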

// r = a x b (3-component cross product). r must not alias a or b, since
// r[0] is written before a[0]/b[0] are last read.
template <typename scalar_t>
__device__ __host__ __forceinline__ void cross(scalar_t *r, const scalar_t *a,
                                               const scalar_t *b) {
  r[0] = a[1] * b[2] - a[2] * b[1];
  r[1] = a[2] * b[0] - a[0] * b[2];
  r[2] = a[0] * b[1] - a[1] * b[0];
}

// 3-component dot product (float-only overload).
__device__ __host__ __forceinline__ float dot(const float *a, const float *b) {
  return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
}

// Copy ndim components from a to r.
template <typename scalar_t, int ndim>
__device__ __host__ __forceinline__ void copyarr(scalar_t *r,
                                                 const scalar_t *a) {
#pragma unroll
  for (int i = 0; i < ndim; i++) {
    r[i] = a[i];
  }
}

// TODO: use rsqrt to speed up (see the sketch at the end of this file)
// In-place version. No zero-length guard: the caller must ensure |a| > 0.
template <typename scalar_t, int ndim>
__device__ __host__ __forceinline__ void normalize(scalar_t *a) {
  scalar_t vec_len = 0.0f;
#pragma unroll
  for (int i = 0; i < ndim; i++) {
    vec_len += a[i] * a[i];
  }
  // Plain sqrt so that double instantiations keep full precision
  // (sqrtf would silently round through float).
  vec_len = sqrt(vec_len);
#pragma unroll
  for (int i = 0; i < ndim; i++) {
    a[i] /= vec_len;
  }
}

// Normalize + copy: r = a / |a|. Same zero-length caveat as above.
template <typename scalar_t, int ndim>
__device__ __host__ __forceinline__ void normalize(scalar_t *r,
                                                   const scalar_t *a) {
  scalar_t vec_len = 0.0f;
#pragma unroll
  for (int i = 0; i < ndim; i++) {
    vec_len += a[i] * a[i];
  }
  vec_len = sqrt(vec_len);
#pragma unroll
  for (int i = 0; i < ndim; i++) {
    r[i] = a[i] / vec_len;
  }
}
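
// A possible rsqrt-based variant for the TODO above (a sketch, not part of
// the original header, assuming scalar_t is float). rsqrtf is only
// available in device code, so this version drops the __host__ qualifier;
// it trades one division per component for a single fast approximate
// reciprocal square root.
//
//   template <typename scalar_t, int ndim>
//   __device__ __forceinline__ void normalize_fast(scalar_t *a) {
//     scalar_t sq_len = 0.0f;
//   #pragma unroll
//     for (int i = 0; i < ndim; i++) {
//       sq_len += a[i] * a[i];
//     }
//     scalar_t inv_len = rsqrtf(sq_len);  // approximate 1 / sqrt(sq_len)
//   #pragma unroll
//     for (int i = 0; i < ndim; i++) {
//       a[i] *= inv_len;
//     }
//   }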

#endif // VOXLIB_COMMON_H