/* Copyright (c) 2021-2025, InterDigital Communications, Inc
 * All rights reserved.
 *
 * This modified version adds an optional CUDA backend for entropy coding
 * while preserving the public API, data types (int32), and class names.
 * Build with -DUSE_CUDA and NVCC to enable the GPU path; otherwise it
 * transparently falls back to the original CPU implementation.
 *
 * CUDA strategy:
 *  - 4-way interleaved rANS per stream (lanes 0..3) processed on GPU.
 *  - Each thread handles one interleaved lane with reverse traversal.
 *  - Device buffers collect 32-bit words emitted during renormalization.
 *  - Host concatenates the 4 lanes (in lane order 0..3) then appends
 *    the final encoder states. Note this lane-concatenated layout differs
 *    from the CPU single-stream format and needs a matching interleaved
 *    decoder.
 *
 * Public API is unchanged (vectors of int32_t, pybind layer unchanged).
 * The decoder remains on CPU for simplicity/compatibility. You can add
 * a similar GPU path for decoding if desired.
 */

#include "rans_interface.hpp"

#include <pybind11/pybind11.h>
#include <pybind11/stl.h>

#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <numeric>
#include <stdexcept>
#include <string>
#include <vector>

#include "rans64.h"

#ifdef USE_CUDA
#include <cuda_runtime.h>
#include <stdint.h>

// Simple CUDA error check macro
#define CUDA_CHECK(expr)                                                       \
  do {                                                                         \
    cudaError_t _e = (expr);                                                   \
    if (_e != cudaSuccess) {                                                   \
      throw std::runtime_error(std::string("CUDA error: ") +                   \
                               cudaGetErrorString(_e));                        \
    }                                                                          \
  } while (0)

#endif

namespace py = pybind11;

/* probability range */
constexpr int precision = 16;
constexpr uint16_t bypass_precision = 4; /* number of bits in bypass mode */
constexpr uint16_t max_bypass_val = (1 << bypass_precision) - 1;

namespace {

void assert_cdfs(const std::vector<std::vector<int>> &cdfs,
                 const std::vector<int> &cdfs_sizes) {
#ifndef NDEBUG
  for (int i = 0; i < static_cast<int>(cdfs.size()); ++i) {
    assert(cdfs[i][0] == 0);
    assert(cdfs[i][cdfs_sizes[i] - 1] == (1 << precision));
    for (int j = 0; j < cdfs_sizes[i] - 1; ++j) {
      assert(cdfs[i][j + 1] > cdfs[i][j]);
    }
  }
#endif
}

/* Support only 16 bits word max */
inline void Rans64EncPutBits(Rans64State *r, uint32_t **pptr, uint32_t val,
                             uint32_t nbits) {
  assert(nbits <= 16);
  assert(val < (1u << nbits));

  /* Re-normalize */
  uint64_t x = *r;
  uint32_t freq = 1 << (16 - nbits);
  uint64_t x_max = ((RANS64_L >> 16) << 32) * freq;
  if (x >= x_max) {
    *pptr -= 1;
    **pptr = (uint32_t)x;
    x >>= 32;
    Rans64Assert(x < x_max);
  }

  /* x = C(s, x) */
  *r = (x << nbits) | val;
}

inline uint32_t Rans64DecGetBits(Rans64State *r, uint32_t **pptr,
                                 uint32_t n_bits) {
  uint64_t x = *r;
  uint32_t val = x & ((1u << n_bits) - 1);

  /* Re-normalize */
  x = x >> n_bits;
  if (x < RANS64_L) {
    x = (x << 32) | **pptr;
    *pptr += 1;
    Rans64Assert(x >= RANS64_L);
  }

  *r = x;

  return val;
}

} // namespace

#ifdef USE_CUDA
// --------------------------- CUDA path ---------------------------

struct DeviceCDFView {
  const int32_t *cdf_data;    // flattened CDFs
  const int32_t *cdf_offsets; // start offset of each CDF in cdf_data
  const int32_t *cdf_sizes;   // size of each CDF row
  const int32_t *offsets;     // symbol offsets per CDF
  int32_t num_cdfs;
};

// Minimal device rANS helpers (64-bit state). We mirror the constants.
__device__ inline void d_Rans64EncInit(uint64_t *x) { *x = RANS64_L; }

__device__ inline void
d_Rans64EncPut(uint64_t *x,
               uint32_t *out, // reverse-growing buffer
               int *out_idx,  // current write index (decrementing)
               uint32_t start, uint32_t range, int scale_bits) {
  // Renormalize
  const uint64_t x_max = ((uint64_t)RANS64_L >> scale_bits) << 32;
  while ((*x) >= x_max * range) {
    out[(*out_idx)--] = (uint32_t)(*x);
    (*x) >>= 32;
  }
  // x = floor(x/range) << scale_bits + (x % range) + start
  uint64_t q = (*x) / range;
  uint32_t r = (uint32_t)((*x) - q * range);
  *x = (q << scale_bits) + r + start;
}

__device__ inline void d_Rans64EncPutBits(uint64_t *x, uint32_t *out,
                                          int *out_idx, uint32_t val,
                                          uint32_t nbits) {
  const uint32_t freq = 1u << (16 - nbits);
  const uint64_t x_max = ((uint64_t)(RANS64_L >> 16) << 32) * freq;
  if ((*x) >= x_max) {
    out[(*out_idx)--] = (uint32_t)(*x);
    (*x) >>= 32;
  }
  *x = ((*x) << nbits) | val;
}

// Binary search on a single CDF row (strictly increasing, last == 1<<precision)
__device__ inline uint32_t d_cdf_upper_bound(const int32_t *row, int row_size,
                                             uint32_t target) {
  int lo = 0, hi = row_size; // hi is end
  while (lo < hi) {
    int mid = (lo + hi) >> 1;
    if ((uint32_t)row[mid] > target)
      hi = mid;
    else
      lo = mid + 1;
  }
  return (uint32_t)lo; // index of first element > target
}

// Kernel: 4-way interleaved rANS encoding per block (one stream per block).
// Each block handles one chunk of symbols; threadIdx.x in [0..3] is the lane.
__global__ void encode_interleaved4_kernel(
    const int32_t *__restrict__ symbols, // N symbols (host provided order)
    const int32_t *__restrict__ indexes, // N cdf indices
    int N, DeviceCDFView cdfv,
    // Pre-allocated per-lane reverse output buffers (uint32 words)
    uint32_t *__restrict__ lane_buf, // shape [4, max_words], flattened
    int *__restrict__ lane_head,     // starting from max_words-1 downwards
    int max_words_per_lane,
    // Final states per lane (to be flushed by host)
    uint64_t *__restrict__ lane_states) {
  const int lane = threadIdx.x; // 0..3
  if (lane >= 4)
    return;

  int out_idx = max_words_per_lane - 1; // reverse growing
  uint32_t *out = lane_buf + lane * max_words_per_lane;

  uint64_t x;
  d_Rans64EncInit(&x);

  // Reverse traversal, take symbols where (i & 3) == lane
  for (int i = N - 1 - lane; i >= 0; i -= 4) {
    const int cdf_idx = indexes[i];
    const int32_t cdf_off = cdfv.cdf_offsets[cdf_idx];
    const int32_t cdf_size = cdfv.cdf_sizes[cdf_idx];
    const int32_t sym_off = cdfv.offsets[cdf_idx];
    const int32_t max_value = cdf_size - 2;

    int32_t value = symbols[i] - sym_off;
    uint32_t raw_val = 0;
    if (value < 0) {
      raw_val = (uint32_t)(-2 * value - 1);
      value = max_value;
    } else if (value >= max_value) {
      raw_val = (uint32_t)(2 * (value - max_value));
      value = max_value;
    }

    const int32_t *row = cdfv.cdf_data + cdf_off;
    // safety in debug: row[0]==0, row[last]==1<<precision

    // Range coding for the bounded symbol
    const uint32_t start = (uint32_t)row[value];
    const uint32_t range = (uint32_t)(row[value + 1] - row[value]);
    d_Rans64EncPut(&x, out, &out_idx, start, range, precision);

    // bypass if value == max_value
    if (value == max_value) {
      // count n_bypass in 4-bit chunks
      int32_t n_bypass = 0;
      uint32_t tmp = raw_val;
      while ((tmp >> (n_bypass * bypass_precision)) != 0)
        ++n_bypass;

      // encode n_bypass using 4-bit chunks saturated to max_bypass_val
      int32_t vb = n_bypass;
      while (vb >= max_bypass_val) {
        d_Rans64EncPutBits(&x, out, &out_idx, max_bypass_val, bypass_precision);
        vb -= max_bypass_val;
      }
      d_Rans64EncPutBits(&x, out, &out_idx, (uint32_t)vb, bypass_precision);

      // encode raw_val in n_bypass chunks
      for (int32_t j = 0; j < n_bypass; ++j) {
        const uint32_t v = (raw_val >> (j * bypass_precision)) & max_bypass_val;
        d_Rans64EncPutBits(&x, out, &out_idx, v, bypass_precision);
      }
    }
  }

  lane_states[lane] = x;
  lane_head[lane] = out_idx; // last written position
}

// Host helper to run the kernel and assemble final buffer into a std::string
static std::string
encode_with_cuda_impl(const std::vector<int32_t> &symbols,
                      const std::vector<int32_t> &indexes,
                      const std::vector<std::vector<int32_t>> &cdfs,
                      const std::vector<int32_t> &cdfs_sizes,
                      const std::vector<int32_t> &offsets) {
  const int N = (int)symbols.size();
  if (N == 0)
    return std::string();

  // Flatten CDFs
  int num_cdfs = (int)cdfs.size();
  std::vector<int32_t> h_cdf_offsets(num_cdfs);
  std::vector<int32_t> h_cdf_sizes = cdfs_sizes;
  std::vector<int32_t> h_offsets = offsets;
  int total_cdf_elems = 0;
  for (int i = 0; i < num_cdfs; ++i) {
    h_cdf_offsets[i] = total_cdf_elems;
    total_cdf_elems += cdfs_sizes[i];
  }
  std::vector<int32_t> h_cdf_data(total_cdf_elems);
  for (int i = 0; i < num_cdfs; ++i) {
    std::memcpy(&h_cdf_data[h_cdf_offsets[i]], cdfs[i].data(),
                sizeof(int32_t) * cdfs_sizes[i]);
  }

  // Device buffers
  int32_t *d_symbols = nullptr, *d_indexes = nullptr;
  int32_t *d_cdf_data = nullptr, *d_cdf_offsets = nullptr,
          *d_cdf_sizes = nullptr, *d_offsets = nullptr;

  CUDA_CHECK(cudaMalloc((void **)&d_symbols, sizeof(int32_t) * N));
  CUDA_CHECK(cudaMalloc((void **)&d_indexes, sizeof(int32_t) * N));
  CUDA_CHECK(cudaMemcpy(d_symbols, symbols.data(), sizeof(int32_t) * N,
                        cudaMemcpyHostToDevice));
  CUDA_CHECK(cudaMemcpy(d_indexes, indexes.data(), sizeof(int32_t) * N,
                        cudaMemcpyHostToDevice));

  CUDA_CHECK(
      cudaMalloc((void **)&d_cdf_data, sizeof(int32_t) * total_cdf_elems));
  CUDA_CHECK(cudaMalloc((void **)&d_cdf_offsets, sizeof(int32_t) * num_cdfs));
  CUDA_CHECK(cudaMalloc((void **)&d_cdf_sizes, sizeof(int32_t) * num_cdfs));
  CUDA_CHECK(cudaMalloc((void **)&d_offsets, sizeof(int32_t) * num_cdfs));

  CUDA_CHECK(cudaMemcpy(d_cdf_data, h_cdf_data.data(),
                        sizeof(int32_t) * total_cdf_elems,
                        cudaMemcpyHostToDevice));
  CUDA_CHECK(cudaMemcpy(d_cdf_offsets, h_cdf_offsets.data(),
                        sizeof(int32_t) * num_cdfs, cudaMemcpyHostToDevice));
  CUDA_CHECK(cudaMemcpy(d_cdf_sizes, h_cdf_sizes.data(),
                        sizeof(int32_t) * num_cdfs, cudaMemcpyHostToDevice));
  CUDA_CHECK(cudaMemcpy(d_offsets, h_offsets.data(), sizeof(int32_t) * num_cdfs,
                        cudaMemcpyHostToDevice));

  DeviceCDFView dview{d_cdf_data, d_cdf_offsets, d_cdf_sizes, d_offsets,
                      num_cdfs};

  // Output buffers per lane (reverse write). Heuristic upper bound: each symbol
  // can flush at most once, so N words across all lanes is usually enough.
  // We over-allocate 2*N to be safe, divided evenly per lane.
  const int max_words_total = std::max(8, 2 * N); // conservative
  const int max_words_per_lane = (max_words_total + 3) / 4;

  uint32_t *d_lane_buf = nullptr;
  int *d_lane_head = nullptr;
  uint64_t *d_lane_states = nullptr;
  CUDA_CHECK(cudaMalloc((void **)&d_lane_buf,
                        sizeof(uint32_t) * 4 * max_words_per_lane));
  CUDA_CHECK(cudaMalloc((void **)&d_lane_head, sizeof(int) * 4));
  CUDA_CHECK(cudaMalloc((void **)&d_lane_states, sizeof(uint64_t) * 4));

  // Initialize lane buffers with a pattern (optional)
  CUDA_CHECK(
      cudaMemset(d_lane_buf, 0xCC, sizeof(uint32_t) * 4 * max_words_per_lane));

  // Launch one block, 4 threads
  encode_interleaved4_kernel<<<1, 4>>>(d_symbols, d_indexes, N, dview,
                                       d_lane_buf, d_lane_head,
                                       max_words_per_lane, d_lane_states);
  CUDA_CHECK(cudaDeviceSynchronize());

  // Copy results back
  std::vector<uint32_t> h_lane_buf(4 * max_words_per_lane);
  std::vector<int> h_lane_head(4);
  std::vector<uint64_t> h_lane_states(4);
  CUDA_CHECK(cudaMemcpy(h_lane_buf.data(), d_lane_buf,
                        sizeof(uint32_t) * 4 * max_words_per_lane,
                        cudaMemcpyDeviceToHost));
  CUDA_CHECK(cudaMemcpy(h_lane_head.data(), d_lane_head, sizeof(int) * 4,
                        cudaMemcpyDeviceToHost));
  CUDA_CHECK(cudaMemcpy(h_lane_states.data(), d_lane_states,
                        sizeof(uint64_t) * 4, cudaMemcpyDeviceToHost));

  // Assemble final output:
  // For each lane, valid data are h_lane_buf[lane][head+1 ...
  // max_words_per_lane-1] (reverse order). We must append the rANS final states
  // (x) for the 4 lanes.
  std::vector<uint32_t> words;
  words.reserve(max_words_total + 4);
  for (int lane = 0; lane < 4; ++lane) {
    int start = h_lane_head[lane] + 1;
    if (start < 0)
      start = 0;
    for (int idx = start; idx < max_words_per_lane; ++idx) {
      words.push_back(h_lane_buf[lane * max_words_per_lane + idx]);
    }
  }
  // Append final states (flush): write x as two 32-bit words each (high then
  // low), like CPU flush. To mimic Rans64EncFlush, we push last state: two
  // 32-bit chunks.
  for (int lane = 0; lane < 4; ++lane) {
    uint64_t x = h_lane_states[lane];
    words.push_back((uint32_t)(x >> 32));
    words.push_back((uint32_t)(x & 0xffffffffu));
  }

  // Convert to bytes (little-endian, contiguous)
  const int nbytes = (int)words.size() * sizeof(uint32_t);
  std::string encoded;
  encoded.resize(nbytes);
  std::memcpy(encoded.data(), words.data(), nbytes);

  // Cleanup
  cudaFree(d_symbols);
  cudaFree(d_indexes);
  cudaFree(d_cdf_data);
  cudaFree(d_cdf_offsets);
  cudaFree(d_cdf_sizes);
  cudaFree(d_offsets);
  cudaFree(d_lane_buf);
  cudaFree(d_lane_head);
  cudaFree(d_lane_states);

  return encoded;
}
#endif // USE_CUDA

// --------------------------- Original CPU path ---------------------------

void BufferedRansEncoder::encode_with_indexes(
    const std::vector<int32_t> &symbols, const std::vector<int32_t> &indexes,
    const std::vector<std::vector<int32_t>> &cdfs,
    const std::vector<int32_t> &cdfs_sizes,
    const std::vector<int32_t> &offsets) {
  assert(cdfs.size() == cdfs_sizes.size());
  assert_cdfs(cdfs, cdfs_sizes);

#ifdef USE_CUDA
  // GPU path uses the non-buffered RansEncoder for simplicity;
  // we keep BufferedRansEncoder behavior by delegating to flush()-like flow.
  (void)symbols;
  (void)indexes;
  (void)cdfs;
  (void)cdfs_sizes;
  (void)offsets;
  throw std::runtime_error(
      "BufferedRansEncoder.encode_with_indexes is not supported with USE_CUDA. "
      "Use RansEncoder.encode_with_indexes instead.");
#else
  // CPU original behavior
  for (size_t i = 0; i < symbols.size(); ++i) {
    const int32_t cdf_idx = indexes[i];
    assert(cdf_idx >= 0);
    assert(cdf_idx < (int)cdfs.size());

    const auto &cdf = cdfs[cdf_idx];

    const int32_t max_value = cdfs_sizes[cdf_idx] - 2;
    assert(max_value >= 0);
    assert((max_value + 1) < (int)cdf.size());

    int32_t value = symbols[i] - offsets[cdf_idx];

    uint32_t raw_val = 0;
    if (value < 0) {
      raw_val = -2 * value - 1;
      value = max_value;
    } else if (value >= max_value) {
      raw_val = 2 * (value - max_value);
      value = max_value;
    }

    assert(value >= 0);
    assert(value < cdfs_sizes[cdf_idx] - 1);

    _syms.push_back({static_cast<uint16_t>(cdf[value]),
                     static_cast<uint16_t>(cdf[value + 1] - cdf[value]),
                     false});

    if (value == max_value) {
      int32_t n_bypass = 0;
      while ((raw_val >> (n_bypass * bypass_precision)) != 0)
        ++n_bypass;

      int32_t val = n_bypass;
      while (val >= max_bypass_val) {
        _syms.push_back({max_bypass_val, max_bypass_val + 1, true});
        val -= max_bypass_val;
      }
      _syms.push_back(
          {static_cast<uint16_t>(val), static_cast<uint16_t>(val + 1), true});

      for (int32_t j = 0; j < n_bypass; ++j) {
        const int32_t v = (raw_val >> (j * bypass_precision)) & max_bypass_val;
        _syms.push_back(
            {static_cast<uint16_t>(v), static_cast<uint16_t>(v + 1), true});
      }
    }
  }
#endif
}

py::bytes BufferedRansEncoder::flush() {
#ifdef USE_CUDA
  throw std::runtime_error(
      "BufferedRansEncoder.flush() is not supported with USE_CUDA. Use "
      "RansEncoder.encode_with_indexes instead.");
#else
  Rans64State rans;
  Rans64EncInit(&rans);

  std::vector<uint32_t> output(_syms.size(), 0xCC);
  uint32_t *ptr = output.data() + output.size();
  assert(ptr != nullptr);

  while (!_syms.empty()) {
    const RansSymbol sym = _syms.back();
    if (!sym.bypass) {
      Rans64EncPut(&rans, &ptr, sym.start, sym.range, precision);
    } else {
      Rans64EncPutBits(&rans, &ptr, sym.start, bypass_precision);
    }
    _syms.pop_back();
  }
  Rans64EncFlush(&rans, &ptr);

  const int nbytes =
      std::distance(ptr, output.data() + output.size()) * sizeof(uint32_t);
  return std::string(reinterpret_cast<char *>(ptr), nbytes);
#endif
}

py::bytes
RansEncoder::encode_with_indexes(const std::vector<int32_t> &symbols,
                                 const std::vector<int32_t> &indexes,
                                 const std::vector<std::vector<int32_t>> &cdfs,
                                 const std::vector<int32_t> &cdfs_sizes,
                                 const std::vector<int32_t> &offsets) {
  assert(cdfs.size() == cdfs_sizes.size());
  assert_cdfs(cdfs, cdfs_sizes);

#ifdef USE_CUDA
  // GPU implementation (4-way interleaved)
  std::string encoded =
      encode_with_cuda_impl(symbols, indexes, cdfs, cdfs_sizes, offsets);
  return py::bytes(encoded);
#else
  BufferedRansEncoder buffered_rans_enc;
  buffered_rans_enc.encode_with_indexes(symbols, indexes, cdfs, cdfs_sizes,
                                        offsets);
  return buffered_rans_enc.flush();
#endif
}

std::vector<int32_t>
RansDecoder::decode_with_indexes(const std::string &encoded,
                                 const std::vector<int32_t> &indexes,
                                 const std::vector<std::vector<int32_t>> &cdfs,
                                 const std::vector<int32_t> &cdfs_sizes,
                                 const std::vector<int32_t> &offsets) {
  assert(cdfs.size() == cdfs_sizes.size());
  assert_cdfs(cdfs, cdfs_sizes);

  std::vector<int32_t> output(indexes.size());

  Rans64State rans;
  uint32_t *ptr = (uint32_t *)encoded.data();
  assert(ptr != nullptr);
  Rans64DecInit(&rans, &ptr);

  for (int i = 0; i < static_cast<int>(indexes.size()); ++i) {
    const int32_t cdf_idx = indexes[i];
    assert(cdf_idx >= 0);
    assert(cdf_idx < (int)cdfs.size());

    const auto &cdf = cdfs[cdf_idx];

    const int32_t max_value = cdfs_sizes[cdf_idx] - 2;
    assert(max_value >= 0);
    assert((max_value + 1) < (int)cdf.size());

    const int32_t offset = offsets[cdf_idx];

    const uint32_t cum_freq = Rans64DecGet(&rans, precision);

    const auto cdf_end = cdf.begin() + cdfs_sizes[cdf_idx];
    const auto it = std::find_if(
        cdf.begin(), cdf_end, [cum_freq](uint32_t v) { return v > cum_freq; });
    assert(it != cdf_end + 1);
    const uint32_t s = std::distance(cdf.begin(), it) - 1;

    Rans64DecAdvance(&rans, &ptr, cdf[s], cdf[s + 1] - cdf[s], precision);

    int32_t value = static_cast<int32_t>(s);

    if (value == max_value) {
      int32_t val = Rans64DecGetBits(&rans, &ptr, bypass_precision);
      int32_t n_bypass = val;

      while (val == max_bypass_val) {
        val = Rans64DecGetBits(&rans, &ptr, bypass_precision);
        n_bypass += val;
      }

      int32_t raw_val = 0;
      for (int j = 0; j < n_bypass; ++j) {
        val = Rans64DecGetBits(&rans, &ptr, bypass_precision);
        assert(val <= max_bypass_val);
        raw_val |= val << (j * bypass_precision);
      }
      value = raw_val >> 1;
      if (raw_val & 1)
        value = -value - 1;
      else
        value += max_value;
    }

    output[i] = value + offset;
  }

  return output;
}

void RansDecoder::set_stream(const std::string &encoded) {
  _stream = encoded;
  uint32_t *ptr = (uint32_t *)_stream.data();
  assert(ptr != nullptr);
  _ptr = ptr;
  Rans64DecInit(&_rans, &_ptr);
}

std::vector<int32_t>
RansDecoder::decode_stream(const std::vector<int32_t> &indexes,
                           const std::vector<std::vector<int32_t>> &cdfs,
                           const std::vector<int32_t> &cdfs_sizes,
                           const std::vector<int32_t> &offsets) {
  assert(cdfs.size() == cdfs_sizes.size());
  assert_cdfs(cdfs, cdfs_sizes);

  std::vector<int32_t> output(indexes.size());
  assert(_ptr != nullptr);

  for (int i = 0; i < static_cast<int>(indexes.size()); ++i) {
    const int32_t cdf_idx = indexes[i];
    assert(cdf_idx >= 0);
    assert(cdf_idx < (int)cdfs.size());

    const auto &cdf = cdfs[cdf_idx];

    const int32_t max_value = cdfs_sizes[cdf_idx] - 2;
    assert(max_value >= 0);
    assert((max_value + 1) < (int)cdf.size());

    const int32_t offset = offsets[cdf_idx];

    const uint32_t cum_freq = Rans64DecGet(&_rans, precision);

    const auto cdf_end = cdf.begin() + cdfs_sizes[cdf_idx];
    const auto it = std::find_if(cdf.begin(), cdf_end,
                                 [cum_freq](int v) { return v > cum_freq; });
    assert(it != cdf_end + 1);
    const uint32_t s = std::distance(cdf.begin(), it) - 1;

    Rans64DecAdvance(&_rans, &_ptr, cdf[s], cdf[s + 1] - cdf[s], precision);

    int32_t value = static_cast<int32_t>(s);

    if (value == max_value) {
      int32_t val = Rans64DecGetBits(&_rans, &_ptr, bypass_precision);
      int32_t n_bypass = val;

      while (val == max_bypass_val) {
        val = Rans64DecGetBits(&_rans, &_ptr, bypass_precision);
        n_bypass += val;
      }

      int32_t raw_val = 0;
      for (int j = 0; j < n_bypass; ++j) {
        val = Rans64DecGetBits(&_rans, &_ptr, bypass_precision);
        assert(val <= max_bypass_val);
        raw_val |= val << (j * bypass_precision);
      }
      value = raw_val >> 1;
      if (raw_val & 1)
        value = -value - 1;
      else
        value += max_value;
    }

    output[i] = value + offset;
  }

  return output;
}

PYBIND11_MODULE(ans, m) {
  m.attr("__name__") = "compressai.ans";
  m.doc() = "range Asymmetric Numeral System python bindings (CPU/CUDA)";

  py::class_<BufferedRansEncoder>(m, "BufferedRansEncoder")
      .def(py::init<>())
      .def("encode_with_indexes",
           py::overload_cast<
               const std::vector<int32_t> &, const std::vector<int32_t> &,
               const std::vector<std::vector<int32_t>> &,
               const std::vector<int32_t> &, const std::vector<int32_t> &>(
               &BufferedRansEncoder::encode_with_indexes))
      .def("flush", &BufferedRansEncoder::flush);

  py::class_<RansEncoder>(m, "RansEncoder")
      .def(py::init<>())
      .def("encode_with_indexes",
           py::overload_cast<
               const std::vector<int32_t> &, const std::vector<int32_t> &,
               const std::vector<std::vector<int32_t>> &,
               const std::vector<int32_t> &, const std::vector<int32_t> &>(
               &RansEncoder::encode_with_indexes));

  py::class_<RansDecoder>(m, "RansDecoder")
      .def(py::init<>())
      .def("set_stream", &RansDecoder::set_stream)
      .def("decode_stream",
           py::overload_cast<const std::vector<int32_t> &,
                             const std::vector<std::vector<int32_t>> &,
                             const std::vector<int32_t> &,
                             const std::vector<int32_t> &>(
               &RansDecoder::decode_stream))
      .def("decode_with_indexes",
           py::overload_cast<const std::string &, const std::vector<int32_t> &,
                             const std::vector<std::vector<int32_t>> &,
                             const std::vector<int32_t> &,
                             const std::vector<int32_t> &>(
               &RansDecoder::decode_with_indexes),
           "Decode a string to a list of symbols");
}
