#include <torch/extension.h>

#include <immintrin.h>
#include <omp.h>

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <string>
#include <vector>

/* Rounding modes accepted (as 'rmode') by the conversion kernels below. */
enum ROUNDING_MODES {
  ROUND_RTZ = 0,        /* round toward zero: no bias added, plain truncation */
  ROUND_RNE = 1,        /* round to nearest, ties to even */
  ROUND_STOCHASTIC = 2, /* stochastic rounding using the PRNG streams below */
  ROUND_RNAZ = 3,       /* round to nearest, ties away from zero */
  ROUND_RNTZ = 4,       /* round to nearest, ties toward zero */
  ROUND_PINF = 5,       /* round up, toward +infinity */
  ROUND_NINF = 6        /* round down, toward -infinity */
}; // Selects how a value is rounded when quantized to FP8 (E4M3/E5M2).

namespace {

  /* Bit-level view of a 16-bit half value: 'u' exposes the raw bits of
     the at::Half payload 'f' so the kernels can do mask/rounding
     arithmetic directly on the fp16 representation. */
  typedef union half_t {
    unsigned short u;
      at::Half f;
  } __half_t;

  /* Bit-level view of a 32-bit float: 'u' = raw bits, 'f' = value.
     Used below to isolate the exponent field when deriving block scales. */
  typedef union ufloat32 {
    unsigned u;
    float f;
  } __float_t;

/* The following PRNG implementation is adapted from the xoshiro128++ generator:
    http://prng.di.unimi.it/xoshiro128plusplus.c
    main page: http://prng.di.unimi.it/
*/
  /* Sixteen independent 128-bit PRNG states (4 x uint32 each) for the
     scalar stochastic-rounding paths; sptr_ below indexes them round-robin
     by element group.
     NOTE(review): file-scope mutable state — concurrent callers advancing
     the same state race on it; confirm single-threaded use of the scalar
     stochastic path. */
  static uint32_t s1_[4] = { 1387366120, 2798441831, 888998500 , 1099633400 };
  static uint32_t s2_[4] = { 2034269327, 2125325156, 1209715489, 1931656721 };
  static uint32_t s3_[4] = { 1555452618, 650181557 , 883695203 , 627677842  };
  static uint32_t s4_[4] = { 4195248041, 2146478152, 480059239 , 1468956197 };
  static uint32_t s5_[4] = { 1252084877, 500390994 , 977516591 , 1950666000 };
  static uint32_t s6_[4] = { 3936597502, 834151069 , 1477014702, 734008143  };
  static uint32_t s7_[4] = { 1983400973, 1164103095, 2110188261, 2019272068 };
  static uint32_t s8_[4] = { 1877096364, 2833629967, 4196320416, 1774181187 };
  static uint32_t s9_[4] = { 702309618 , 4077815558, 1512057936, 1868769368 };
  static uint32_t s10_[4] =
    { 510001215 , 966559856 , 776583255 , 1475621065 };
  static uint32_t s11_[4] =
    { 1271806057, 1881312534, 478635452 , 814821902  };
  static uint32_t s12_[4] =
    { 733990058 , 1889991804, 1108257970, 1093480892 };
  static uint32_t s13_[4] =
    { 4273743809, 4167473370, 558000409 , 1594848927 };
  static uint32_t s14_[4] =
    { 444870959 , 1595722866, 1064124488, 3637102547 };
  static uint32_t s15_[4] =
    { 703721499 , 3896407831, 1002360059, 1427395742 };
  static uint32_t s16_[4] =
    { 1295231497, 1254972431, 1423497865, 861918264  };

/* seed pointer array: sptr_[k] is the k-th independent PRNG state */
  static uint32_t *sptr_[16] = { s1_, s2_, s3_, s4_, s5_, s6_, s7_, s8_, s9_,
    s10_, s11_, s12_, s13_, s14_, s15_, s16_
  };

  /* Rotate 'x' left by 'k' bits ('k' taken modulo 32).  Guarding k == 0
     avoids the undefined 32-bit right shift the naive form performs
     ((x >> 32) is UB); existing callers (k == 11) are unaffected. */
  static inline uint32_t rotl (const uint32_t x, int k) {
    k &= 31;
    return k ? ((x << k) | (x >> (32 - k))) : x;
  }

  /* Advance one 128-bit PRNG state (4 x uint32) in place and return the
     sum ps[0] + ps[3] taken from the state *before* the update.
     NOTE(review): mutates the caller's state array; not safe when several
     threads share the same seed block — confirm callers serialize access. */
  uint32_t rand_xorshft128plus_scalar (uint32_t * ps) {
    uint32_t s0 = ps[0], s1 = ps[1], s2 = ps[2], s3 = ps[3];

    const uint32_t out = s0 + s3;   /* output drawn from the old state */
    const uint32_t t = s1 << 9;

    /* xoshiro-style state transition */
    s2 ^= s0;
    s3 ^= s1;
    s1 ^= s2;
    s0 ^= s3;
    s2 ^= t;
    s3 = rotl (s3, 11);

    ps[0] = s0;
    ps[1] = s1;
    ps[2] = s2;
    ps[3] = s3;
    return out;
  }

  /* Convert a double to float with the current SSE rounding mode
     (round-to-nearest-even by default).  Reads the low lane directly via
     _mm_cvtss_f32 instead of the previous _mm_store_ps into a plain
     stack array, which is only guaranteed 4-byte aligned while
     _mm_store_ps requires 16-byte alignment (potential fault). */
  float __double2float_rn (double inval) {
    return _mm_cvtss_f32 (_mm_cvtpd_ps (_mm_set1_pd (inval)));
  }

  /* Convert a float to its IEEE fp16 bit pattern, round-to-nearest-even,
     exceptions suppressed (F16C intrinsic). */
  unsigned short __float2half_rn (float inval) {
    return _cvtss_sh (inval, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
  }

  /* Expand an IEEE fp16 bit pattern back to float (F16C intrinsic). */
  float __half2float (unsigned short h_val) {
    return _cvtsh_ss (h_val);
  }

  /* Convert a double/float/at::Half value to float (round-to-nearest).
     Returns 0.0f for any other scalar_t instead of reading the
     uninitialized local the previous version left behind. */
  template < typename scalar_t > float __anyfloat2float_rn (scalar_t a_) {
    float f_ = 0.0f;  /* defined result even if no branch below matches */

    if (std::is_same < scalar_t, double >::value) {
      f_ = __double2float_rn (a_);
    } else if (std::is_same < scalar_t, float >::value) {
      f_ = a_;
    } else if (std::is_same < scalar_t, at::Half >::value) {
      f_ = __half2float ((at::Half) a_);
    }
    return f_;
  }

  /* Write float 'f_' into *out as scalar_t (double/float/at::Half).
     NOTE(review): the at::Half branch routes the raw fp16 bits through
     at::Half's numeric conversion, which looks suspect (bits vs. value);
     this helper is not called from this file — confirm before using it. */
  template < typename scalar_t >
    void __float2anyfloat_rn (float f_, scalar_t * out) {
    scalar_t a_ = scalar_t ();  /* zero-init: defined value if no branch matches */

    if (std::is_same < scalar_t, double >::value) {
      a_ = (scalar_t) (f_);
    } else if (std::is_same < scalar_t, float >::value) {
      a_ = f_;
    } else if (std::is_same < scalar_t, at::Half >::value) {
      a_ = (at::Half) __float2half_rn (f_);
    }
    *out = a_;
  }

  /* Convert a double/float/at::Half value to its fp16 bit pattern (RNE).
     Returns 0 for unsupported scalar_t instead of an uninitialized value. */
  template < typename scalar_t >
    unsigned short __anyfloat2half_rn (scalar_t f_) {
    unsigned short h_ = 0;  /* defined value if no branch below matches */

    if (std::is_same < scalar_t, double >::value) {
      h_ = __float2half_rn (__double2float_rn (f_));
    } else if (std::is_same < scalar_t, float >::value) {
      h_ = __float2half_rn (f_);
    } else if (std::is_same < scalar_t, at::Half >::value) {
      /* at::Half already holds fp16 bits; copy them out via memcpy
         instead of the previous pointer cast (strict-aliasing safe). */
      std::memcpy (&h_, &f_, sizeof (h_));
    }
    return h_;
  }

  /* Expand an fp16 bit pattern into *out as scalar_t
     (double/float/at::Half), multiplied by 'scale' (default 1.0). */
  template < typename scalar_t >
    void __half2anyfloat (unsigned short h_, scalar_t * out, scalar_t scale=1.0) {
    scalar_t f_ = scalar_t ();  /* zero-init: defined value if no branch matches */

    if (std::is_same < scalar_t, double >::value) {
      f_ = (scalar_t) __half2float (h_);
    } else if (std::is_same < scalar_t, float >::value) {
      f_ = __half2float (h_);
    } else if (std::is_same < scalar_t, at::Half >::value) {
      f_ = *((at::Half *) & h_);  /* reinterpret the raw fp16 bits */
    }
    *out = scale*f_;
  }

  /* Reduce g_data[0..n): writes the sum to g_odata[0] and the sum of
     squares to g_odata[1]. */
  template < typename scalar_t >
    inline void reduce0 (scalar_t * g_data, float *g_odata, unsigned int n) {
    float sum = 0.0, sumsq = 0.0;

#pragma omp parallel for reduction(+: sum), reduction(+: sumsq)
    for (unsigned int i = 0; i < n; i++) {
      /* convert each element once instead of three times per iteration */
      const float v = __anyfloat2float_rn (g_data[i]);
      sum += v;
      sumsq += v * v;
    }
    g_odata[0] = sum;
    g_odata[1] = sumsq;
  }

  /* Reduce g_data[0..n) to its maximum absolute value in g_odata[0]. */
  template < typename scalar_t >
    inline void absmax0 (scalar_t * g_data, float *g_odata, unsigned int n) {
    float absmax = 0.0;

#pragma omp parallel for reduction(max: absmax)
    for (unsigned int i = 0; i < n; i++) {
      const float mag = fabsf (__anyfloat2float_rn (g_data[i]));
      if (mag > absmax)
        absmax = mag;
    }
    g_odata[0] = absmax;
  }

  /* Vectorized counterpart of rand_xorshft128plus_scalar: advances eight
     independent 4x32-bit PRNG states held in the 32-byte-aligned arrays
     vs0..vs3 (lane k of each array belongs to stream k) and returns the
     eight pre-update (s0 + s3) outputs.
     The state is loaded into registers once, transformed, and stored back
     once, instead of the previous round-trip through memory after every
     xor step (same final state and return value, fewer loads/stores). */
  static inline __m256i _mm256_rand_xorshft128plus_epi32(uint32_t *vs0,
                                                         uint32_t *vs1,
                                                         uint32_t *vs2,
                                                         uint32_t *vs3) {
    __m256i s0 = _mm256_load_si256((const __m256i *)vs0);
    __m256i s1 = _mm256_load_si256((const __m256i *)vs1);
    __m256i s2 = _mm256_load_si256((const __m256i *)vs2);
    __m256i s3 = _mm256_load_si256((const __m256i *)vs3);

    const __m256i vrplus = _mm256_add_epi32(s0, s3);  /* output: old s0 + s3 */
    const __m256i vt = _mm256_slli_epi32(s1, 9);

    /* state transition (mirrors the scalar version) */
    s2 = _mm256_xor_si256(s2, s0);
    s3 = _mm256_xor_si256(s3, s1);
    s1 = _mm256_xor_si256(s1, s2);
    s0 = _mm256_xor_si256(s0, s3);
    s2 = _mm256_xor_si256(s2, vt);
    /* rotate-left each 32-bit lane of s3 by 11 */
    s3 = _mm256_or_si256(_mm256_slli_epi32(s3, 11),
                         _mm256_srli_epi32(s3, 32 - 11));

    _mm256_store_si256((__m256i *)vs0, s0);
    _mm256_store_si256((__m256i *)vs1, s1);
    _mm256_store_si256((__m256i *)vs2, s2);
    _mm256_store_si256((__m256i *)vs3, s3);

    return vrplus;
  }

  /* Emulate FP8 E5M2 with nearest rounding on 16 floats per iteration:
     scale, convert to fp16, apply a rounding bias in the fp16 bit domain,
     zero the low 8 mantissa bits (leaving 2), convert back and unscale.
     'size' must be a multiple of 16 (callers guarantee a multiple of 32).
     Removed the unused 'vfixup' constant the original declared. */
  void cvt_fp32_e5m2_rne_intrinsic (const float *__restrict__ in, float *out,
           int size, float scale) {

#pragma omp parallel for
    for (int i = 0; i < size; i += 16){
      const __m256i vnaninf    = _mm256_set1_epi16 (0x7c00); /* fp16 exp==all-ones: NaN/Inf */
      const __m256i vrneadd    = _mm256_set1_epi16 (0x007f); /* bias for dropping 8 bits */
      const __m256i vfixupmask = _mm256_set1_epi16 (0x0100); /* lsb of the kept mantissa */

      __m256 s_ = _mm256_set1_ps (scale);
      __m256 sr_ = _mm256_set1_ps (1.0 / scale);
      __m256 b = _mm256_loadu_ps (&in[i]);
      __m256 a = _mm256_loadu_ps (&in[i + 8]);

      b = _mm256_mul_ps(b, s_);
      a = _mm256_mul_ps(a, s_);

      __m128i ah_ = _mm256_cvtps_ph(a, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
      __m128i bh_ = _mm256_cvtps_ph(b, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
      const __m256i a_ = _mm256_insertf128_si256(_mm256_castsi128_si256(bh_), ah_, 1);
      const __m256i maska1_ = _mm256_cmpeq_epi16(_mm256_and_si256(a_, vnaninf), vnaninf);
      const __m256i maska2_ = _mm256_cmpeq_epi16(_mm256_and_si256(a_, vfixupmask), vfixupmask);
      /* NOTE(review): the bias is applied where maska1_ & maska2_ holds,
         i.e. only to NaN/Inf bit patterns with the fixup bit set; an RNE
         step would normally target finite values (~maska1_).  Preserved
         as-is — confirm against the reference implementation. */
      __m256i a_rne_ = _mm256_blendv_epi8(a_, _mm256_add_epi16(a_, vrneadd), _mm256_and_si256(maska1_, maska2_));
      a_rne_ = _mm256_slli_epi16(_mm256_srli_epi16(a_rne_, 8), 8); /* truncate to E5M2 */

      bh_ = _mm256_extracti128_si256(a_rne_, 0);
      ah_ = _mm256_extracti128_si256(a_rne_, 1);
      b = _mm256_cvtph_ps(bh_);
      a = _mm256_cvtph_ps(ah_);

      _mm256_storeu_ps(&out[i], _mm256_mul_ps(b, sr_));
      _mm256_storeu_ps(&out[i + 8], _mm256_mul_ps(a, sr_));
    }
  }

  /* Emulate FP8 E5M2 with stochastic rounding on 16 floats per iteration.
     Random rounding bytes come from the vectorized PRNG; the seed arrays
     are firstprivate so each OpenMP thread works on its own copy.
     NOTE(review): firstprivate hands every thread an *identical* seed
     copy, so all threads draw the same random sequence — confirm this is
     intended. */
  void cvt_fp32_e5m2_stochastic_intrinsic(const float *__restrict__ in, float *out,
                                          int size, float scale) {
    /* eight parallel PRNG lanes; 32-byte aligned for _mm256_load_si256 */
    uint32_t vs0[8] __attribute__((aligned(32))) = {
        1387366120, 279844183, 888998500, 1099633400,
        1252084877, 500390994, 977516591, 1950666000};
    uint32_t vs1[8] __attribute__((aligned(32))) = {
        2034269327, 2125325156, 1209715489, 193165672,
        187709636,  28336299,   419632041,  1774181187};
    uint32_t vs2[8] __attribute__((aligned(32))) = {
        1555452618, 650181557,  883695203, 62767784,
        127180605,  1881312534, 478635452, 814821902};
    uint32_t vs3[8] __attribute__((aligned(32))) = {
        419524804, 2146478152, 480059239,  1468956197,
        444870959, 1595722866, 1064124488, 363710254};

#pragma omp parallel for firstprivate(vs0, vs1, vs2, vs3)
  for (int i = 0; i < size; i += 16) {
    const __m256i vnaninf    = _mm256_set1_epi16(0x7c00); /* fp16 NaN/Inf exponent */
    const __m256i vfixup     = _mm256_set1_epi16(0x0001); /* unused below */
    const __m256i vfixupmask = _mm256_set1_epi16(0x0100); /* lsb of kept mantissa */
    const __m256i vrneadd    = _mm256_set1_epi16(0x007f); /* RNE bias */
    const __m256i vdenorm    = _mm256_set1_epi16(0x03ff); /* below smallest fp16 normal */
    const __m256i vexmant    = _mm256_set1_epi16(0x7fff); /* exponent+mantissa (drop sign) */

    /* draw 8x32 random bits; the low 128 bits feed 16 random bytes below */
    __m256i rnd256 = _mm256_rand_xorshft128plus_epi32(vs0, vs1, vs2, vs3);
    __m128i rnbits = _mm256_extractf128_si256(rnd256, 0);

    __m256 s_ = _mm256_set1_ps(scale);
    __m256 sr_ = _mm256_set1_ps(1.0 / scale);

    __m256 b = _mm256_loadu_ps(&in[i]);
    __m256 a = _mm256_loadu_ps(&in[i + 8]);

    b = _mm256_mul_ps(b, s_);
    a = _mm256_mul_ps(a, s_);

    __m128i ah_ = _mm256_cvtps_ph(a, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
    __m128i bh_ = _mm256_cvtps_ph(b, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));

    /* pack the 16 fp16 values into one 256-bit register */
    const __m256i a_ =  _mm256_insertf128_si256(_mm256_castsi128_si256(bh_), ah_, 1);
    const __m256i maska1_ = _mm256_cmpeq_epi16(_mm256_and_si256(a_, vnaninf), vnaninf);
    const __m256i maska2_ = _mm256_cmpeq_epi16(_mm256_and_si256(a_, vfixupmask), vfixupmask);
    const __m256i maska4_ = _mm256_cmpgt_epi16(vdenorm, _mm256_and_si256(a_, vexmant));

    /* NOTE(review): the random-byte add is gated on maska1_ (NaN/Inf set)
       and the RNE fallback on maska4_ & maska2_; the scalar path instead
       randomizes finite normals — confirm the mask polarity. */
    __m256i a_sr_ = _mm256_blendv_epi8(a_, _mm256_add_epi16(a_, _mm256_cvtepu8_epi16(rnbits)), _mm256_andnot_si256(maska4_, maska1_));
    a_sr_ = _mm256_blendv_epi8(a_sr_, _mm256_add_epi16(a_sr_, vrneadd), _mm256_and_si256(maska4_, maska2_));
    /* truncate to E5M2: keep only the top 2 mantissa bits */
    a_sr_ = _mm256_slli_epi16(_mm256_srli_epi16(a_sr_, 8), 8);

    bh_ = _mm256_extracti128_si256(a_sr_, 0);
    ah_ = _mm256_extracti128_si256(a_sr_, 1);

    b = _mm256_cvtph_ps(bh_);
    a = _mm256_cvtph_ps(ah_);

    _mm256_storeu_ps(&out[i], _mm256_mul_ps(b, sr_));
    _mm256_storeu_ps(&out[i + 8], _mm256_mul_ps(a, sr_));
    }
  }
  /* Scalar reference path for FP8 E5M2 emulation (any size, any rounding
     mode).  Each input is scaled, converted to fp16, rounded/truncated to
     2 mantissa bits in the fp16 bit domain, converted back and unscaled. */
  void cvt_fp32_e5m2_scalar (const float *__restrict__ in, float *out,
                             int size, float scale, int rmode) {
    const int non_mant_bits = 5 /* exp bits */ + 1 /* sign */;
    const int lshift = 10 - (8 /* mbits */ - non_mant_bits);

    /* exactly one of these is 1 (or none, for ROUND_RTZ = truncate) */
    const unsigned short rne_mask   = (rmode == ROUND_RNE);        /* nearest-even */
    const unsigned short rnaz_mask  = (rmode == ROUND_RNAZ);       /* nearest, away from zero */
    const unsigned short rntz_mask  = (rmode == ROUND_RNTZ);       /* nearest, toward zero */
    const unsigned short sr_mask    = (rmode == ROUND_STOCHASTIC); /* stochastic */
    const unsigned short rpinf_mask = (rmode == ROUND_PINF);       /* toward +INF */
    const unsigned short rminf_mask = (rmode == ROUND_NINF);       /* toward -INF */

    const unsigned short mask_mant = (unsigned short) (0xFFFF << lshift);
    const unsigned short grs_bitmask = 0x00FF; /* guard/round/sticky bits */
    const unsigned short rne_tie = 0x0180;     /* tie pattern for nearest-even */

    const float scale_reciprocal = 1.0 / scale;

    for (int gid = 0; gid < size; gid++) {
      __half_t h;
      const float inval = scale * in[gid];

      h.u = __anyfloat2half_rn (inval);

      const unsigned short can_round = ((h.u & 0x7F00) <= 0x7B00) ? 1 : 0;
      const unsigned short is_normal = (((h.u & 0x7C00) <= 0x7800)
                                        && ((h.u & 0x7C00) >= 0x0400)) ? 1 : 0;
      const unsigned short is_denorm = ((h.u & 0x7C00) == 0x0) ? 1 : 0;
      const unsigned short is_naninf = ((h.u & 0x7C00) == 0x7C00) ? 1 : 0;

      /* nearest rounding masks */
      const unsigned short rnmask = (h.u & grs_bitmask);
      const unsigned short rnmask_tie = (h.u & rne_tie);

      if (!is_naninf) {
        if (sr_mask) {
          /* stochastic rounding with 16 rotating seed states */
          const int seed_index = (gid / 16);
          const unsigned short rand = (unsigned short)
            rand_xorshft128plus_scalar (sptr_[(seed_index % 16)]);
          /* normals: add a random byte below the kept mantissa bits */
          h.u += can_round * is_normal * (rand & 0xFF);
          /* denormals fall back to nearest-even */
          h.u += can_round * is_denorm *
            (((rnmask > 0x0080) || (rnmask_tie == rne_tie)) << lshift);
        } else {
          /* round to nearest even */
          h.u += can_round * rne_mask *
            (((rnmask > 0x0080) || (rnmask_tie == rne_tie)) << lshift);
          /* round to nearest, ties away from zero */
          h.u += can_round * rnaz_mask * ((rnmask >= 0x0080) << lshift);
          /* round to nearest, ties toward zero */
          h.u += can_round * rntz_mask * ((rnmask > 0x0080) << lshift);
          /* round toward +INF / -INF depending on sign */
          h.u += can_round * rpinf_mask * (h.f > 0) * ((rnmask >= 0x0080) << lshift);
          h.u += can_round * rminf_mask * (h.f < 0) * ((rnmask >= 0x0080) << lshift);
        }
      }
      /* truncate: drop the low 8 mantissa bits */
      h.u = (h.u & mask_mant);

      float f_;
      __half2anyfloat (h.u, &f_);
      out[gid] = f_ * scale_reciprocal;
    }
  }
  /* Emulate FP8 E4M3 with nearest rounding on 16 floats per iteration
     (fp16 bit-domain bias + truncation, same shape as the E5M2 path).
     'size' must be a multiple of 16.
     Fixes and cleanups versus the original:
       - the bias add read 'a_rne_' in its own initializer (uninitialized
         read, UB); it now biases 'a_' itself, matching the E5M2 path;
       - the saturation/denormal masks (vsatuval/vflush/vxdnorm, maska3_-
         maska5_, v_shft) were computed but never used and are removed —
         NOTE(review): unlike cvt_fp32_e4m3_scalar this path still has no
         saturation-to-448 or denormal handling; confirm against the
         scalar reference;
       - parallelized with OpenMP like its E5M2 twin (iterations are
         independent).
     NOTE(review): the truncation drops 8 mantissa bits while vrneadd /
     vfixupmask (0x3f / 0x80) imply a 7-bit drop for 3 mantissa bits —
     confirm intended precision. */
  void cvt_fp32_e4m3_rne_intrinsic (const float *__restrict__ in, float *out,
				   int size, float scale) {
    const __m256i vnaninf    = _mm256_set1_epi16 (0x7c00); /* fp16 NaN/Inf exponent */
    const __m256i vrneadd    = _mm256_set1_epi16 (0x003f); /* RNE bias */
    const __m256i vfixupmask = _mm256_set1_epi16 (0x0080); /* lsb of kept mantissa */

#pragma omp parallel for
    for (int i = 0; i < size; i += 16){
      __m256 s_ = _mm256_set1_ps(scale);
      __m256 sr_ = _mm256_set1_ps(1.0 / scale);
      __m256 b = _mm256_loadu_ps(&in[i]);
      __m256 a = _mm256_loadu_ps(&in[i + 8]);

      b = _mm256_mul_ps(b, s_);
      a = _mm256_mul_ps(a, s_);

      __m128i ah_ = _mm256_cvtps_ph(a, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
      __m128i bh_ = _mm256_cvtps_ph(b, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
      const __m256i a_ = _mm256_insertf128_si256(_mm256_castsi128_si256(bh_), ah_, 1);
      const __m256i maska1_ = _mm256_cmpeq_epi16(_mm256_and_si256(a_, vnaninf), vnaninf);
      const __m256i maska2_ = _mm256_cmpeq_epi16(_mm256_and_si256(a_, vfixupmask), vfixupmask);
      /* bias a_ (previously this added to uninitialized a_rne_) */
      __m256i a_rne_ = _mm256_blendv_epi8(a_, _mm256_add_epi16(a_, vrneadd), _mm256_and_si256(maska1_, maska2_));
      a_rne_ = _mm256_slli_epi16(_mm256_srli_epi16(a_rne_, 8), 8);
      bh_ = _mm256_extracti128_si256(a_rne_, 0);
      ah_ = _mm256_extracti128_si256(a_rne_, 1);
      b = _mm256_cvtph_ps(bh_);
      a = _mm256_cvtph_ps(ah_);
      _mm256_storeu_ps(&out[i], _mm256_mul_ps(b, sr_));
      _mm256_storeu_ps(&out[i + 8], _mm256_mul_ps(a, sr_));
    }
  }

  /* Emulate FP8 E4M3 with stochastic rounding on 16 floats per iteration.
     Runs serially: the scalar PRNG below mutates the shared seed states.
     Fixes versus the original: _mm_load_si128 on a plain stack array of
     unsigned int (only 4-byte aligned) could fault — replaced with the
     unaligned load; the unused 'vfixup' constant and the oversized
     rndbuf[16] were trimmed.
     NOTE(review): all 16 random bytes of rnbits feed _mm256_cvtepu8_epi16
     unmasked, while the scalar E4M3 path uses only 7 random bits per
     element (rand & 0x7F); confirm intended. */
  void cvt_fp32_e4m3_stochastic_intrinsic (const float *__restrict__ in,
					  float *out, int size, float scale) {
    const __m256i vnaninf    = _mm256_set1_epi16(0x7c00); /* fp16 NaN/Inf exponent */
    const __m256i vfixupmask = _mm256_set1_epi16(0x0100); /* tie/fixup bit */
    const __m256i vrneadd    = _mm256_set1_epi16(0x003f); /* RNE bias */
    const __m256i vdenorm    = _mm256_set1_epi16(0x03ff); /* below smallest fp16 normal */
    const __m256i vexmant    = _mm256_set1_epi16(0x7fff); /* exponent+mantissa (drop sign) */

    for (int i = 0; i < size; i += 16) {
      unsigned int rndbuf[8];
      /* draw fresh random words; only the low 128 bits (rndbuf[0..3])
         feed the 16 random rounding bytes below */
      for (int r = 0; r < 8; r++) {
	rndbuf[r] = (unsigned int) rand_xorshft128plus_scalar (sptr_[r]);
      }
      /* unaligned load: rndbuf has no 16-byte alignment guarantee */
      __m128i rnbits = _mm_loadu_si128 ((const __m128i *) &rndbuf[0]);

      __m256 s_  = _mm256_set1_ps (scale);
      __m256 sr_ = _mm256_set1_ps (1.0 / scale);
      __m256 b   = _mm256_loadu_ps (&in[i]);
      __m256 a   = _mm256_loadu_ps (&in[i + 8]);

      b = _mm256_mul_ps (b, s_);
      a = _mm256_mul_ps (a, s_);

      __m128i ah_ =
          _mm256_cvtps_ph(a, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
      __m128i bh_ =
          _mm256_cvtps_ph(b, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));

      /* pack the 16 fp16 values into one 256-bit register */
      const __m256i a_ =
          _mm256_insertf128_si256(_mm256_castsi128_si256(bh_), ah_, 1);

      const __m256i maska1_ =
          _mm256_cmpeq_epi16(_mm256_and_si256(a_, vnaninf), vnaninf);
      const __m256i maska2_ =
          _mm256_cmpeq_epi16(_mm256_and_si256(a_, vfixupmask), vfixupmask);
      const __m256i maska4_ =
          _mm256_cmpgt_epi16(vdenorm, _mm256_and_si256(a_, vexmant));

      /* NOTE(review): mask polarity mirrors the E5M2 stochastic path but
         differs from the scalar reference; confirm. */
      __m256i a_sr_ = _mm256_blendv_epi8(
          a_, _mm256_add_epi16(a_, _mm256_cvtepu8_epi16(rnbits)),
          _mm256_andnot_si256(maska4_, maska1_));

      a_sr_ = _mm256_blendv_epi8(a_sr_, _mm256_add_epi16(a_sr_, vrneadd),
                                 _mm256_and_si256(maska4_, maska2_));

      /* truncate: keep only the top 2 mantissa bits */
      a_sr_ = _mm256_slli_epi16(_mm256_srli_epi16(a_sr_, 8), 8);

      bh_ = _mm256_extracti128_si256(a_sr_, 0);
      ah_ = _mm256_extracti128_si256(a_sr_, 1);

      b = _mm256_cvtph_ps(bh_);
      a = _mm256_cvtph_ps(ah_);

      _mm256_storeu_ps(&out[i], _mm256_mul_ps(b, sr_));
      _mm256_storeu_ps(&out[i + 8], _mm256_mul_ps(a, sr_));
    }
  }
  /* Scalar reference path for FP8 E4M3 emulation (any size, any rounding
     mode).  Handles saturation to +/-448, flush-to-zero below the E4M3
     subnormal range, and denormal mantissa alignment explicitly.
     NOTE: now runs serially — the previous '#pragma omp parallel for'
     made concurrent calls to rand_xorshft128plus_scalar, which mutates
     the shared static seed state (a data race in stochastic mode); the
     E5M2 scalar path is serial as well. */
  void cvt_fp32_e4m3_scalar (const float *__restrict__ in, float *out,
                             int size, float scale, int rmode) {
    const int non_mant_bits = 4 /* exp bits */ + 1 /* sign */;
    const int lshift = 10 - (8 /* mbits */ - non_mant_bits);

    /* exactly one of these is 1 (or none, for ROUND_RTZ = truncate) */
    const unsigned short rne_mask   = (rmode == ROUND_RNE);
    const unsigned short rnaz_mask  = (rmode == ROUND_RNAZ);
    const unsigned short rntz_mask  = (rmode == ROUND_RNTZ);
    const unsigned short sr_mask    = (rmode == ROUND_STOCHASTIC);
    const unsigned short rpinf_mask = (rmode == ROUND_PINF);
    const unsigned short rminf_mask = (rmode == ROUND_NINF);

    const unsigned short mask_mant = (unsigned short) (0xFFFF << lshift);
    const unsigned short grs_bitmask = 0x007F; /* guard/round/sticky bits */
    const unsigned short rne_tie = 0x00C0;     /* tie pattern for nearest-even */
    const float scale_reciprocal = 1.0 / scale;

    for (int gid = 0; gid < size; gid++) {
      __half_t h;
      const float inval = scale * in[gid];

      h.u = __anyfloat2half_rn (inval);
      short exp_h = (short) ((h.u & 0x7C00) >> 10) - 15; /* unbiased fp16 exponent */
      const short sign_h = (h.u & 0x8000);
      short mantissa_h = (h.u & 0x03FF);

      unsigned short can_round = ((h.u & 0x7FFF) < 0x5F00) ? 1 : 0;
      const unsigned short is_normal = (((h.u & 0x7C00) <= 0x7800)
                                        && ((h.u & 0x7C00) >= 0x0400)) ? 1 : 0;
      const unsigned short is_denorm = ((h.u & 0x7C00) == 0x0) ? 1 : 0;
      const unsigned short is_naninf = ((h.u & 0x7C00) == 0x7C00) ? 1 : 0;

      if (exp_h > 8 || (can_round == 0)) {
        /* saturate values above the E4M3 maximum to +/-448.0 */
        mantissa_h = 0x0300;
        exp_h = 8;
        can_round = 0;
      } else if (exp_h < -9) {
        /* flush values below the 1-4-3 subnormal range to zero */
        exp_h = -15;
        mantissa_h = 0;
      } else if (exp_h < -6) {
        /* denormals: zero the mantissa bits that fall off the bottom */
        const int dshift = (-6 - exp_h);
        mantissa_h = (short) ((mantissa_h >> dshift) << dshift);
      }

      /* nearest rounding masks (computed after saturation/flush) */
      const unsigned short rnmask = (mantissa_h & grs_bitmask);
      const unsigned short rnmask_tie = (mantissa_h & rne_tie);

      if (!is_naninf) {
        if (sr_mask) {
          /* stochastic rounding with 16 rotating seed states */
          const int seed_index = (gid / 16);
          const unsigned short rand = (unsigned short)
            rand_xorshft128plus_scalar (sptr_[(seed_index % 16)]);
          /* normals: add 7 random bits below the kept mantissa bits */
          mantissa_h += can_round * is_normal * (rand & 0x7F);
          /* denormals fall back to nearest-even */
          mantissa_h += can_round * is_denorm *
            (((rnmask > 0x0040) || (rnmask_tie == rne_tie)) << lshift);
        } else {
          /* nearest-even / away-from-zero / toward-zero / +-INF */
          mantissa_h += can_round * rne_mask * (((rnmask > 0x0040) || (rnmask_tie == rne_tie)) << lshift);
          mantissa_h += can_round * rnaz_mask * ((rnmask >= 0x0040) << lshift);
          mantissa_h += can_round * rntz_mask * ((rnmask > 0x0040) << lshift);
          mantissa_h += can_round * rpinf_mask * (h.f > 0) * ((rnmask >= 0x0040) << lshift);
          mantissa_h += can_round * rminf_mask * (h.f < 0) * ((rnmask >= 0x0040) << lshift);
        }
      }
      /* truncate and reassemble the fp16 bit pattern */
      mantissa_h &= mask_mant;
      mantissa_h += ((exp_h + 15) << 10);
      mantissa_h |= sign_h;
      h.u = mantissa_h;

      float f;
      __half2anyfloat (h.u, &f);
      out[gid] = (f * scale_reciprocal);
    }
  }

  /* Quantize 'in' to FP8 E4M3 and write the dequantized result to 'out'.
     With block_norm, each block of 'block_size' elements gets a
     power-of-two scale derived from its absolute maximum; otherwise
     'in_scale' applies to the whole tensor.  Intrinsic paths are used
     when the element count is a multiple of 32, scalar otherwise.
     Fixes versus the original (both were data races under the parallel
     block loop):
       - the shared 'block_size' was truncated in place for the tail
         block, corrupting the geometry seen by other threads — replaced
         with a per-iteration length 'bs';
       - the shared 'scale' was overwritten per block — replaced with a
         per-iteration 'bscale'.
     The redundant nested '#pragma omp parallel for' on the absmax loop
     was dropped (the outer loop is already parallel). */
  template < typename scalar_t >
    void E4M3_Kernel (const scalar_t * __restrict__ in,
	  	      scalar_t * __restrict__ out,
		      const int size,
		      const scalar_t in_scale,
		      bool block_norm,
		      int block_size, int rmode) {
    float scale = in_scale;
    const float fmax = std::numeric_limits<scalar_t>::max();
    if (scale > fmax) {
      fprintf(stderr,"Error: Invalid scale factor : %.2e, make sure the scale is not larger than : %.2e\n", scale, fmax);
      exit(1);
    }

    if (block_norm == true) {
      const int nblocks = (size + (block_size - 1)) / block_size;

#pragma omp parallel for
      for (int b = 0; b < nblocks; b++) {
	const int start_index = (b * block_size);
	/* per-iteration length; handles the short tail block */
	const int bs = (start_index + block_size > size)
	                 ? (size - start_index) : block_size;

	float maxval = 0.0;
	for (int gid = start_index; gid < start_index + bs; gid++) {
	  maxval = (maxval < fabs (in[gid])) ? fabs (in[gid]) : maxval;
	}

	/* power-of-two block scale: keep only the exponent of absmax */
	__float_t f;
	f.f = maxval;
	f.u = (f.u & 0x7F800000);
	const float bscale = (2.0 * f.f) / 8.0;

	if ((bs % 32) == 0) {
	  if (rmode == ROUND_STOCHASTIC) {
	    cvt_fp32_e4m3_stochastic_intrinsic (&in[start_index], &out[start_index], bs, bscale);
	  } else {
	    cvt_fp32_e4m3_rne_intrinsic (&in[start_index], &out[start_index], bs, bscale);
	  }
	} else {
	  cvt_fp32_e4m3_scalar (&in[start_index], &out[start_index], bs, bscale, rmode);
	}
      }
    } else {
      if ((size % 32) == 0) {
	if (rmode == ROUND_STOCHASTIC) {
	  cvt_fp32_e4m3_stochastic_intrinsic (in, out, size, scale);
	} else {
	  cvt_fp32_e4m3_rne_intrinsic (in, out, size, scale);
	}
      } else {
	/* vectorize the largest multiple-of-32 prefix, finish scalar */
	const int vec_size = (size / 32) * 32;

	if (vec_size > 0) {
	  if (rmode == ROUND_STOCHASTIC) {
	    cvt_fp32_e4m3_stochastic_intrinsic (in, out, vec_size, scale);
	  } else {
	    cvt_fp32_e4m3_rne_intrinsic (in, out, vec_size, scale);
	  }
	}
	cvt_fp32_e4m3_scalar (&in[vec_size], &out[vec_size], size - vec_size, scale, rmode);
      }
    }
  }

  /* Quantize 'in' to FP8 E5M2 and write the dequantized result to 'out'.
     Mirrors E4M3_Kernel (block-norm scale, 32-multiple intrinsic fast
     path, scalar fallback) with the E5M2 converters and a /16384 block
     scale.  Same race fixes as E4M3_Kernel: per-iteration 'bs' instead
     of truncating the shared 'block_size', and per-iteration 'bscale'
     instead of overwriting the shared 'scale' inside the parallel loop;
     the nested absmax pragma was dropped. */
  template < typename scalar_t >
    void E5M2_Kernel (const scalar_t * __restrict__ in,
		      scalar_t * __restrict__ out,
		      const int size,
		      const scalar_t in_scale,
		      bool block_norm, int block_size, int rmode) {
    float scale = in_scale;
    const float fmax = std::numeric_limits<scalar_t>::max();
    if (scale > fmax) {
      fprintf(stderr,"Error: Invalid scale factor : %.2e, make sure the scale is not larger than : %.2e\n", scale, fmax);
      exit(1);
    }

    if (block_norm == true) {
      const int nblocks = (size + (block_size - 1)) / block_size;

#pragma omp parallel for
      for (int b = 0; b < nblocks; b++) {
	const int start_index = (b * block_size);
	/* per-iteration length; handles the short tail block */
	const int bs = (start_index + block_size > size)
	                 ? (size - start_index) : block_size;

	float maxval = 0.0;
	for (int gid = start_index; gid < start_index + bs; gid++) {
	  maxval = (maxval < fabs (in[gid])) ? fabs (in[gid]) : maxval;
	}

	/* power-of-two block scale: keep only the exponent of absmax */
	__float_t f;
	f.f = maxval;
	f.u = (f.u & 0x7F800000);
	const float bscale = (2.0 * f.f) / 16384.0;

	if ((bs % 32) == 0) {
	  if (rmode == ROUND_STOCHASTIC) {
	    cvt_fp32_e5m2_stochastic_intrinsic (&in[start_index], &out[start_index], bs, bscale);
	  } else {
	    cvt_fp32_e5m2_rne_intrinsic (&in[start_index], &out[start_index], bs, bscale);
	  }
	} else {
	  cvt_fp32_e5m2_scalar (&in[start_index], &out[start_index], bs, bscale, rmode);
	}
      }
    } else {
      if ((size % 32) == 0) {
	if (rmode == ROUND_STOCHASTIC) {
	  cvt_fp32_e5m2_stochastic_intrinsic (in, out, size, scale);
	} else {
	  cvt_fp32_e5m2_rne_intrinsic (in, out, size, scale);
	}
      } else {
	/* vectorize the largest multiple-of-32 prefix, finish scalar */
	const int vec_size = (size / 32) * 32;

	if (vec_size > 0) {
	  if (rmode == ROUND_STOCHASTIC) {
	    cvt_fp32_e5m2_stochastic_intrinsic (in, out, vec_size, scale);
	  } else {
	    cvt_fp32_e5m2_rne_intrinsic (in, out, vec_size, scale);
	  }
	}
	cvt_fp32_e5m2_scalar (&in[vec_size], &out[vec_size], size - vec_size, scale, rmode);
      }
    }
  }

  /* Dispatch on the mode string and run the matching FP8 kernel over the
     tensor's float data.  Returns the input tensor when 'inplace' is
     set, otherwise a freshly allocated result tensor.  An unrecognized
     mode runs no kernel (the non-inplace output stays all zeros). */
  std::vector < torch::Tensor > fpemu_common_function (torch::Tensor input,
					std::string mode,
					int size,
					bool inplace,
					float scale,
					bool block_norm,
					int block_size) {

    torch::Tensor output;
    if (!inplace)
      output = torch::zeros_like (input);

    const bool is_e4m3 = (mode == "E4M3_STOCHASTIC") || (mode == "E4M3_RNE");
    const bool is_e5m2 = (mode == "E5M2_STOCHASTIC") || (mode == "E5M2_RNE");

    if (is_e4m3 || is_e5m2) {
      const int rmode = (mode == "E4M3_STOCHASTIC" || mode == "E5M2_STOCHASTIC")
                          ? ROUND_STOCHASTIC : ROUND_RNE;
      float *src = input.data_ptr < float >();
      float *dst = inplace ? src : output.data_ptr < float >();

      if (is_e4m3) {
        E4M3_Kernel < float >(src, dst, size, scale, block_norm, block_size, rmode);
      } else {
        E5M2_Kernel < float >(src, dst, size, scale, block_norm, block_size, rmode);
      }
    }

    if (inplace) {
      return { input, };
    }
    return { output, };
  }

}//namespace

/* Python-facing forward entry point.  If the requested block size does
   not evenly partition the tensor (and is not the whole tensor), fall
   back to per-tensor scaling before dispatching. */
std::vector < torch::Tensor > fpemu_forward (torch::Tensor input,
					std::string mode,
					int size,
					bool inplace,
					float scale,
					bool block_norm,
					int block_size) {
  if (block_norm && block_size != size && (size % block_size) != 0) {
    block_norm = false;
    block_size = 1;
  }
  return fpemu_common_function (input, mode, size, inplace, scale, block_norm,
                                block_size);
}

/* Python-facing backward entry point: identical dispatch to
   fpemu_forward, applied to the incoming gradient tensor. */
std::vector < torch::Tensor > fpemu_backward (torch::Tensor grad,
					std::string mode,
					int size,
					bool inplace,
					float scale,
					bool block_norm,
					int block_size) {
  if (block_norm && block_size != size && (size % block_size) != 0) {
    block_norm = false;
    block_size = 1;
  }
  return fpemu_common_function (grad, mode, size, inplace, scale, block_norm,
                                block_size);
}

/* Register the extension's forward/backward entry points with pybind11. */
PYBIND11_MODULE (TORCH_EXTENSION_NAME, m) {
  m.def ("forward", &fpemu_forward, "FPEmu forward");
  m.def ("backward", &fpemu_backward, "FPEmu backward");
}