#include <stdint.h>
#include <stddef.h>
#include <string.h>   // memcpy (used by ggml_compute_fp32_to_fp16, NEON path)
#include <math.h>
#include <float.h>
#include <assert.h>   // was <cassert>: C++ header, invalid in a C translation unit
#include <stdio.h>
#include <arm_neon.h>

// Scalar element typedefs.  NOTE(review): these alias ARM ACLE types from
// <arm_neon.h>, so this file only builds for ARM/AArch64 targets.
typedef float16_t ggml_half;
// NOTE(review): upstream ggml uses ggml_half2 for a *pair* of halves; here it
// is a 32-bit float, matching only in total size (2 x 16 bits).  Reads of the
// union members below are therefore type punning — confirm this is intended.
typedef float32_t ggml_half2;
typedef float16_t ggml_fp16_t;
// NOTE(review): upstream ggml_float is double; using fp16 here means the
// GGML_F16x8_REDUCE result below is narrowed to half precision — confirm.
typedef float16_t ggml_float;
typedef float16_t f16;  // shorthand used for NEON load/store pointer casts

#define QK4_0 32
// 4-bit quantization, type 0: QK4_0 weights per block, one shared scale.
// Two 4-bit quants are packed per byte in qs.
typedef struct {
    ggml_half d;          // delta (per-block scale)
    uint8_t qs[QK4_0 / 2];  // nibbles / quants
} block_q4_0;

#define QK4_1 32
// 4-bit quantization, type 1: like q4_0 plus an explicit per-block minimum.
// NOTE(review): d and m are full floats here, while upstream ggml stores them
// as ggml_half — confirm the (de)quantize routines agree with this layout.
typedef struct {
    float   d;          // delta (per-block scale)
    float   m;          // min
    uint8_t qs[QK4_1 / 2];  // nibbles / quants
} block_q4_1;

#define QK8_0 32
// 8-bit quantization, type 0: QK8_0 signed byte quants and one fp16 scale.
typedef struct {
    ggml_half d;       // delta (per-block scale)
    int8_t  qs[QK8_0]; // quants
} block_q8_0;

#define QK8_1 32
// 8-bit quantization, type 1: like q8_0 plus a precomputed d * sum(qs),
// used to fold the minimum term into q4_1 x q8_1 dot products.
typedef struct {
    union {
        struct {
            ggml_half d; // delta
            ggml_half s; // d * sum(qs[i])
        } GGML_COMMON_AGGR_S;
        // NOTE(review): ds (ggml_half2 == float32_t here) overlays the two
        // halves above by size only; reading it is bit punning, not a numeric
        // pair — confirm intended (see the typedef note at top of file).
        ggml_half2 ds;
    } GGML_COMMON_AGGR_U;
    int8_t qs[QK8_1]; // quants
} block_q8_1;

#define QK_K 256  // GGML_QKK_64 (QK_K == 64) builds are currently not supported
// 6-bit K-quantization: QK_K weights per super-block; each 6-bit quant is
// split into a low nibble (ql) and two high bits (qh), with 8-bit sub-scales
// for every group of 16 weights.
typedef struct {
    uint8_t ql[QK_K/2];      // quants, lower 4 bits
    uint8_t qh[QK_K/4];      // quants, upper 2 bits
    int8_t  scales[QK_K/16]; // scales, quantized with 8 bits
    ggml_half d;             // super-block scale
} block_q6_K;

// 2-bit K-quantization: QK_K weights per super-block, 16 sub-blocks of 16
// weights, each with a 4-bit scale and 4-bit min packed into scales[].
typedef struct {
    uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits
    uint8_t qs[QK_K/4];      // quants, 2 bits each
    union {
        struct {
            ggml_half d;    // super-block scale for quantized scales
            ggml_half dmin; // super-block scale for quantized mins
        } GGML_COMMON_AGGR;
        // NOTE(review): dm overlays d/dmin by size only (see block_q8_1).
        ggml_half2 dm;
    };
} block_q2_K;

// 3-bit K-quantization: 2-bit low quants plus a separate high-bit plane
// (hmask), with 6-bit sub-scales packed into 12 bytes.
typedef struct {
    uint8_t hmask[QK_K/8]; // quants - high bit
    uint8_t qs[QK_K/4];    // quants - low 2 bits
    uint8_t scales[12];    // scales, quantized with 6 bits
    ggml_half d;           // super-block scale
} block_q3_K;

// 8-bit K-quantization: activation-side format used as the dot-product
// partner of the K-quant weight types; bsums caches group sums so the dot
// kernels can apply per-group mins without re-summing quants.
typedef struct {
    float   d;              // delta
    int8_t  qs[QK_K];       // quants
    int16_t bsums[QK_K/16]; // sum of quants in groups of 16
} block_q8_K;

// Build a 256-bit AVX vector from two 128-bit halves (a = high, b = low).
// NOTE(review): x86 AVX intrinsics — unused in this ARM NEON build and would
// require <immintrin.h>; confirm whether this macro can be dropped.
#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)


// --- Dequantization: one row of quantized blocks -> k floats
//     (k must be a multiple of the block size; src/x = blocks, dst/y = floats).
void dequantize_row_q2_K(const block_q2_K *__restrict__  src, float *__restrict__  dst, int64_t k);
void dequantize_row_q4_0(const block_q4_0 * __restrict__ src, float * __restrict__ dst, int64_t k);
void dequantize_row_q4_1(const block_q4_1 * __restrict__ src, float * __restrict__ dst, int64_t k);
void dequantize_row_q8_0(const block_q8_0 *__restrict__ x, float *__restrict__ y, int64_t k);

// --- Quantization: k floats -> one row of quantized blocks.
void quantize_row_q8_K(const float *__restrict__ x, block_q8_K *__restrict__ y, int64_t k);
void quantize_row_q6_K(const float *__restrict__ x, block_q6_K *__restrict__ y, int64_t k);
void quantize_row_q3_K(const float *__restrict__ x, block_q3_K *__restrict__ y, int64_t k);
void quantize_row_q2_K(const float *__restrict__ x, block_q2_K *__restrict__ y, int64_t k);
void quantize_row_q4_0(const float *__restrict__ x, block_q4_0 *__restrict__ y, int64_t k);
void quantize_row_q4_1(const float *__restrict__ x, block_q4_1 *__restrict__ y, int64_t k);
void quantize_row_q8_0(const float *__restrict__ x, block_q8_0 *__restrict__ y, int64_t k);
void quantize_row_q8_1(const float * __restrict__ x, block_q8_1 * __restrict__ y, int64_t k);
// --- Dot products over quantized rows: result written to *s.
//     NOTE(review): bs/bx/by look like byte strides and nrc a result-column
//     count, matching the ggml_vec_dot_t signature — confirm against callers.
void ggml_vec_dot_q3_K_q8_K(int n, float * __restrict__ s, size_t bs, const void * __restrict__ vx,
                            size_t bx, const void * __restrict__ vy, size_t by, int nrc);
void ggml_vec_dot_q2_K_q8_K(int n, float * __restrict__ s, size_t bs, const void * __restrict__ vx,
                            size_t bx, const void * __restrict__ vy, size_t by, int nrc);
void ggml_vec_dot_q6_K_q8_K(int n, float * __restrict__ s, size_t bs, const void * __restrict__ vx,
                            size_t bx, const void * __restrict__ vy, size_t by, int nrc);
void ggml_vec_dot_q4_0_q8_0(int n, float *__restrict__ s, size_t bs, const void *__restrict__ vx,
                            size_t bx, const void *__restrict__ vy, size_t by, int nrc);
void ggml_vec_dot_q4_1_q8_1(int n, float * __restrict__ s, size_t bs, const void * __restrict__ vx,
                            size_t bx, const void * __restrict__ vy, size_t by, int nrc);
void ggml_vec_dot_q8_0_q8_0(int n, float *__restrict__ s, size_t bs, const void *__restrict__ vx,
                            size_t bx, const void *__restrict__ vy, size_t by, int nrc);

// --- fp16 <-> fp32 row conversions.
void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int64_t n);
void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int64_t n);

void ggml_vec_dot_f16(int n, float * __restrict__ s, size_t bs, ggml_fp16_t * __restrict__ x, size_t bx, ggml_fp16_t * __restrict__ y, size_t by, int nrc);
// Function-pointer types for the dispatch tables (type traits) that select
// per-quant-format conversion and dot-product kernels.
typedef void (*ggml_to_float_t)(const void  *__restrict__ x, float *__restrict__ y, int64_t k);
typedef void (*ggml_vec_dot_t)(int n, float *__restrict__ s, size_t bs, const void *__restrict__ x, size_t bx,
                               const void *__restrict__ y, size_t by, int nrc);
typedef void (*ggml_from_float_t)(const float *__restrict__ x, void  *__restrict__ y, int64_t k);

// Convert one half-precision value to single precision.
// ggml_half is float16_t here (see typedefs above), so a plain value cast
// performs the widening conversion — no bit-level punning is needed.
// (Removed dead commented-out memcpy code left over from the uint16_t
// representation used by upstream ggml.)
static inline float GGML_FP16_TO_FP32(ggml_half h) {
    return (float)h;
}

/* Reinterpret the raw IEEE-754 bit pattern `w` as a float.
 * Union punning is the well-defined way to do this in C (a pointer cast
 * would violate strict aliasing); a C99 compound literal keeps it to one
 * expression. */
static inline float fp32_from_bits(uint32_t w) {
    const union { uint32_t u; float f; } pun = { .u = w };
    return pun.f;
}

/* Extract the raw IEEE-754 bit pattern of the float `f`.
 * Inverse of fp32_from_bits; uses the same compound-literal union punning,
 * which is well-defined in C. */
static inline uint32_t fp32_to_bits(float f) {
    const union { float f; uint32_t u; } pun = { .f = f };
    return pun.u;
}

#define GGML_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
#if __AVX__ || __AVX2__ || __AVX512F__
// Bit-exact float -> IEEE-754 binary16 conversion in software (same
// algorithm as upstream ggml / the FP16 library).  Returns the raw half
// bit pattern as uint16_t.
static inline uint16_t ggml_compute_fp32_to_fp16(float f) {
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float scale_to_inf = 0x1.0p+112f;
    const float scale_to_zero = 0x1.0p-110f;
#else
    // Same two constants, spelled as bit patterns for pre-C99 compilers
    // without hex float literals.
    const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
    const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
#endif
    // Scale |f| up then down so rounding to the half mantissa width happens
    // in float arithmetic (covers normals and subnormals uniformly).
    float base = (fabsf(f) * scale_to_inf) * scale_to_zero;

    const uint32_t w = fp32_to_bits(f);
    const uint32_t shl1_w = w + w;  // w << 1: drops the sign bit
    const uint32_t sign = w & UINT32_C(0x80000000);
    uint32_t bias = shl1_w & UINT32_C(0xFF000000);  // exponent field of |f|
    if (bias < UINT32_C(0x71000000)) {
        bias = UINT32_C(0x71000000);  // clamp so subnormal halves round correctly
    }

    // Add a magic bias so the half's exponent/mantissa land at fixed bit
    // positions of the float sum below.
    base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
    const uint32_t bits = fp32_to_bits(base);
    const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
    const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
    // '+' (not '|') so a mantissa carry-out bumps the exponent (round-up to
    // the next binade / to infinity on overflow).
    const uint32_t nonsign = exp_bits + mantissa_bits;
    // shl1_w > 0xFF000000 means the input was NaN -> return a quiet NaN;
    // infinities (shl1_w == 0xFF000000) saturate correctly via nonsign.
    return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
}
#elif defined(__ARM_NEON)
// Hardware float -> half conversion: the float -> __fp16 cast compiles to a
// single fcvt instruction on targets with native fp16 support.
static inline ggml_half ggml_compute_fp32_to_fp16(float f) {
    ggml_half res;
    __fp16 tmp = f;
    // Copy the 16-bit pattern into the result; ggml_half and __fp16 are both
    // 2 bytes here, and memcpy avoids any aliasing concerns.
    // NOTE(review): memcpy needs <string.h>, which this file does not include
    // — confirm it is pulled in transitively or add the include.
    memcpy(&res, &tmp, sizeof(ggml_half));
    return res;
}
#endif

// SIMD abstraction for fp16 vector math on ARM NEON.  Generic code uses the
// GGML_F16_VEC_* aliases; they map either to native fp16 arithmetic or to an
// fp32 fallback, depending on __ARM_FEATURE_FP16_VECTOR_ARITHMETIC.
#if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA)

#define GGML_SIMD

// F16 NEON

#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
    // Native fp16 arithmetic: 32 halves per step, as 4 registers of 8 lanes.
    #define GGML_F16_STEP 32
    #define GGML_F16_EPR  8

    #define GGML_F16x8              float16x8_t
    #define GGML_F16x8_ZERO         vdupq_n_f16(0.0f)
    #define GGML_F16x8_SET1(x)      vdupq_n_f16(x)
    #define GGML_F16x8_LOAD(x)      vld1q_f16((const f16 *)(x))
    #define GGML_F16x8_STORE        vst1q_f16
    // vfmaq_f16(a, b, c) computes a + b * c (fused).
    #define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c)
    #define GGML_F16x8_ADD          vaddq_f16
    #define GGML_F16x8_MUL          vmulq_f16
    // Tree-reduce GGML_F16_ARR accumulator registers into one scalar:
    // pairwise fp16 adds halve the register count, then the last register is
    // widened to f32 for the final horizontal sum (vaddvq_f32).
    // NOTE(review): with GGML_F16_ARR == 4 the third halving loop runs zero
    // iterations; it exists so the macro also works for larger ARR values.
    #define GGML_F16x8_REDUCE(res, x)                               \
    do {                                                            \
        int offset = GGML_F16_ARR >> 1;                             \
        for (int i = 0; i < offset; ++i) {                          \
            (x)[i] = vaddq_f16((x)[i], (x)[offset+i]);              \
        }                                                           \
        offset >>= 1;                                               \
        for (int i = 0; i < offset; ++i) {                          \
            (x)[i] = vaddq_f16((x)[i], (x)[offset+i]);              \
        }                                                           \
        offset >>= 1;                                               \
        for (int i = 0; i < offset; ++i) {                          \
            (x)[i] = vaddq_f16((x)[i], (x)[offset+i]);              \
        }                                                           \
        const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 ((x)[0])); \
        const float32x4_t t1 = vcvt_f32_f16(vget_high_f16((x)[0])); \
        (res) = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1));         \
    } while (0)

    #define GGML_F16_VEC                GGML_F16x8
    #define GGML_F16_VEC_ZERO           GGML_F16x8_ZERO
    #define GGML_F16_VEC_SET1           GGML_F16x8_SET1
    #define GGML_F16_VEC_LOAD(p, i)     GGML_F16x8_LOAD(p)
    #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE((f16 *)(p), (r)[i])
    #define GGML_F16_VEC_FMA            GGML_F16x8_FMA
    #define GGML_F16_VEC_ADD            GGML_F16x8_ADD
    #define GGML_F16_VEC_MUL            GGML_F16x8_MUL
    #define GGML_F16_VEC_REDUCE         GGML_F16x8_REDUCE
#else
    // if FP16 vector arithmetic is not supported, we use FP32 instead
    // and take advantage of the vcvt_ functions to convert to/from FP16

    #define GGML_F16_STEP 16
    #define GGML_F16_EPR  4

    #define GGML_F32Cx4              float32x4_t
    #define GGML_F32Cx4_ZERO         vdupq_n_f32(0.0f)
    #define GGML_F32Cx4_SET1(x)      vdupq_n_f32(x)
    // LOAD widens 4 halves to f32; STORE narrows back to 4 halves.
    #define GGML_F32Cx4_LOAD(x)      vcvt_f32_f16(vld1_f16((const f16 *)(x)))
    #define GGML_F32Cx4_STORE(x, y)  vst1_f16(x, vcvt_f16_f32(y))
    #define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c)
    #define GGML_F32Cx4_ADD          vaddq_f32
    #define GGML_F32Cx4_MUL          vmulq_f32
    // NOTE(review): GGML_F32x4_REDUCE is not defined anywhere in this file;
    // if this fallback path compiles, the definition must come from elsewhere
    // — confirm, or this branch is dead/broken.
    #define GGML_F32Cx4_REDUCE       GGML_F32x4_REDUCE

    #define GGML_F16_VEC                GGML_F32Cx4
    #define GGML_F16_VEC_ZERO           GGML_F32Cx4_ZERO
    #define GGML_F16_VEC_SET1           GGML_F32Cx4_SET1
    #define GGML_F16_VEC_LOAD(p, i)     GGML_F32Cx4_LOAD(p)
    #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE((f16 *)(p), r[i])
    #define GGML_F16_VEC_FMA            GGML_F32Cx4_FMA
    #define GGML_F16_VEC_ADD            GGML_F32Cx4_ADD
    #define GGML_F16_VEC_MUL            GGML_F32Cx4_MUL
    #define GGML_F16_VEC_REDUCE         GGML_F32Cx4_REDUCE
#endif

    // Number of accumulator registers processed per step.
    #define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR)
#endif

