#pragma once

// GGML CPU internal header

#include "ggml.h" // ggml_fp16_t, ggml_bf16_t

#include <stdint.h>
#include <string.h> // memcpy
#include <math.h>   // fabsf, roundf

#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#endif

#ifdef __cplusplus
extern "C" {
#endif
/**
 * Converts brain16 to float32.
 *
 * The bfloat16 floating point format has the following structure:
 *
 *   ┌sign
 *   │
 *   │   ┌exponent
 *   │   │
 *   │   │      ┌mantissa
 *   │   │      │
 *   │┌──┴───┐┌─┴───┐
 * 0b0000000000000000 brain16
 *
 * Since bf16 has the same number of exponent bits as a 32-bit float,
 * encoding and decoding numbers becomes relatively straightforward.
 *
 *   ┌sign
 *   │
 *   │   ┌exponent
 *   │   │
 *   │   │      ┌mantissa
 *   │   │      │
 *   │┌──┴───┐┌─┴───────────────────┐
 * 0b00000000000000000000000000000000 IEEE binary32
 *
 * For comparison, the standard fp16 format has fewer exponent bits.
 *
 *   ┌sign
 *   │
 *   │  ┌exponent
 *   │  │
 *   │  │   ┌mantissa
 *   │  │   │
 *   │┌─┴─┐┌─┴──────┐
 * 0b0000000000000000 IEEE binary16
 *
 * @see IEEE 754-2008
 */
static inline float ggml_compute_bf16_to_fp32(ggml_bf16_t h) {
    union {
        float f;
        uint32_t i;
    } u;
    u.i = (uint32_t)h.bits << 16;
    return u.f;
}
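
// A quick sanity check (bf16 0x3F80 is sign 0, exponent 0x7F, mantissa 0,
// which shifts to the float32 bit pattern 0x3F800000, i.e. 1.0f):
//
//   ggml_bf16_t one = { .bits = 0x3F80 };
//   assert(ggml_compute_bf16_to_fp32(one) == 1.0f);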

/**
 * Converts float32 to brain16.
 *
 * This is bit-for-bit identical with the Google Brain float conversion.
 * Floats shall round to nearest even, and NANs shall be quiet.
 * Subnormals aren't flushed to zero, except perhaps when used.
 * This code should vectorize nicely if using modern compilers.
 */
static inline ggml_bf16_t ggml_compute_fp32_to_bf16(float s) {
    ggml_bf16_t h;
    union {
        float f;
        uint32_t i;
    } u;
    u.f = s;
    if ((u.i & 0x7fffffff) > 0x7f800000) { /* nan */
        h.bits = (u.i >> 16) | 64; /* force to quiet */
        return h;
    }
    /* round to nearest even: add 0x7fff plus the lsb of the truncated result */
    h.bits = (u.i + (0x7fff + ((u.i >> 16) & 1))) >> 16;
    return h;
}
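
// Rounding illustration: both inputs below have their low 16 bits exactly at
// the halfway point (0x8000), so ties resolve to the even bf16 mantissa:
//
//   ggml_compute_fp32_to_bf16(1.00390625f).bits == 0x3F80  // 0x3F808000 rounds down to 1.0f
//   ggml_compute_fp32_to_bf16(1.01171875f).bits == 0x3F82  // 0x3F818000 rounds up to 1.015625f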

// __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512
// __SSE3__ and __SSSE3__ are not defined in MSVC, but SSE3/SSSE3 are present when AVX/AVX2/AVX512 are available

// 16-bit float
// on Arm, we use __fp16
// on x86, we use uint16_t
#if defined(__ARM_NEON) && !defined(_MSC_VER)

// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
//
//   $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
//
#include <arm_neon.h>

typedef __fp16 ggml_fp16_internal_t;

#else

typedef uint16_t ggml_fp16_internal_t;

#endif // defined(__ARM_NEON) && !defined(_MSC_VER)

// 32-bit ARM compatibility
//
// on 32-bit ARM the following AArch64-only intrinsics are emulated:
//
// vaddlvq_s16
// vpaddq_s16
// vpaddq_s32
// vaddvq_s32
// vaddvq_f32
// vmaxvq_f32
// vcvtnq_s32_f32
// vzip1_u8
// vzip2_u8

#if defined(__ARM_NEON) && !defined(__aarch64__)
inline static int32_t vaddlvq_s16(int16x8_t v) {
    int32x4_t v0 = vreinterpretq_s32_s64(vpaddlq_s32(vpaddlq_s16(v)));
    return vgetq_lane_s32(v0, 0) + vgetq_lane_s32(v0, 2);
}

inline static int16x8_t vpaddq_s16(int16x8_t a, int16x8_t b) {
    int16x4_t a0 = vpadd_s16(vget_low_s16(a), vget_high_s16(a));
    int16x4_t b0 = vpadd_s16(vget_low_s16(b), vget_high_s16(b));
    return vcombine_s16(a0, b0);
}

inline static int32x4_t vpaddq_s32(int32x4_t a, int32x4_t b) {
    int32x2_t a0 = vpadd_s32(vget_low_s32(a), vget_high_s32(a));
    int32x2_t b0 = vpadd_s32(vget_low_s32(b), vget_high_s32(b));
    return vcombine_s32(a0, b0);
}

inline static int32_t vaddvq_s32(int32x4_t v) {
    return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3);
}

inline static float vaddvq_f32(float32x4_t v) {
    return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
}

inline static float vmaxvq_f32(float32x4_t v) {
    return
        MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)),
            MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
}

// note: roundf() rounds ties away from zero, while the real vcvtnq_s32_f32
// rounds ties to even; the results differ only for exact *.5 inputs
inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) {
    int32x4_t res;

    res[0] = roundf(vgetq_lane_f32(v, 0));
    res[1] = roundf(vgetq_lane_f32(v, 1));
    res[2] = roundf(vgetq_lane_f32(v, 2));
    res[3] = roundf(vgetq_lane_f32(v, 3));

    return res;
}

inline static uint8x8_t vzip1_u8(uint8x8_t a, uint8x8_t b) {
    uint8x8_t res;

    res[0] = a[0]; res[1] = b[0];
    res[2] = a[1]; res[3] = b[1];
    res[4] = a[2]; res[5] = b[2];
    res[6] = a[3]; res[7] = b[3];

    return res;
}

inline static uint8x8_t vzip2_u8(uint8x8_t a, uint8x8_t b) {
    uint8x8_t res;

    res[0] = a[4]; res[1] = b[4];
    res[2] = a[5]; res[3] = b[5];
    res[4] = a[6]; res[5] = b[6];
    res[6] = a[7]; res[7] = b[7];

    return res;
}
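
// Example: the zip shims interleave the low (vzip1_u8) or high (vzip2_u8)
// halves of two vectors:
//
//   a = {a0,a1,...,a7}, b = {b0,b1,...,b7}
//   vzip1_u8(a, b) == {a0,b0,a1,b1,a2,b2,a3,b3}
//   vzip2_u8(a, b) == {a4,b4,a5,b5,a6,b6,a7,b7}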

// vld1q_s16_x2
// vld1q_u8_x2
// vld1q_u8_x4
// vld1q_s8_x2
// vld1q_s8_x4
// TODO: double-check these work correctly

typedef struct ggml_int16x8x2_t {
    int16x8_t val[2];
} ggml_int16x8x2_t;

inline static ggml_int16x8x2_t ggml_vld1q_s16_x2(const int16_t * ptr) {
    ggml_int16x8x2_t res;

    res.val[0] = vld1q_s16(ptr + 0);
    res.val[1] = vld1q_s16(ptr + 8);

    return res;
}

typedef struct ggml_uint8x16x2_t {
    uint8x16_t val[2];
} ggml_uint8x16x2_t;

inline static ggml_uint8x16x2_t ggml_vld1q_u8_x2(const uint8_t * ptr) {
    ggml_uint8x16x2_t res;

    res.val[0] = vld1q_u8(ptr + 0);
    res.val[1] = vld1q_u8(ptr + 16);

    return res;
}

typedef struct ggml_uint8x16x4_t {
    uint8x16_t val[4];
} ggml_uint8x16x4_t;

inline static ggml_uint8x16x4_t ggml_vld1q_u8_x4(const uint8_t * ptr) {
    ggml_uint8x16x4_t res;

    res.val[0] = vld1q_u8(ptr + 0);
    res.val[1] = vld1q_u8(ptr + 16);
    res.val[2] = vld1q_u8(ptr + 32);
    res.val[3] = vld1q_u8(ptr + 48);

    return res;
}

typedef struct ggml_int8x16x2_t {
    int8x16_t val[2];
} ggml_int8x16x2_t;

inline static ggml_int8x16x2_t ggml_vld1q_s8_x2(const int8_t * ptr) {
    ggml_int8x16x2_t res;

    res.val[0] = vld1q_s8(ptr + 0);
    res.val[1] = vld1q_s8(ptr + 16);

    return res;
}

typedef struct ggml_int8x16x4_t {
    int8x16_t val[4];
} ggml_int8x16x4_t;

inline static ggml_int8x16x4_t ggml_vld1q_s8_x4(const int8_t * ptr) {
    ggml_int8x16x4_t res;

    res.val[0] = vld1q_s8(ptr + 0);
    res.val[1] = vld1q_s8(ptr + 16);
    res.val[2] = vld1q_s8(ptr + 32);
    res.val[3] = vld1q_s8(ptr + 48);

    return res;
}

// NOTE: not tested
inline static int8x16_t ggml_vqtbl1q_s8(int8x16_t a, uint8x16_t b) {
    int8x16_t res;

    res[ 0] = a[b[ 0]];
    res[ 1] = a[b[ 1]];
    res[ 2] = a[b[ 2]];
    res[ 3] = a[b[ 3]];
    res[ 4] = a[b[ 4]];
    res[ 5] = a[b[ 5]];
    res[ 6] = a[b[ 6]];
    res[ 7] = a[b[ 7]];
    res[ 8] = a[b[ 8]];
    res[ 9] = a[b[ 9]];
    res[10] = a[b[10]];
    res[11] = a[b[11]];
    res[12] = a[b[12]];
    res[13] = a[b[13]];
    res[14] = a[b[14]];
    res[15] = a[b[15]];

    return res;
}

// NOTE: not tested
inline static uint8x16_t ggml_vqtbl1q_u8(uint8x16_t a, uint8x16_t b) {
    uint8x16_t res;

    res[ 0] = a[b[ 0]];
    res[ 1] = a[b[ 1]];
    res[ 2] = a[b[ 2]];
    res[ 3] = a[b[ 3]];
    res[ 4] = a[b[ 4]];
    res[ 5] = a[b[ 5]];
    res[ 6] = a[b[ 6]];
    res[ 7] = a[b[ 7]];
    res[ 8] = a[b[ 8]];
    res[ 9] = a[b[ 9]];
    res[10] = a[b[10]];
    res[11] = a[b[11]];
    res[12] = a[b[12]];
    res[13] = a[b[13]];
    res[14] = a[b[14]];
    res[15] = a[b[15]];

    return res;
}
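
// Example: ggml_vqtbl1q_u8 is a 16-entry byte table lookup, so an index vector
// of {15,14,...,0} reverses its input. Caveat: the native TBL instruction
// yields 0 for indices >= 16, while these fallbacks do not handle
// out-of-range indices at all.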

#elif defined(__ARM_NEON)

// on AArch64 these are just the native types and intrinsics
#define ggml_int16x8x2_t  int16x8x2_t
#define ggml_uint8x16x2_t uint8x16x2_t
#define ggml_uint8x16x4_t uint8x16x4_t
#define ggml_int8x16x2_t  int8x16x2_t
#define ggml_int8x16x4_t  int8x16x4_t

#define ggml_vld1q_s16_x2 vld1q_s16_x2
#define ggml_vld1q_u8_x2  vld1q_u8_x2
#define ggml_vld1q_u8_x4  vld1q_u8_x4
#define ggml_vld1q_s8_x2  vld1q_s8_x2
#define ggml_vld1q_s8_x4  vld1q_s8_x4
#define ggml_vqtbl1q_s8   vqtbl1q_s8
#define ggml_vqtbl1q_u8   vqtbl1q_u8

#endif // defined(__ARM_NEON) && !defined(__aarch64__)

#if defined(__ARM_NEON) && !defined(__ARM_FEATURE_DOTPROD)

inline static int32x4_t ggml_vdotq_s32(int32x4_t acc, int8x16_t a, int8x16_t b) {
    const int16x8_t p0 = vmull_s8(vget_low_s8 (a), vget_low_s8 (b));
    const int16x8_t p1 = vmull_s8(vget_high_s8(a), vget_high_s8(b));

    return vaddq_s32(acc, vaddq_s32(vpaddlq_s16(p0), vpaddlq_s16(p1)));
}

#elif defined(__ARM_NEON)

#define ggml_vdotq_s32(a, b, c) vdotq_s32(a, b, c)

#endif // !defined(__ARM_FEATURE_DOTPROD)
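
// Note: the fallback above sums all 16 products a[i]*b[i] into the four lanes
// of acc. Its per-lane grouping differs from the native sdot instruction, but
// the total across the four lanes (the quantity callers typically reduce with
// vaddvq_s32) is identical.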

#if defined(__ARM_NEON) && !defined(_MSC_VER)

// on Arm, conversion through __fp16 is a native operation
static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    ggml_fp16_internal_t tmp;
    memcpy(&tmp, &h, sizeof(ggml_fp16_t));
    return (float)tmp;
}

static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
    ggml_fp16_t res;
    ggml_fp16_internal_t tmp = f;
    memcpy(&res, &tmp, sizeof(ggml_fp16_t));
    return res;
}

#endif // defined(__ARM_NEON) && !defined(_MSC_VER)

#if defined(__loongarch_asx)

typedef union {
    int32_t i;
    float f;
} ft_union;

/* float type data load instructions */
static __m128 __lsx_vreplfr2vr_s(float val) {
    ft_union fi_tmpval = {.f = val};
    return (__m128)__lsx_vreplgr2vr_w(fi_tmpval.i);
}

static __m256 __lasx_xvreplfr2vr_s(float val) {
    ft_union fi_tmpval = {.f = val};
    return (__m256)__lasx_xvreplgr2vr_w(fi_tmpval.i);
}

#endif // defined(__loongarch_asx)

#if defined(__POWER9_VECTOR__)

/* the inline asm below is about 12% faster than the lookup method */
static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    register float f;
    register double d;
    __asm__(
        "mtfprd %0,%2\n"
        "xscvhpdp %0,%0\n"
        "frsp %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=f"(f):
        /* in */   "r"(h));
    return f;
}

static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
    register double d;
    register ggml_fp16_t r;
    __asm__( /* xscvdphp can work on double or single precision */
        "xscvdphp %0,%2\n"
        "mffprd %1,%0\n" :
        /* temp */ "=d"(d),
        /* out */  "=r"(r):
        /* in */   "f"(f));
    return r;
}

#elif !(defined(__ARM_NEON) && !defined(_MSC_VER))

// FP16 <-> FP32
// generic bit-manipulation fallback, used where no native conversion is defined above
// ref: https://github.com/Maratyszcza/FP16
static inline float fp32_from_bits(uint32_t w) {
    union {
        uint32_t as_bits;
        float as_value;
    } fp32;
    fp32.as_bits = w;
    return fp32.as_value;
}

static inline uint32_t fp32_to_bits(float f) {
    union {
        float as_value;
        uint32_t as_bits;
    } fp32;
    fp32.as_value = f;
    return fp32.as_bits;
}

static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    const uint32_t w = (uint32_t) h << 16;
    const uint32_t sign = w & UINT32_C(0x80000000);
    const uint32_t two_w = w + w;

    const uint32_t exp_offset = UINT32_C(0xE0) << 23;
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float exp_scale = 0x1.0p-112f;
#else
    const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
#endif
    const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;

    const uint32_t magic_mask = UINT32_C(126) << 23;
    const float magic_bias = 0.5f;
    const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;

    const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
    const uint32_t result = sign |
        (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
    return fp32_from_bits(result);
}

static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float scale_to_inf = 0x1.0p+112f;
    const float scale_to_zero = 0x1.0p-110f;
#else
    const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
    const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
#endif
    float base = (fabsf(f) * scale_to_inf) * scale_to_zero;

    const uint32_t w = fp32_to_bits(f);
    const uint32_t shl1_w = w + w;
    const uint32_t sign = w & UINT32_C(0x80000000);
    uint32_t bias = shl1_w & UINT32_C(0xFF000000);
    if (bias < UINT32_C(0x71000000)) {
        bias = UINT32_C(0x71000000);
    }

    base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
    const uint32_t bits = fp32_to_bits(base);
    const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
    const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
    const uint32_t nonsign = exp_bits + mantissa_bits;
    return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
}

#endif // FP16 <-> FP32 conversion
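
// Worked example: 1.0f (bit pattern 0x3F800000) converts to the IEEE binary16
// value 0x3C00 and back (ggml_fp16_t is a plain uint16_t holding the bits):
//
//   assert(ggml_compute_fp32_to_fp16(1.0f) == 0x3C00);
//   assert(ggml_compute_fp16_to_fp32(0x3C00) == 1.0f);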

// precomputed f32 table for f16 (256 KB)
// defined in ggml.c, initialized in ggml_init()
extern float ggml_table_f32_f16[1 << 16];

// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32,
// so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON.
// This is also true for POWER9.
inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
    uint16_t s;
    memcpy(&s, &f, sizeof(uint16_t));
    return ggml_table_f32_f16[s];
}
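
// Minimal usage sketch, assuming ggml_init() has been called so that
// ggml_table_f32_f16 is populated:
//
//   ggml_fp16_t h = ggml_compute_fp32_to_fp16(0.5f);
//   float       f = ggml_lookup_fp16_to_fp32(h); // 0.5f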

#ifdef __cplusplus
}
#endif