#include "iqk_gemm_1bit.h"

#ifdef IQK_IMPLEMENT

#include "ggml-impl.h"

#define GGML_COMMON_IMPL_C
#include "ggml-common.h"

namespace {
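// Unsigned copy of the 2048-entry IQ1_S codebook: each 64-bit entry packs the
// eight grid values of one codeword, one byte per value, with every byte in
// {0, 1, 2} (i.e. the signed ternary grid value plus 1).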

static const uint64_t iq1s_grid_us[2048] = {
    0x0000000000000000, 0x0000000000000002, 0x0000000000000101, 0x0000000000000200,
    0x0000000000000202, 0x0000000000010001, 0x0000000000010101, 0x0000000000020000,
    0x0000000000020002, 0x0000000000020200, 0x0000000000020202, 0x0000000001000101,
    0x0000000001010001, 0x0000000001010100, 0x0000000001010102, 0x0000000001020101,
    0x0000000002000000, 0x0000000002000002, 0x0000000002000200, 0x0000000002000202,
    0x0000000002010101, 0x0000000002020000, 0x0000000002020002, 0x0000000002020200,
    0x0000000002020202, 0x0000000100000100, 0x0000000100000101, 0x0000000100010001,
    0x0000000100010100, 0x0000000100010102, 0x0000000100010201, 0x0000000100010202,
    0x0000000100020101, 0x0000000101000001, 0x0000000101000102, 0x0000000101000201,
    0x0000000101010002, 0x0000000101010101, 0x0000000101010202, 0x0000000101020001,
    0x0000000101020100, 0x0000000101020102, 0x0000000101020200, 0x0000000102000101,
    0x0000000102010001, 0x0000000102010100, 0x0000000102010102, 0x0000000102020101,
    0x0000000200000000, 0x0000000200000002, 0x0000000200000200, 0x0000000200000202,
    0x0000000200010101, 0x0000000200020000, 0x0000000200020002, 0x0000000200020200,
    0x0000000200020202, 0x0000000201000101, 0x0000000201010001, 0x0000000201010201,
    0x0000000201020100, 0x0000000201020201, 0x0000000202000000, 0x0000000202000002,
    0x0000000202000200, 0x0000000202000202, 0x0000000202010001, 0x0000000202010101,
    0x0000000202010201, 0x0000000202020000, 0x0000000202020002, 0x0000000202020200,
    0x0000000202020202, 0x0000010000010001, 0x0000010000010100, 0x0000010000010102,
    0x0000010000020101, 0x0000010001000001, 0x0000010001000201, 0x0000010001010101,
    0x0000010001010202, 0x0000010001020100, 0x0000010001020101, 0x0000010002010001,
    0x0000010002010201, 0x0000010002020101, 0x0000010100000001, 0x0000010100000100,
    0x0000010100000101, 0x0000010100000102, 0x0000010100010101, 0x0000010100010200,
    0x0000010100010202, 0x0000010100020201, 0x0000010101000000, 0x0000010101000101,
    0x0000010101000202, 0x0000010101010000, 0x0000010101010001, 0x0000010101010100,
    0x0000010101010101, 0x0000010101010102, 0x0000010101010201, 0x0000010101020000,
    0x0000010101020002, 0x0000010101020101, 0x0000010101020200, 0x0000010101020202,
    0x0000010102000001, 0x0000010102010001, 0x0000010102010101, 0x0000010102010200,
    0x0000010102010202, 0x0000010102020001, 0x0000010102020100, 0x0000010102020101,
    0x0000010102020102, 0x0000010102020201, 0x0000010200010100, 0x0000010200010201,
    0x0000010201000001, 0x0000010201000100, 0x0000010201010000, 0x0000010201010002,
    0x0000010201010101, 0x0000010201010200, 0x0000010201020000, 0x0000010201020001,
    0x0000010201020102, 0x0000010201020201, 0x0000010202000101, 0x0000010202010001,
    0x0000010202010100, 0x0000010202010201, 0x0000020000000000, 0x0000020000000002,
    0x0000020000000200, 0x0000020000000202, 0x0000020000010101, 0x0000020000020000,
    0x0000020000020002, 0x0000020000020200, 0x0000020000020202, 0x0000020001000101,
    0x0000020001010001, 0x0000020001010102, 0x0000020001020101, 0x0000020002000000,
    0x0000020002000002, 0x0000020002000200, 0x0000020002000202, 0x0000020002010101,
    0x0000020002020000, 0x0000020002020002, 0x0000020002020200, 0x0000020002020202,
    0x0000020100000101, 0x0000020100010001, 0x0000020100010100, 0x0000020100010201,
    0x0000020100020100, 0x0000020100020101, 0x0000020101000001, 0x0000020101010000,
    0x0000020101010001, 0x0000020101010101, 0x0000020101020001, 0x0000020101020100,
    0x0000020101020201, 0x0000020102010001, 0x0000020102010100, 0x0000020102010102,
    0x0000020102010201, 0x0000020102020101, 0x0000020200000000, 0x0000020200000002,
    0x0000020200000200, 0x0000020200000202, 0x0000020200010101, 0x0000020200020000,
    0x0000020200020002, 0x0000020200020200, 0x0000020200020202, 0x0000020201000101,
    0x0000020201010001, 0x0000020201010201, 0x0000020201020001, 0x0000020201020101,
    0x0000020202000000, 0x0000020202000002, 0x0000020202000101, 0x0000020202000200,
    0x0000020202000202, 0x0000020202010101, 0x0000020202020000, 0x0000020202020002,
    0x0000020202020200, 0x0000020202020202, 0x0001000000010000, 0x0001000000010001,
    0x0001000000010100, 0x0001000000010201, 0x0001000000020100, 0x0001000000020101,
    0x0001000001000001, 0x0001000001000100, 0x0001000001010000, 0x0001000001010101,
    0x0001000001010200, 0x0001000001020001, 0x0001000001020100, 0x0001000001020101,
    0x0001000001020201, 0x0001000002010001, 0x0001000002010100, 0x0001000002010102,
    0x0001000002020001, 0x0001000002020101, 0x0001000100000001, 0x0001000100000100,
    0x0001000100000102, 0x0001000100000201, 0x0001000100010000, 0x0001000100010002,
    0x0001000100010101, 0x0001000100010200, 0x0001000100020001, 0x0001000100020100,
    0x0001000100020201, 0x0001000101000101, 0x0001000101000202, 0x0001000101010000,
    0x0001000101010001, 0x0001000101010002, 0x0001000101010100, 0x0001000101010101,
    0x0001000101010102, 0x0001000101010201, 0x0001000101020000, 0x0001000101020101,
    0x0001000102000100, 0x0001000102010002, 0x0001000102010101, 0x0001000102020001,
    0x0001000102020100, 0x0001000200010001, 0x0001000200010100, 0x0001000200010102,
    0x0001000200020101, 0x0001000201000000, 0x0001000201000102, 0x0001000201000201,
    0x0001000201010002, 0x0001000201010101, 0x0001000201010200, 0x0001000201010202,
    0x0001000201020100, 0x0001000201020102, 0x0001000202000101, 0x0001000202010001,
    0x0001000202010100, 0x0001000202010102, 0x0001000202020101, 0x0001010000000001,
    0x0001010000000102, 0x0001010000000201, 0x0001010000010100, 0x0001010000010101,
    0x0001010000010200, 0x0001010000010201, 0x0001010000020001, 0x0001010000020102,
    0x0001010001000001, 0x0001010001000101, 0x0001010001000102, 0x0001010001000200,
    0x0001010001000202, 0x0001010001010001, 0x0001010001010100, 0x0001010001010101,
    0x0001010001010102, 0x0001010001010201, 0x0001010001020002, 0x0001010001020101,
    0x0001010001020200, 0x0001010002000100, 0x0001010002000201, 0x0001010002010000,
    0x0001010002010100, 0x0001010002010101, 0x0001010002010200, 0x0001010002010201,
    0x0001010002010202, 0x0001010002020001, 0x0001010002020100, 0x0001010002020101,
    0x0001010002020201, 0x0001010100000002, 0x0001010100000101, 0x0001010100000202,
    0x0001010100010001, 0x0001010100010100, 0x0001010100010101, 0x0001010100010102,
    0x0001010100010201, 0x0001010100020000, 0x0001010100020002, 0x0001010100020101,
    0x0001010100020200, 0x0001010100020202, 0x0001010101000001, 0x0001010101000100,
    0x0001010101000101, 0x0001010101000102, 0x0001010101010001, 0x0001010101010002,
    0x0001010101010100, 0x0001010101010101, 0x0001010101010102, 0x0001010101010201,
    0x0001010101010202, 0x0001010101020001, 0x0001010101020100, 0x0001010101020101,
    0x0001010101020102, 0x0001010101020201, 0x0001010102000000, 0x0001010102000002,
    0x0001010102000100, 0x0001010102000101, 0x0001010102000200, 0x0001010102000202,
    0x0001010102010000, 0x0001010102010001, 0x0001010102010100, 0x0001010102010101,
    0x0001010102010102, 0x0001010102010201, 0x0001010102010202, 0x0001010102020000,
    0x0001010102020002, 0x0001010102020101, 0x0001010200000001, 0x0001010200000100,
    0x0001010200000101, 0x0001010200000102, 0x0001010200010101, 0x0001010200010102,
    0x0001010200010200, 0x0001010200010202, 0x0001010200020001, 0x0001010200020102,
    0x0001010201000000, 0x0001010201000002, 0x0001010201000100, 0x0001010201000101,
    0x0001010201000200, 0x0001010201000202, 0x0001010201010001, 0x0001010201010101,
    0x0001010201010102, 0x0001010201010200, 0x0001010201010201, 0x0001010201020001,
    0x0001010201020100, 0x0001010201020101, 0x0001010201020200, 0x0001010201020201,
    0x0001010201020202, 0x0001010202000102, 0x0001010202000202, 0x0001010202010002,
    0x0001010202010101, 0x0001010202020100, 0x0001010202020201, 0x0001020000010001,
    0x0001020000010102, 0x0001020000020101, 0x0001020001000001, 0x0001020001000100,
    0x0001020001000102, 0x0001020001000201, 0x0001020001010000, 0x0001020001010101,
    0x0001020001010200, 0x0001020001010202, 0x0001020001020000, 0x0001020001020001,
    0x0001020001020100, 0x0001020001020102, 0x0001020001020201, 0x0001020002000101,
    0x0001020002010001, 0x0001020002010100, 0x0001020002020101, 0x0001020100010000,
    0x0001020100010002, 0x0001020100010101, 0x0001020100010202, 0x0001020100020001,
    0x0001020100020101, 0x0001020101000002, 0x0001020101000100, 0x0001020101000101,
    0x0001020101000200, 0x0001020101010001, 0x0001020101010100, 0x0001020101010101,
    0x0001020101010102, 0x0001020101010201, 0x0001020101010202, 0x0001020101020000,
    0x0001020101020101, 0x0001020101020202, 0x0001020102000201, 0x0001020102010001,
    0x0001020102010002, 0x0001020102010101, 0x0001020102010200, 0x0001020102020001,
    0x0001020102020102, 0x0001020102020201, 0x0001020200000201, 0x0001020200010102,
    0x0001020200020100, 0x0001020200020102, 0x0001020201000100, 0x0001020201000102,
    0x0001020201000201, 0x0001020201010000, 0x0001020201010002, 0x0001020201010101,
    0x0001020201010200, 0x0001020201020001, 0x0001020201020102, 0x0001020201020201,
    0x0001020202000101, 0x0001020202010001, 0x0001020202010102, 0x0001020202010202,
    0x0002000000000000, 0x0002000000000002, 0x0002000000000200, 0x0002000000000202,
    0x0002000000010101, 0x0002000000020000, 0x0002000000020002, 0x0002000000020101,
    0x0002000000020200, 0x0002000000020202, 0x0002000001000101, 0x0002000001010001,
    0x0002000001010201, 0x0002000001020001, 0x0002000001020101, 0x0002000002000000,
    0x0002000002000002, 0x0002000002000200, 0x0002000002000202, 0x0002000002010101,
    0x0002000002020000, 0x0002000002020002, 0x0002000002020101, 0x0002000002020200,
    0x0002000002020202, 0x0002000100000101, 0x0002000100010001, 0x0002000100010100,
    0x0002000100010201, 0x0002000100020101, 0x0002000101000002, 0x0002000101000100,
    0x0002000101000201, 0x0002000101010101, 0x0002000101010200, 0x0002000101010202,
    0x0002000101020001, 0x0002000101020100, 0x0002000101020101, 0x0002000101020102,
    0x0002000102000101, 0x0002000102010000, 0x0002000102010102, 0x0002000102010201,
    0x0002000102020101, 0x0002000200000001, 0x0002000200000200, 0x0002000200000202,
    0x0002000200010001, 0x0002000200010101, 0x0002000200020000, 0x0002000200020002,
    0x0002000200020200, 0x0002000200020202, 0x0002000201000101, 0x0002000201010001,
    0x0002000201010102, 0x0002000201010201, 0x0002000201020101, 0x0002000202000001,
    0x0002000202000200, 0x0002000202000202, 0x0002000202010001, 0x0002000202010101,
    0x0002000202020000, 0x0002000202020002, 0x0002000202020200, 0x0002000202020202,
    0x0002010000000101, 0x0002010000010100, 0x0002010000010102, 0x0002010000010201,
    0x0002010000020101, 0x0002010001000100, 0x0002010001000101, 0x0002010001000102,
    0x0002010001000201, 0x0002010001010002, 0x0002010001010101, 0x0002010001010200,
    0x0002010001010202, 0x0002010001020102, 0x0002010002000101, 0x0002010002010001,
    0x0002010002010100, 0x0002010002010201, 0x0002010002020001, 0x0002010002020101,
    0x0002010100000201, 0x0002010100010101, 0x0002010100020001, 0x0002010100020201,
    0x0002010101000000, 0x0002010101000101, 0x0002010101000200, 0x0002010101010001,
    0x0002010101010100, 0x0002010101010101, 0x0002010101010201, 0x0002010101020002,
    0x0002010101020101, 0x0002010101020200, 0x0002010102000201, 0x0002010102010000,
    0x0002010102010100, 0x0002010102010101, 0x0002010102010200, 0x0002010102010202,
    0x0002010102020001, 0x0002010102020100, 0x0002010102020102, 0x0002010102020201,
    0x0002010200000101, 0x0002010200010000, 0x0002010200010002, 0x0002010200010201,
    0x0002010200020101, 0x0002010201000001, 0x0002010201000201, 0x0002010201010101,
    0x0002010201020000, 0x0002010201020001, 0x0002010201020201, 0x0002010202000100,
    0x0002010202000102, 0x0002010202010000, 0x0002010202010202, 0x0002020000000000,
    0x0002020000000002, 0x0002020000000200, 0x0002020000000202, 0x0002020000010101,
    0x0002020000020000, 0x0002020000020002, 0x0002020000020200, 0x0002020000020202,
    0x0002020001000101, 0x0002020001010001, 0x0002020001010100, 0x0002020001020101,
    0x0002020002000000, 0x0002020002000002, 0x0002020002000200, 0x0002020002000202,
    0x0002020002020000, 0x0002020002020002, 0x0002020002020200, 0x0002020002020202,
    0x0002020100000201, 0x0002020100010001, 0x0002020100010100, 0x0002020100010201,
    0x0002020100020101, 0x0002020101000102, 0x0002020101000201, 0x0002020101010002,
    0x0002020101010101, 0x0002020101020001, 0x0002020101020100, 0x0002020101020102,
    0x0002020101020201, 0x0002020102000101, 0x0002020102010000, 0x0002020102010102,
    0x0002020102010201, 0x0002020102020100, 0x0002020102020101, 0x0002020200000000,
    0x0002020200000002, 0x0002020200000200, 0x0002020200000202, 0x0002020200020000,
    0x0002020200020002, 0x0002020200020200, 0x0002020200020202, 0x0002020201000101,
    0x0002020201010001, 0x0002020201010102, 0x0002020201010201, 0x0002020201020101,
    0x0002020202000000, 0x0002020202000002, 0x0002020202000200, 0x0002020202000202,
    0x0002020202010101, 0x0002020202020000, 0x0002020202020002, 0x0002020202020200,
    0x0002020202020202, 0x0100000000000101, 0x0100000000010001, 0x0100000000010102,
    0x0100000000020101, 0x0100000001000201, 0x0100000001010002, 0x0100000001010101,
    0x0100000001010200, 0x0100000001010202, 0x0100000001020001, 0x0100000001020100,
    0x0100000001020102, 0x0100000002010100, 0x0100000002010201, 0x0100000002020001,
    0x0100000002020102, 0x0100000100000000, 0x0100000100000001, 0x0100000100000100,
    0x0100000100000102, 0x0100000100000201, 0x0100000100010002, 0x0100000100010101,
    0x0100000100010102, 0x0100000100010200, 0x0100000100010202, 0x0100000100020001,
    0x0100000100020102, 0x0100000100020201, 0x0100000101000101, 0x0100000101000200,
    0x0100000101000202, 0x0100000101010001, 0x0100000101010100, 0x0100000101010101,
    0x0100000101010102, 0x0100000101010201, 0x0100000101010202, 0x0100000101020101,
    0x0100000101020200, 0x0100000101020202, 0x0100000102000001, 0x0100000102000100,
    0x0100000102000102, 0x0100000102010000, 0x0100000102010002, 0x0100000102010101,
    0x0100000102020000, 0x0100000102020001, 0x0100000102020002, 0x0100000200000101,
    0x0100000200010001, 0x0100000200010100, 0x0100000200010102, 0x0100000200020101,
    0x0100000201000001, 0x0100000201010002, 0x0100000201010101, 0x0100000201010202,
    0x0100000201020100, 0x0100000201020201, 0x0100000202000201, 0x0100000202010100,
    0x0100000202020101, 0x0100010000000001, 0x0100010000010101, 0x0100010000010201,
    0x0100010000020201, 0x0100010001000101, 0x0100010001000200, 0x0100010001000202,
    0x0100010001010001, 0x0100010001010100, 0x0100010001010101, 0x0100010001010102,
    0x0100010001020001, 0x0100010001020002, 0x0100010001020101, 0x0100010001020200,
    0x0100010001020202, 0x0100010002000001, 0x0100010002000102, 0x0100010002000201,
    0x0100010002010000, 0x0100010002010002, 0x0100010002010101, 0x0100010002020000,
    0x0100010002020001, 0x0100010002020201, 0x0100010100000001, 0x0100010100000002,
    0x0100010100000101, 0x0100010100000202, 0x0100010100010001, 0x0100010100010100,
    0x0100010100010101, 0x0100010100010102, 0x0100010100010201, 0x0100010100020000,
    0x0100010100020101, 0x0100010100020202, 0x0100010101000001, 0x0100010101000100,
    0x0100010101000101, 0x0100010101000102, 0x0100010101000201, 0x0100010101010000,
    0x0100010101010001, 0x0100010101010100, 0x0100010101010101, 0x0100010101010102,
    0x0100010101010200, 0x0100010101010201, 0x0100010101020001, 0x0100010101020100,
    0x0100010101020101, 0x0100010101020102, 0x0100010101020201, 0x0100010102000002,
    0x0100010102000100, 0x0100010102000101, 0x0100010102000200, 0x0100010102010001,
    0x0100010102010100, 0x0100010102010101, 0x0100010102010102, 0x0100010102010201,
    0x0100010102010202, 0x0100010102020101, 0x0100010102020200, 0x0100010102020202,
    0x0100010200000001, 0x0100010200000101, 0x0100010200000201, 0x0100010200010100,
    0x0100010200010101, 0x0100010200010200, 0x0100010200010202, 0x0100010200020001,
    0x0100010200020100, 0x0100010200020201, 0x0100010201000000, 0x0100010201000002,
    0x0100010201000101, 0x0100010201000200, 0x0100010201010000, 0x0100010201010001,
    0x0100010201010002, 0x0100010201010101, 0x0100010201010102, 0x0100010201010201,
    0x0100010201020002, 0x0100010201020101, 0x0100010201020200, 0x0100010202000001,
    0x0100010202000101, 0x0100010202000202, 0x0100010202010100, 0x0100010202010101,
    0x0100010202020001, 0x0100010202020100, 0x0100010202020102, 0x0100020000000101,
    0x0100020000010001, 0x0100020000010101, 0x0100020000010202, 0x0100020000020101,
    0x0100020001000002, 0x0100020001000201, 0x0100020001010000, 0x0100020001010101,
    0x0100020001010200, 0x0100020001020001, 0x0100020001020100, 0x0100020001020102,
    0x0100020001020201, 0x0100020002000101, 0x0100020002010001, 0x0100020002010100,
    0x0100020002010102, 0x0100020002010201, 0x0100020002020101, 0x0100020100000001,
    0x0100020100000101, 0x0100020100000102, 0x0100020100000202, 0x0100020100010000,
    0x0100020100010100, 0x0100020100010101, 0x0100020100010200, 0x0100020100020001,
    0x0100020100020100, 0x0100020100020102, 0x0100020101000000, 0x0100020101000101,
    0x0100020101000202, 0x0100020101010001, 0x0100020101010002, 0x0100020101010100,
    0x0100020101010101, 0x0100020101010102, 0x0100020101010201, 0x0100020101020000,
    0x0100020101020002, 0x0100020101020101, 0x0100020101020102, 0x0100020101020202,
    0x0100020102000102, 0x0100020102000201, 0x0100020102010002, 0x0100020102010101,
    0x0100020102010102, 0x0100020102010200, 0x0100020102020001, 0x0100020102020100,
    0x0100020102020102, 0x0100020102020201, 0x0100020200010102, 0x0100020201000100,
    0x0100020201000102, 0x0100020201000201, 0x0100020201010101, 0x0100020201010200,
    0x0100020201010202, 0x0100020201020100, 0x0100020201020201, 0x0100020202010100,
    0x0100020202020101, 0x0101000000000001, 0x0101000000000100, 0x0101000000000101,
    0x0101000000000102, 0x0101000000000201, 0x0101000000010002, 0x0101000000010101,
    0x0101000000010202, 0x0101000000020001, 0x0101000000020100, 0x0101000000020201,
    0x0101000001000000, 0x0101000001000101, 0x0101000001000200, 0x0101000001010001,
    0x0101000001010100, 0x0101000001010101, 0x0101000001010102, 0x0101000001010201,
    0x0101000001020101, 0x0101000001020200, 0x0101000002000102, 0x0101000002000201,
    0x0101000002010101, 0x0101000002010200, 0x0101000002020000, 0x0101000002020001,
    0x0101000002020102, 0x0101000002020201, 0x0101000100000101, 0x0101000100000200,
    0x0101000100000201, 0x0101000100000202, 0x0101000100010001, 0x0101000100010100,
    0x0101000100010101, 0x0101000100010102, 0x0101000100010200, 0x0101000100010201,
    0x0101000100020000, 0x0101000100020101, 0x0101000100020102, 0x0101000100020200,
    0x0101000100020202, 0x0101000101000001, 0x0101000101000100, 0x0101000101000101,
    0x0101000101000102, 0x0101000101000201, 0x0101000101010000, 0x0101000101010001,
    0x0101000101010002, 0x0101000101010100, 0x0101000101010101, 0x0101000101010102,
    0x0101000101010200, 0x0101000101010201, 0x0101000101010202, 0x0101000101020001,
    0x0101000101020100, 0x0101000101020101, 0x0101000101020102, 0x0101000101020201,
    0x0101000102000002, 0x0101000102000101, 0x0101000102010001, 0x0101000102010100,
    0x0101000102010101, 0x0101000102010102, 0x0101000102010201, 0x0101000102020000,
    0x0101000102020101, 0x0101000102020202, 0x0101000200000001, 0x0101000200000102,
    0x0101000200010002, 0x0101000200010101, 0x0101000200010202, 0x0101000200020001,
    0x0101000200020100, 0x0101000201000002, 0x0101000201000101, 0x0101000201000202,
    0x0101000201010001, 0x0101000201010100, 0x0101000201010101, 0x0101000201010102,
    0x0101000201010201, 0x0101000201020002, 0x0101000201020101, 0x0101000202000101,
    0x0101000202010000, 0x0101000202010002, 0x0101000202010101, 0x0101000202010201,
    0x0101000202010202, 0x0101000202020100, 0x0101010000000100, 0x0101010000000101,
    0x0101010000010001, 0x0101010000010100, 0x0101010000010101, 0x0101010000010102,
    0x0101010000010200, 0x0101010000010201, 0x0101010000020001, 0x0101010000020101,
    0x0101010000020200, 0x0101010000020202, 0x0101010001000001, 0x0101010001000100,
    0x0101010001000101, 0x0101010001000102, 0x0101010001000201, 0x0101010001000202,
    0x0101010001010000, 0x0101010001010001, 0x0101010001010100, 0x0101010001010101,
    0x0101010001010102, 0x0101010001010200, 0x0101010001010201, 0x0101010001010202,
    0x0101010001020001, 0x0101010001020002, 0x0101010001020100, 0x0101010001020101,
    0x0101010001020102, 0x0101010001020201, 0x0101010002000000, 0x0101010002000200,
    0x0101010002000202, 0x0101010002010001, 0x0101010002010100, 0x0101010002010101,
    0x0101010002010102, 0x0101010002010201, 0x0101010002020001, 0x0101010002020100,
    0x0101010002020101, 0x0101010002020202, 0x0101010100000001, 0x0101010100000002,
    0x0101010100000100, 0x0101010100000101, 0x0101010100000102, 0x0101010100000201,
    0x0101010100010000, 0x0101010100010001, 0x0101010100010002, 0x0101010100010100,
    0x0101010100010101, 0x0101010100010102, 0x0101010100010201, 0x0101010100010202,
    0x0101010100020001, 0x0101010100020100, 0x0101010100020101, 0x0101010100020102,
    0x0101010100020201, 0x0101010101000000, 0x0101010101000001, 0x0101010101000002,
    0x0101010101000100, 0x0101010101000101, 0x0101010101000102, 0x0101010101000200,
    0x0101010101000201, 0x0101010101010000, 0x0101010101010001, 0x0101010101010002,
    0x0101010101010100, 0x0101010101010101, 0x0101010101010102, 0x0101010101010200,
    0x0101010101010201, 0x0101010101010202, 0x0101010101020000, 0x0101010101020001,
    0x0101010101020100, 0x0101010101020101, 0x0101010101020102, 0x0101010101020200,
    0x0101010101020201, 0x0101010101020202, 0x0101010102000001, 0x0101010102000100,
    0x0101010102000101, 0x0101010102000201, 0x0101010102000202, 0x0101010102010000,
    0x0101010102010001, 0x0101010102010100, 0x0101010102010101, 0x0101010102010102,
    0x0101010102010200, 0x0101010102010201, 0x0101010102020001, 0x0101010102020100,
    0x0101010102020101, 0x0101010102020102, 0x0101010102020201, 0x0101010200000000,
    0x0101010200000001, 0x0101010200000002, 0x0101010200000100, 0x0101010200000102,
    0x0101010200000200, 0x0101010200000201, 0x0101010200010001, 0x0101010200010100,
    0x0101010200010101, 0x0101010200010200, 0x0101010200010201, 0x0101010200020000,
    0x0101010200020001, 0x0101010200020002, 0x0101010200020100, 0x0101010200020101,
    0x0101010200020102, 0x0101010200020200, 0x0101010200020201, 0x0101010201000001,
    0x0101010201000101, 0x0101010201000102, 0x0101010201000200, 0x0101010201000201,
    0x0101010201000202, 0x0101010201010000, 0x0101010201010001, 0x0101010201010002,
    0x0101010201010100, 0x0101010201010101, 0x0101010201010102, 0x0101010201010200,
    0x0101010201010201, 0x0101010201010202, 0x0101010201020001, 0x0101010201020100,
    0x0101010201020101, 0x0101010201020201, 0x0101010202000002, 0x0101010202000101,
    0x0101010202000102, 0x0101010202000200, 0x0101010202000201, 0x0101010202000202,
    0x0101010202010001, 0x0101010202010101, 0x0101010202010202, 0x0101010202020002,
    0x0101010202020101, 0x0101010202020102, 0x0101010202020200, 0x0101010202020201,
    0x0101020000000100, 0x0101020000000101, 0x0101020000000102, 0x0101020000000201,
    0x0101020000010000, 0x0101020000010101, 0x0101020000010200, 0x0101020000020001,
    0x0101020000020202, 0x0101020001000101, 0x0101020001000200, 0x0101020001000202,
    0x0101020001010001, 0x0101020001010100, 0x0101020001010101, 0x0101020001010102,
    0x0101020001010200, 0x0101020001010201, 0x0101020001020000, 0x0101020001020002,
    0x0101020001020100, 0x0101020001020101, 0x0101020002000002, 0x0101020002000201,
    0x0101020002010000, 0x0101020002010002, 0x0101020002010101, 0x0101020002010200,
    0x0101020002020001, 0x0101020002020201, 0x0101020100000001, 0x0101020100000002,
    0x0101020100000101, 0x0101020100000202, 0x0101020100010001, 0x0101020100010100,
    0x0101020100010101, 0x0101020100010102, 0x0101020100010201, 0x0101020100020101,
    0x0101020101000001, 0x0101020101000100, 0x0101020101000101, 0x0101020101000102,
    0x0101020101000201, 0x0101020101010000, 0x0101020101010001, 0x0101020101010002,
    0x0101020101010100, 0x0101020101010101, 0x0101020101010102, 0x0101020101010200,
    0x0101020101010201, 0x0101020101010202, 0x0101020101020001, 0x0101020101020100,
    0x0101020101020101, 0x0101020101020102, 0x0101020101020201, 0x0101020102000001,
    0x0101020102000101, 0x0101020102000201, 0x0101020102010001, 0x0101020102010100,
    0x0101020102010101, 0x0101020102010102, 0x0101020102010200, 0x0101020102010201,
    0x0101020102020101, 0x0101020200000100, 0x0101020200000200, 0x0101020200010101,
    0x0101020200010202, 0x0101020200020000, 0x0101020200020101, 0x0101020200020102,
    0x0101020200020201, 0x0101020201000101, 0x0101020201000200, 0x0101020201000201,
    0x0101020201010001, 0x0101020201010101, 0x0101020201010102, 0x0101020201010200,
    0x0101020201010201, 0x0101020201020002, 0x0101020201020101, 0x0101020201020200,
    0x0101020201020202, 0x0101020202000001, 0x0101020202000202, 0x0101020202010002,
    0x0101020202010101, 0x0101020202010102, 0x0101020202010200, 0x0101020202010202,
    0x0101020202020001, 0x0102000000000101, 0x0102000000010100, 0x0102000000010102,
    0x0102000000010201, 0x0102000000020101, 0x0102000001000100, 0x0102000001010000,
    0x0102000001010101, 0x0102000001010102, 0x0102000001010200, 0x0102000001010202,
    0x0102000001020001, 0x0102000001020100, 0x0102000001020102, 0x0102000001020201,
    0x0102000002000001, 0x0102000002010102, 0x0102000002020101, 0x0102000100000001,
    0x0102000100000100, 0x0102000100000102, 0x0102000100000201, 0x0102000100010002,
    0x0102000100010101, 0x0102000100020001, 0x0102000100020002, 0x0102000100020102,
    0x0102000100020201, 0x0102000101000101, 0x0102000101000201, 0x0102000101010001,
    0x0102000101010101, 0x0102000101010102, 0x0102000101010201, 0x0102000101020101,
    0x0102000101020102, 0x0102000101020202, 0x0102000102000100, 0x0102000102000202,
    0x0102000102010002, 0x0102000102010101, 0x0102000102020001, 0x0102000102020102,
    0x0102000102020201, 0x0102000200010001, 0x0102000200010102, 0x0102000200010201,
    0x0102000201000000, 0x0102000201000001, 0x0102000201000102, 0x0102000201010101,
    0x0102000201010102, 0x0102000201010200, 0x0102000201020000, 0x0102000202000101,
    0x0102000202010001, 0x0102000202010102, 0x0102000202020101, 0x0102010000010001,
    0x0102010000010002, 0x0102010000010101, 0x0102010000010102, 0x0102010000010202,
    0x0102010000020001, 0x0102010000020102, 0x0102010000020201, 0x0102010001000000,
    0x0102010001000002, 0x0102010001000101, 0x0102010001000200, 0x0102010001000202,
    0x0102010001010001, 0x0102010001010100, 0x0102010001010101, 0x0102010001010102,
    0x0102010001010201, 0x0102010001010202, 0x0102010001020000, 0x0102010001020002,
    0x0102010001020101, 0x0102010002000100, 0x0102010002000101, 0x0102010002000201,
    0x0102010002010000, 0x0102010002010002, 0x0102010002010100, 0x0102010002010101,
    0x0102010002010102, 0x0102010002010200, 0x0102010002010202, 0x0102010002020001,
    0x0102010002020100, 0x0102010002020201, 0x0102010100000101, 0x0102010100000200,
    0x0102010100000202, 0x0102010100010001, 0x0102010100010101, 0x0102010100010102,
    0x0102010100010201, 0x0102010101000100, 0x0102010101000101, 0x0102010101000102,
    0x0102010101000201, 0x0102010101010000, 0x0102010101010001, 0x0102010101010100,
    0x0102010101010101, 0x0102010101010102, 0x0102010101010201, 0x0102010101020001,
    0x0102010101020100, 0x0102010101020101, 0x0102010101020102, 0x0102010101020201,
    0x0102010102000102, 0x0102010102000201, 0x0102010102000202, 0x0102010102010001,
    0x0102010102010101, 0x0102010102010102, 0x0102010102010201, 0x0102010102010202,
    0x0102010102020002, 0x0102010102020101, 0x0102010102020102, 0x0102010102020200,
    0x0102010200000002, 0x0102010200000201, 0x0102010200010101, 0x0102010200020000,
    0x0102010200020102, 0x0102010200020200, 0x0102010200020201, 0x0102010201000000,
    0x0102010201000101, 0x0102010201000200, 0x0102010201000202, 0x0102010201010001,
    0x0102010201010100, 0x0102010201010101, 0x0102010201010102, 0x0102010201010200,
    0x0102010201010202, 0x0102010201020000, 0x0102010201020101, 0x0102010201020200,
    0x0102010202000000, 0x0102010202000002, 0x0102010202000101, 0x0102010202000202,
    0x0102010202010100, 0x0102010202010102, 0x0102010202010200, 0x0102010202010201,
    0x0102010202020000, 0x0102010202020100, 0x0102010202020102, 0x0102010202020202,
    0x0102020000010102, 0x0102020000010201, 0x0102020000020101, 0x0102020001000001,
    0x0102020001010002, 0x0102020001010101, 0x0102020001010202, 0x0102020001020001,
    0x0102020001020201, 0x0102020002000101, 0x0102020002010001, 0x0102020002010200,
    0x0102020002020102, 0x0102020100000001, 0x0102020100000100, 0x0102020100010000,
    0x0102020100010101, 0x0102020100020001, 0x0102020100020100, 0x0102020100020102,
    0x0102020100020201, 0x0102020101000000, 0x0102020101000001, 0x0102020101000101,
    0x0102020101000102, 0x0102020101000200, 0x0102020101010001, 0x0102020101010100,
    0x0102020101010101, 0x0102020101010102, 0x0102020101010201, 0x0102020101020000,
    0x0102020101020101, 0x0102020101020202, 0x0102020102000002, 0x0102020102000100,
    0x0102020102000202, 0x0102020102010101, 0x0102020102020001, 0x0102020102020100,
    0x0102020102020101, 0x0102020102020201, 0x0102020200010001, 0x0102020200010102,
    0x0102020200010200, 0x0102020201000001, 0x0102020201000100, 0x0102020201000201,
    0x0102020201010000, 0x0102020201010101, 0x0102020201010200, 0x0102020201010202,
    0x0102020201020100, 0x0102020201020101, 0x0102020201020201, 0x0102020202000102,
    0x0102020202010100, 0x0102020202010200, 0x0102020202010202, 0x0102020202020102,
    0x0200000000000000, 0x0200000000000002, 0x0200000000000200, 0x0200000000000202,
    0x0200000000020000, 0x0200000000020002, 0x0200000000020200, 0x0200000000020202,
    0x0200000001000101, 0x0200000001010000, 0x0200000001010001, 0x0200000001010100,
    0x0200000001010102, 0x0200000001010201, 0x0200000001020101, 0x0200000002000000,
    0x0200000002000002, 0x0200000002000200, 0x0200000002000202, 0x0200000002010101,
    0x0200000002020000, 0x0200000002020002, 0x0200000002020200, 0x0200000002020202,
    0x0200000100000101, 0x0200000100010001, 0x0200000100010100, 0x0200000100010102,
    0x0200000100010201, 0x0200000100020101, 0x0200000101000001, 0x0200000101000100,
    0x0200000101000201, 0x0200000101010000, 0x0200000101010002, 0x0200000101010101,
    0x0200000101010102, 0x0200000101010200, 0x0200000101010201, 0x0200000101020100,
    0x0200000101020102, 0x0200000101020201, 0x0200000102000101, 0x0200000102000201,
    0x0200000102010100, 0x0200000102010102, 0x0200000102010201, 0x0200000102020101,
    0x0200000200000000, 0x0200000200000002, 0x0200000200000200, 0x0200000200000202,
    0x0200000200010101, 0x0200000200020000, 0x0200000200020002, 0x0200000200020200,
    0x0200000200020202, 0x0200000201010001, 0x0200000201010100, 0x0200000201010201,
    0x0200000201020101, 0x0200000202000000, 0x0200000202000002, 0x0200000202000200,
    0x0200000202000202, 0x0200000202010101, 0x0200000202020000, 0x0200000202020002,
    0x0200000202020200, 0x0200000202020202, 0x0200010000010100, 0x0200010000010201,
    0x0200010001000001, 0x0200010001000100, 0x0200010001010001, 0x0200010001010101,
    0x0200010001010202, 0x0200010001020001, 0x0200010001020100, 0x0200010001020201,
    0x0200010002010100, 0x0200010002010201, 0x0200010100000001, 0x0200010100000201,
    0x0200010100010002, 0x0200010100010101, 0x0200010100010202, 0x0200010100020102,
    0x0200010100020201, 0x0200010101000000, 0x0200010101000001, 0x0200010101000101,
    0x0200010101000200, 0x0200010101010001, 0x0200010101010100, 0x0200010101010101,
    0x0200010101010102, 0x0200010101010201, 0x0200010101010202, 0x0200010101020101,
    0x0200010101020102, 0x0200010101020200, 0x0200010101020202, 0x0200010102000001,
    0x0200010102000100, 0x0200010102000102, 0x0200010102000201, 0x0200010102010000,
    0x0200010102010002, 0x0200010102010101, 0x0200010102010200, 0x0200010102020102,
    0x0200010200010001, 0x0200010200010102, 0x0200010200010201, 0x0200010200020101,
    0x0200010201000001, 0x0200010201000100, 0x0200010201000201, 0x0200010201000202,
    0x0200010201010000, 0x0200010201010101, 0x0200010201010201, 0x0200010201010202,
    0x0200010201020001, 0x0200010201020102, 0x0200010201020202, 0x0200010202000101,
    0x0200010202010001, 0x0200010202010202, 0x0200010202020100, 0x0200020000000000,
    0x0200020000000002, 0x0200020000000200, 0x0200020000000202, 0x0200020000010101,
    0x0200020000020000, 0x0200020000020002, 0x0200020000020200, 0x0200020000020202,
    0x0200020001000001, 0x0200020001000101, 0x0200020001010001, 0x0200020001010100,
    0x0200020001010201, 0x0200020001020101, 0x0200020001020201, 0x0200020002000000,
    0x0200020002000002, 0x0200020002000200, 0x0200020002000202, 0x0200020002010101,
    0x0200020002020000, 0x0200020002020002, 0x0200020002020200, 0x0200020002020202,
    0x0200020100000101, 0x0200020100000102, 0x0200020100010001, 0x0200020100010100,
    0x0200020100010102, 0x0200020100020101, 0x0200020101000001, 0x0200020101000100,
    0x0200020101000102, 0x0200020101000201, 0x0200020101010000, 0x0200020101010002,
    0x0200020101010101, 0x0200020101010202, 0x0200020101020001, 0x0200020101020100,
    0x0200020102000101, 0x0200020102010102, 0x0200020102010201, 0x0200020102020101,
    0x0200020200000000, 0x0200020200000002, 0x0200020200000200, 0x0200020200000202,
    0x0200020200010101, 0x0200020200020000, 0x0200020200020002, 0x0200020200020200,
    0x0200020200020202, 0x0200020201000101, 0x0200020201010001, 0x0200020201010100,
    0x0200020201010102, 0x0200020202000000, 0x0200020202000002, 0x0200020202000200,
    0x0200020202000202, 0x0200020202010101, 0x0200020202020000, 0x0200020202020002,
    0x0200020202020200, 0x0200020202020202, 0x0201000000000101, 0x0201000000010001,
    0x0201000000010102, 0x0201000000010200, 0x0201000000010201, 0x0201000000020101,
    0x0201000001000001, 0x0201000001000102, 0x0201000001000201, 0x0201000001010101,
    0x0201000001010200, 0x0201000001010202, 0x0201000001020201, 0x0201000001020202,
    0x0201000002000101, 0x0201000002010001, 0x0201000002010100, 0x0201000002010102,
    0x0201000002010201, 0x0201000002020101, 0x0201000100000001, 0x0201000100000100,
    0x0201000100000102, 0x0201000100000201, 0x0201000100010000, 0x0201000100010101,
    0x0201000100010200, 0x0201000100010202, 0x0201000100020001, 0x0201000100020100,
    0x0201000100020102, 0x0201000100020201, 0x0201000101000000, 0x0201000101000101,
    0x0201000101010000, 0x0201000101010001, 0x0201000101010100, 0x0201000101010101,
    0x0201000101010102, 0x0201000101010201, 0x0201000101020002, 0x0201000101020101,
    0x0201000102000100, 0x0201000102000102, 0x0201000102010002, 0x0201000102010101,
    0x0201000102010200, 0x0201000102020001, 0x0201000102020100, 0x0201000102020102,
    0x0201000102020201, 0x0201000200000101, 0x0201000200010001, 0x0201000200010100,
    0x0201000200010201, 0x0201000200020101, 0x0201000201000100, 0x0201000201000102,
    0x0201000201000201, 0x0201000201010000, 0x0201000201010002, 0x0201000201010101,
    0x0201000201010200, 0x0201000201020102, 0x0201000201020201, 0x0201000202000101,
    0x0201000202010100, 0x0201000202010102, 0x0201000202020201, 0x0201010000000001,
    0x0201010000000100, 0x0201010000000102, 0x0201010000010000, 0x0201010000010101,
    0x0201010000010200, 0x0201010000020102, 0x0201010001000000, 0x0201010001000202,
    0x0201010001010001, 0x0201010001010100, 0x0201010001010101, 0x0201010001010102,
    0x0201010001010200, 0x0201010001010201, 0x0201010001020000, 0x0201010001020001,
    0x0201010001020002, 0x0201010001020101, 0x0201010002000100, 0x0201010002000102,
    0x0201010002010002, 0x0201010002010100, 0x0201010002010101, 0x0201010002010200,
    0x0201010002020001, 0x0201010002020201, 0x0201010100000000, 0x0201010100000101,
    0x0201010100000200, 0x0201010100000202, 0x0201010100010000, 0x0201010100010001,
    0x0201010100010100, 0x0201010100010101, 0x0201010100010102, 0x0201010100010201,
    0x0201010100020001, 0x0201010100020101, 0x0201010100020201, 0x0201010100020202,
    0x0201010101000001, 0x0201010101000100, 0x0201010101000101, 0x0201010101000102,
    0x0201010101000201, 0x0201010101010000, 0x0201010101010001, 0x0201010101010002,
    0x0201010101010100, 0x0201010101010101, 0x0201010101010102, 0x0201010101010200,
    0x0201010101010201, 0x0201010101010202, 0x0201010101020001, 0x0201010101020100,
    0x0201010101020101, 0x0201010101020102, 0x0201010101020201, 0x0201010102000001,
    0x0201010102000101, 0x0201010102000200, 0x0201010102010001, 0x0201010102010002,
    0x0201010102010100, 0x0201010102010101, 0x0201010102010102, 0x0201010102010201,
    0x0201010102010202, 0x0201010102020000, 0x0201010102020002, 0x0201010102020101,
    0x0201010102020200, 0x0201010102020202, 0x0201010200000001, 0x0201010200000100,
    0x0201010200010000, 0x0201010200010101, 0x0201010200010201, 0x0201010200020000,
    0x0201010200020102, 0x0201010200020201, 0x0201010201000101, 0x0201010201000200,
    0x0201010201000201, 0x0201010201010001, 0x0201010201010002, 0x0201010201010101,
    0x0201010201010102, 0x0201010201010201, 0x0201010201020101, 0x0201010201020200,
    0x0201010202000002, 0x0201010202000100, 0x0201010202000201, 0x0201010202000202,
    0x0201010202010002, 0x0201010202010100, 0x0201010202010101, 0x0201010202020100,
    0x0201010202020102, 0x0201010202020201, 0x0201020000000101, 0x0201020000010102,
    0x0201020000010201, 0x0201020000020101, 0x0201020001000001, 0x0201020001000102,
    0x0201020001010000, 0x0201020001010002, 0x0201020001010101, 0x0201020001010102,
    0x0201020001010202, 0x0201020001020100, 0x0201020001020101, 0x0201020002000101,
    0x0201020002010001, 0x0201020002010102, 0x0201020002010201, 0x0201020002020101,
    0x0201020100000100, 0x0201020100000102, 0x0201020100000201, 0x0201020100010000,
    0x0201020100010002, 0x0201020100010101, 0x0201020100010200, 0x0201020100010202,
    0x0201020100020000, 0x0201020100020001, 0x0201020100020100, 0x0201020100020102,
    0x0201020101000000, 0x0201020101000002, 0x0201020101000101, 0x0201020101000200,
    0x0201020101000202, 0x0201020101010001, 0x0201020101010100, 0x0201020101010101,
    0x0201020101010102, 0x0201020101010201, 0x0201020101020002, 0x0201020101020101,
    0x0201020101020102, 0x0201020101020202, 0x0201020102000001, 0x0201020102000100,
    0x0201020102010000, 0x0201020102010002, 0x0201020102010101, 0x0201020102010202,
    0x0201020102020001, 0x0201020102020102, 0x0201020200000101, 0x0201020200010101,
    0x0201020200020101, 0x0201020201000100, 0x0201020201000102, 0x0201020201000201,
    0x0201020201010000, 0x0201020201010101, 0x0201020201010200, 0x0201020201020001,
    0x0201020202000101, 0x0201020202010001, 0x0201020202010100, 0x0201020202010101,
    0x0201020202010102, 0x0202000000000000, 0x0202000000000002, 0x0202000000000200,
    0x0202000000000202, 0x0202000000010101, 0x0202000000020000, 0x0202000000020002,
    0x0202000000020200, 0x0202000000020202, 0x0202000001000101, 0x0202000001010001,
    0x0202000001010100, 0x0202000001010102, 0x0202000001010201, 0x0202000002000000,
    0x0202000002000002, 0x0202000002000200, 0x0202000002000202, 0x0202000002010101,
    0x0202000002020000, 0x0202000002020002, 0x0202000002020200, 0x0202000002020202,
    0x0202000100000101, 0x0202000100000201, 0x0202000100010001, 0x0202000100010100,
    0x0202000100010102, 0x0202000100010201, 0x0202000100010202, 0x0202000101000102,
    0x0202000101000201, 0x0202000101010001, 0x0202000101010101, 0x0202000101010200,
    0x0202000101010202, 0x0202000101020001, 0x0202000101020100, 0x0202000102000101,
    0x0202000102010000, 0x0202000102010002, 0x0202000102010102, 0x0202000102010201,
    0x0202000200000002, 0x0202000200000200, 0x0202000200000202, 0x0202000200010000,
    0x0202000200010201, 0x0202000200020002, 0x0202000200020200, 0x0202000200020202,
    0x0202000201000101, 0x0202000201010001, 0x0202000201010102, 0x0202000201010201,
    0x0202000201020101, 0x0202000202000000, 0x0202000202000002, 0x0202000202000200,
    0x0202000202000202, 0x0202000202010101, 0x0202000202020000, 0x0202000202020002,
    0x0202000202020200, 0x0202000202020202, 0x0202010000010201, 0x0202010000020101,
    0x0202010001000001, 0x0202010001000100, 0x0202010001010000, 0x0202010001010100,
    0x0202010001010101, 0x0202010001010200, 0x0202010001010202, 0x0202010001020001,
    0x0202010001020101, 0x0202010001020102, 0x0202010001020200, 0x0202010001020201,
    0x0202010002000101, 0x0202010100000102, 0x0202010100000201, 0x0202010100010000,
    0x0202010100010002, 0x0202010100010101, 0x0202010100010200, 0x0202010100020102,
    0x0202010100020201, 0x0202010101000002, 0x0202010101000101, 0x0202010101010001,
    0x0202010101010100, 0x0202010101010101, 0x0202010101010102, 0x0202010101010201,
    0x0202010101020101, 0x0202010101020202, 0x0202010102000001, 0x0202010102000100,
    0x0202010102000101, 0x0202010102000102, 0x0202010102000201, 0x0202010102010002,
    0x0202010102010101, 0x0202010102010200, 0x0202010200000101, 0x0202010200010001,
    0x0202010200010102, 0x0202010200010202, 0x0202010200020001, 0x0202010200020101,
    0x0202010201000100, 0x0202010201000102, 0x0202010201000202, 0x0202010201010002,
    0x0202010201010101, 0x0202010201010102, 0x0202010201010200, 0x0202010201020000,
    0x0202010201020002, 0x0202010202000102, 0x0202010202010000, 0x0202010202010101,
    0x0202010202010102, 0x0202010202010201, 0x0202010202020001, 0x0202010202020100,
    0x0202010202020102, 0x0202020000000000, 0x0202020000000002, 0x0202020000000200,
    0x0202020000000202, 0x0202020000020000, 0x0202020000020002, 0x0202020000020200,
    0x0202020000020202, 0x0202020001010001, 0x0202020001010100, 0x0202020001010102,
    0x0202020001010201, 0x0202020002000000, 0x0202020002000002, 0x0202020002000200,
    0x0202020002000202, 0x0202020002010101, 0x0202020002020000, 0x0202020002020002,
    0x0202020002020200, 0x0202020002020202, 0x0202020100000101, 0x0202020100010100,
    0x0202020100010201, 0x0202020100020001, 0x0202020100020101, 0x0202020101000001,
    0x0202020101010000, 0x0202020101010101, 0x0202020101010202, 0x0202020101020001,
    0x0202020101020102, 0x0202020101020201, 0x0202020102010000, 0x0202020102010102,
    0x0202020200000000, 0x0202020200000002, 0x0202020200000200, 0x0202020200000202,
    0x0202020200020000, 0x0202020200020002, 0x0202020200020200, 0x0202020200020202,
    0x0202020201010001, 0x0202020201010100, 0x0202020201010102, 0x0202020202000000,
    0x0202020202000002, 0x0202020202000200, 0x0202020202000202, 0x0202020202010101,
    0x0202020202020000, 0x0202020202020002, 0x0202020202020200, 0x0202020202020202,
};
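
// A minimal sketch of how one table entry expands, assuming the usual IQ1_S
// convention that each byte stores the ternary grid value plus 1. The helper
// name is hypothetical and it is not used by the kernels in this file.
[[maybe_unused]] static inline void iq1s_expand_entry_sketch(uint64_t entry, int8_t * values) {
    for (int j = 0; j < 8; ++j) {
        values[j] = (int8_t)((entry >> 8*j) & 0xff) - 1; // 0,1,2 -> -1,0,+1
    }
}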
#ifdef __aarch64__
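// NEON variant of the same grid, nibble-packed into 32 bits per entry: the low
// nibble of byte j holds grid value j and the high nibble of byte j holds grid
// value j+4 (compare entry 25: 0x0000000100000100 above vs 0x00000110 here).
// A sketch of the split, kept as a comment since it is illustrative only and
// assumes arm_neon.h is in scope:
//
//     uint8x16_t packed = vreinterpretq_u8_u32(vld1q_u32(iq1s_grid_us_neon + i)); // 4 entries
//     uint8x16_t lo = vandq_u8(packed, vdupq_n_u8(0x0f)); // values 0..3 of each entry
//     uint8x16_t hi = vshrq_n_u8(packed, 4);              // values 4..7 of each entry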
static const uint32_t iq1s_grid_us_neon[2048] = {
    0x00000000, 0x00000002, 0x00000101, 0x00000200, 0x00000202, 0x00010001, 0x00010101, 0x00020000,
    0x00020002, 0x00020200, 0x00020202, 0x01000101, 0x01010001, 0x01010100, 0x01010102, 0x01020101,
    0x02000000, 0x02000002, 0x02000200, 0x02000202, 0x02010101, 0x02020000, 0x02020002, 0x02020200,
    0x02020202, 0x00000110, 0x00000111, 0x00010011, 0x00010110, 0x00010112, 0x00010211, 0x00010212,
    0x00020111, 0x01000011, 0x01000112, 0x01000211, 0x01010012, 0x01010111, 0x01010212, 0x01020011,
    0x01020110, 0x01020112, 0x01020210, 0x02000111, 0x02010011, 0x02010110, 0x02010112, 0x02020111,
    0x00000020, 0x00000022, 0x00000220, 0x00000222, 0x00010121, 0x00020020, 0x00020022, 0x00020220,
    0x00020222, 0x01000121, 0x01010021, 0x01010221, 0x01020120, 0x01020221, 0x02000020, 0x02000022,
    0x02000220, 0x02000222, 0x02010021, 0x02010121, 0x02010221, 0x02020020, 0x02020022, 0x02020220,
    0x02020222, 0x00011001, 0x00011100, 0x00011102, 0x00021101, 0x01001001, 0x01001201, 0x01011101,
    0x01011202, 0x01021100, 0x01021101, 0x02011001, 0x02011201, 0x02021101, 0x00001011, 0x00001110,
    0x00001111, 0x00001112, 0x00011111, 0x00011210, 0x00011212, 0x00021211, 0x01001010, 0x01001111,
    0x01001212, 0x01011010, 0x01011011, 0x01011110, 0x01011111, 0x01011112, 0x01011211, 0x01021010,
    0x01021012, 0x01021111, 0x01021210, 0x01021212, 0x02001011, 0x02011011, 0x02011111, 0x02011210,
    0x02011212, 0x02021011, 0x02021110, 0x02021111, 0x02021112, 0x02021211, 0x00011120, 0x00011221,
    0x01001021, 0x01001120, 0x01011020, 0x01011022, 0x01011121, 0x01011220, 0x01021020, 0x01021021,
    0x01021122, 0x01021221, 0x02001121, 0x02011021, 0x02011120, 0x02011221, 0x00002000, 0x00002002,
    0x00002200, 0x00002202, 0x00012101, 0x00022000, 0x00022002, 0x00022200, 0x00022202, 0x01002101,
    0x01012001, 0x01012102, 0x01022101, 0x02002000, 0x02002002, 0x02002200, 0x02002202, 0x02012101,
    0x02022000, 0x02022002, 0x02022200, 0x02022202, 0x00002111, 0x00012011, 0x00012110, 0x00012211,
    0x00022110, 0x00022111, 0x01002011, 0x01012010, 0x01012011, 0x01012111, 0x01022011, 0x01022110,
    0x01022211, 0x02012011, 0x02012110, 0x02012112, 0x02012211, 0x02022111, 0x00002020, 0x00002022,
    0x00002220, 0x00002222, 0x00012121, 0x00022020, 0x00022022, 0x00022220, 0x00022222, 0x01002121,
    0x01012021, 0x01012221, 0x01022021, 0x01022121, 0x02002020, 0x02002022, 0x02002121, 0x02002220,
    0x02002222, 0x02012121, 0x02022020, 0x02022022, 0x02022220, 0x02022222, 0x00110000, 0x00110001,
    0x00110100, 0x00110201, 0x00120100, 0x00120101, 0x01100001, 0x01100100, 0x01110000, 0x01110101,
    0x01110200, 0x01120001, 0x01120100, 0x01120101, 0x01120201, 0x02110001, 0x02110100, 0x02110102,
    0x02120001, 0x02120101, 0x00100011, 0x00100110, 0x00100112, 0x00100211, 0x00110010, 0x00110012,
    0x00110111, 0x00110210, 0x00120011, 0x00120110, 0x00120211, 0x01100111, 0x01100212, 0x01110010,
    0x01110011, 0x01110012, 0x01110110, 0x01110111, 0x01110112, 0x01110211, 0x01120010, 0x01120111,
    0x02100110, 0x02110012, 0x02110111, 0x02120011, 0x02120110, 0x00110021, 0x00110120, 0x00110122,
    0x00120121, 0x01100020, 0x01100122, 0x01100221, 0x01110022, 0x01110121, 0x01110220, 0x01110222,
    0x01120120, 0x01120122, 0x02100121, 0x02110021, 0x02110120, 0x02110122, 0x02120121, 0x00101001,
    0x00101102, 0x00101201, 0x00111100, 0x00111101, 0x00111200, 0x00111201, 0x00121001, 0x00121102,
    0x01101001, 0x01101101, 0x01101102, 0x01101200, 0x01101202, 0x01111001, 0x01111100, 0x01111101,
    0x01111102, 0x01111201, 0x01121002, 0x01121101, 0x01121200, 0x02101100, 0x02101201, 0x02111000,
    0x02111100, 0x02111101, 0x02111200, 0x02111201, 0x02111202, 0x02121001, 0x02121100, 0x02121101,
    0x02121201, 0x00101012, 0x00101111, 0x00101212, 0x00111011, 0x00111110, 0x00111111, 0x00111112,
    0x00111211, 0x00121010, 0x00121012, 0x00121111, 0x00121210, 0x00121212, 0x01101011, 0x01101110,
    0x01101111, 0x01101112, 0x01111011, 0x01111012, 0x01111110, 0x01111111, 0x01111112, 0x01111211,
    0x01111212, 0x01121011, 0x01121110, 0x01121111, 0x01121112, 0x01121211, 0x02101010, 0x02101012,
    0x02101110, 0x02101111, 0x02101210, 0x02101212, 0x02111010, 0x02111011, 0x02111110, 0x02111111,
    0x02111112, 0x02111211, 0x02111212, 0x02121010, 0x02121012, 0x02121111, 0x00101021, 0x00101120,
    0x00101121, 0x00101122, 0x00111121, 0x00111122, 0x00111220, 0x00111222, 0x00121021, 0x00121122,
    0x01101020, 0x01101022, 0x01101120, 0x01101121, 0x01101220, 0x01101222, 0x01111021, 0x01111121,
    0x01111122, 0x01111220, 0x01111221, 0x01121021, 0x01121120, 0x01121121, 0x01121220, 0x01121221,
    0x01121222, 0x02101122, 0x02101222, 0x02111022, 0x02111121, 0x02121120, 0x02121221, 0x00112001,
    0x00112102, 0x00122101, 0x01102001, 0x01102100, 0x01102102, 0x01102201, 0x01112000, 0x01112101,
    0x01112200, 0x01112202, 0x01122000, 0x01122001, 0x01122100, 0x01122102, 0x01122201, 0x02102101,
    0x02112001, 0x02112100, 0x02122101, 0x00112010, 0x00112012, 0x00112111, 0x00112212, 0x00122011,
    0x00122111, 0x01102012, 0x01102110, 0x01102111, 0x01102210, 0x01112011, 0x01112110, 0x01112111,
    0x01112112, 0x01112211, 0x01112212, 0x01122010, 0x01122111, 0x01122212, 0x02102211, 0x02112011,
    0x02112012, 0x02112111, 0x02112210, 0x02122011, 0x02122112, 0x02122211, 0x00102221, 0x00112122,
    0x00122120, 0x00122122, 0x01102120, 0x01102122, 0x01102221, 0x01112020, 0x01112022, 0x01112121,
    0x01112220, 0x01122021, 0x01122122, 0x01122221, 0x02102121, 0x02112021, 0x02112122, 0x02112222,
    0x00200000, 0x00200002, 0x00200200, 0x00200202, 0x00210101, 0x00220000, 0x00220002, 0x00220101,
    0x00220200, 0x00220202, 0x01200101, 0x01210001, 0x01210201, 0x01220001, 0x01220101, 0x02200000,
    0x02200002, 0x02200200, 0x02200202, 0x02210101, 0x02220000, 0x02220002, 0x02220101, 0x02220200,
    0x02220202, 0x00200111, 0x00210011, 0x00210110, 0x00210211, 0x00220111, 0x01200012, 0x01200110,
    0x01200211, 0x01210111, 0x01210210, 0x01210212, 0x01220011, 0x01220110, 0x01220111, 0x01220112,
    0x02200111, 0x02210010, 0x02210112, 0x02210211, 0x02220111, 0x00200021, 0x00200220, 0x00200222,
    0x00210021, 0x00210121, 0x00220020, 0x00220022, 0x00220220, 0x00220222, 0x01200121, 0x01210021,
    0x01210122, 0x01210221, 0x01220121, 0x02200021, 0x02200220, 0x02200222, 0x02210021, 0x02210121,
    0x02220020, 0x02220022, 0x02220220, 0x02220222, 0x00201101, 0x00211100, 0x00211102, 0x00211201,
    0x00221101, 0x01201100, 0x01201101, 0x01201102, 0x01201201, 0x01211002, 0x01211101, 0x01211200,
    0x01211202, 0x01221102, 0x02201101, 0x02211001, 0x02211100, 0x02211201, 0x02221001, 0x02221101,
    0x00201211, 0x00211111, 0x00221011, 0x00221211, 0x01201010, 0x01201111, 0x01201210, 0x01211011,
    0x01211110, 0x01211111, 0x01211211, 0x01221012, 0x01221111, 0x01221210, 0x02201211, 0x02211010,
    0x02211110, 0x02211111, 0x02211210, 0x02211212, 0x02221011, 0x02221110, 0x02221112, 0x02221211,
    0x00201121, 0x00211020, 0x00211022, 0x00211221, 0x00221121, 0x01201021, 0x01201221, 0x01211121,
    0x01221020, 0x01221021, 0x01221221, 0x02201120, 0x02201122, 0x02211020, 0x02211222, 0x00202000,
    0x00202002, 0x00202200, 0x00202202, 0x00212101, 0x00222000, 0x00222002, 0x00222200, 0x00222202,
    0x01202101, 0x01212001, 0x01212100, 0x01222101, 0x02202000, 0x02202002, 0x02202200, 0x02202202,
    0x02222000, 0x02222002, 0x02222200, 0x02222202, 0x00202211, 0x00212011, 0x00212110, 0x00212211,
    0x00222111, 0x01202112, 0x01202211, 0x01212012, 0x01212111, 0x01222011, 0x01222110, 0x01222112,
    0x01222211, 0x02202111, 0x02212010, 0x02212112, 0x02212211, 0x02222110, 0x02222111, 0x00202020,
    0x00202022, 0x00202220, 0x00202222, 0x00222020, 0x00222022, 0x00222220, 0x00222222, 0x01202121,
    0x01212021, 0x01212122, 0x01212221, 0x01222121, 0x02202020, 0x02202022, 0x02202220, 0x02202222,
    0x02212121, 0x02222020, 0x02222022, 0x02222220, 0x02222222, 0x10000101, 0x10010001, 0x10010102,
    0x10020101, 0x11000201, 0x11010002, 0x11010101, 0x11010200, 0x11010202, 0x11020001, 0x11020100,
    0x11020102, 0x12010100, 0x12010201, 0x12020001, 0x12020102, 0x10000010, 0x10000011, 0x10000110,
    0x10000112, 0x10000211, 0x10010012, 0x10010111, 0x10010112, 0x10010210, 0x10010212, 0x10020011,
    0x10020112, 0x10020211, 0x11000111, 0x11000210, 0x11000212, 0x11010011, 0x11010110, 0x11010111,
    0x11010112, 0x11010211, 0x11010212, 0x11020111, 0x11020210, 0x11020212, 0x12000011, 0x12000110,
    0x12000112, 0x12010010, 0x12010012, 0x12010111, 0x12020010, 0x12020011, 0x12020012, 0x10000121,
    0x10010021, 0x10010120, 0x10010122, 0x10020121, 0x11000021, 0x11010022, 0x11010121, 0x11010222,
    0x11020120, 0x11020221, 0x12000221, 0x12010120, 0x12020121, 0x10001001, 0x10011101, 0x10011201,
    0x10021201, 0x11001101, 0x11001200, 0x11001202, 0x11011001, 0x11011100, 0x11011101, 0x11011102,
    0x11021001, 0x11021002, 0x11021101, 0x11021200, 0x11021202, 0x12001001, 0x12001102, 0x12001201,
    0x12011000, 0x12011002, 0x12011101, 0x12021000, 0x12021001, 0x12021201, 0x10001011, 0x10001012,
    0x10001111, 0x10001212, 0x10011011, 0x10011110, 0x10011111, 0x10011112, 0x10011211, 0x10021010,
    0x10021111, 0x10021212, 0x11001011, 0x11001110, 0x11001111, 0x11001112, 0x11001211, 0x11011010,
    0x11011011, 0x11011110, 0x11011111, 0x11011112, 0x11011210, 0x11011211, 0x11021011, 0x11021110,
    0x11021111, 0x11021112, 0x11021211, 0x12001012, 0x12001110, 0x12001111, 0x12001210, 0x12011011,
    0x12011110, 0x12011111, 0x12011112, 0x12011211, 0x12011212, 0x12021111, 0x12021210, 0x12021212,
    0x10001021, 0x10001121, 0x10001221, 0x10011120, 0x10011121, 0x10011220, 0x10011222, 0x10021021,
    0x10021120, 0x10021221, 0x11001020, 0x11001022, 0x11001121, 0x11001220, 0x11011020, 0x11011021,
    0x11011022, 0x11011121, 0x11011122, 0x11011221, 0x11021022, 0x11021121, 0x11021220, 0x12001021,
    0x12001121, 0x12001222, 0x12011120, 0x12011121, 0x12021021, 0x12021120, 0x12021122, 0x10002101,
    0x10012001, 0x10012101, 0x10012202, 0x10022101, 0x11002002, 0x11002201, 0x11012000, 0x11012101,
    0x11012200, 0x11022001, 0x11022100, 0x11022102, 0x11022201, 0x12002101, 0x12012001, 0x12012100,
    0x12012102, 0x12012201, 0x12022101, 0x10002011, 0x10002111, 0x10002112, 0x10002212, 0x10012010,
    0x10012110, 0x10012111, 0x10012210, 0x10022011, 0x10022110, 0x10022112, 0x11002010, 0x11002111,
    0x11002212, 0x11012011, 0x11012012, 0x11012110, 0x11012111, 0x11012112, 0x11012211, 0x11022010,
    0x11022012, 0x11022111, 0x11022112, 0x11022212, 0x12002112, 0x12002211, 0x12012012, 0x12012111,
    0x12012112, 0x12012210, 0x12022011, 0x12022110, 0x12022112, 0x12022211, 0x10012122, 0x11002120,
    0x11002122, 0x11002221, 0x11012121, 0x11012220, 0x11012222, 0x11022120, 0x11022221, 0x12012120,
    0x12022121, 0x10100001, 0x10100100, 0x10100101, 0x10100102, 0x10100201, 0x10110002, 0x10110101,
    0x10110202, 0x10120001, 0x10120100, 0x10120201, 0x11100000, 0x11100101, 0x11100200, 0x11110001,
    0x11110100, 0x11110101, 0x11110102, 0x11110201, 0x11120101, 0x11120200, 0x12100102, 0x12100201,
    0x12110101, 0x12110200, 0x12120000, 0x12120001, 0x12120102, 0x12120201, 0x10100111, 0x10100210,
    0x10100211, 0x10100212, 0x10110011, 0x10110110, 0x10110111, 0x10110112, 0x10110210, 0x10110211,
    0x10120010, 0x10120111, 0x10120112, 0x10120210, 0x10120212, 0x11100011, 0x11100110, 0x11100111,
    0x11100112, 0x11100211, 0x11110010, 0x11110011, 0x11110012, 0x11110110, 0x11110111, 0x11110112,
    0x11110210, 0x11110211, 0x11110212, 0x11120011, 0x11120110, 0x11120111, 0x11120112, 0x11120211,
    0x12100012, 0x12100111, 0x12110011, 0x12110110, 0x12110111, 0x12110112, 0x12110211, 0x12120010,
    0x12120111, 0x12120212, 0x10100021, 0x10100122, 0x10110022, 0x10110121, 0x10110222, 0x10120021,
    0x10120120, 0x11100022, 0x11100121, 0x11100222, 0x11110021, 0x11110120, 0x11110121, 0x11110122,
    0x11110221, 0x11120022, 0x11120121, 0x12100121, 0x12110020, 0x12110022, 0x12110121, 0x12110221,
    0x12110222, 0x12120120, 0x10101100, 0x10101101, 0x10111001, 0x10111100, 0x10111101, 0x10111102,
    0x10111200, 0x10111201, 0x10121001, 0x10121101, 0x10121200, 0x10121202, 0x11101001, 0x11101100,
    0x11101101, 0x11101102, 0x11101201, 0x11101202, 0x11111000, 0x11111001, 0x11111100, 0x11111101,
    0x11111102, 0x11111200, 0x11111201, 0x11111202, 0x11121001, 0x11121002, 0x11121100, 0x11121101,
    0x11121102, 0x11121201, 0x12101000, 0x12101200, 0x12101202, 0x12111001, 0x12111100, 0x12111101,
    0x12111102, 0x12111201, 0x12121001, 0x12121100, 0x12121101, 0x12121202, 0x10101011, 0x10101012,
    0x10101110, 0x10101111, 0x10101112, 0x10101211, 0x10111010, 0x10111011, 0x10111012, 0x10111110,
    0x10111111, 0x10111112, 0x10111211, 0x10111212, 0x10121011, 0x10121110, 0x10121111, 0x10121112,
    0x10121211, 0x11101010, 0x11101011, 0x11101012, 0x11101110, 0x11101111, 0x11101112, 0x11101210,
    0x11101211, 0x11111010, 0x11111011, 0x11111012, 0x11111110, 0x11111111, 0x11111112, 0x11111210,
    0x11111211, 0x11111212, 0x11121010, 0x11121011, 0x11121110, 0x11121111, 0x11121112, 0x11121210,
    0x11121211, 0x11121212, 0x12101011, 0x12101110, 0x12101111, 0x12101211, 0x12101212, 0x12111010,
    0x12111011, 0x12111110, 0x12111111, 0x12111112, 0x12111210, 0x12111211, 0x12121011, 0x12121110,
    0x12121111, 0x12121112, 0x12121211, 0x10101020, 0x10101021, 0x10101022, 0x10101120, 0x10101122,
    0x10101220, 0x10101221, 0x10111021, 0x10111120, 0x10111121, 0x10111220, 0x10111221, 0x10121020,
    0x10121021, 0x10121022, 0x10121120, 0x10121121, 0x10121122, 0x10121220, 0x10121221, 0x11101021,
    0x11101121, 0x11101122, 0x11101220, 0x11101221, 0x11101222, 0x11111020, 0x11111021, 0x11111022,
    0x11111120, 0x11111121, 0x11111122, 0x11111220, 0x11111221, 0x11111222, 0x11121021, 0x11121120,
    0x11121121, 0x11121221, 0x12101022, 0x12101121, 0x12101122, 0x12101220, 0x12101221, 0x12101222,
    0x12111021, 0x12111121, 0x12111222, 0x12121022, 0x12121121, 0x12121122, 0x12121220, 0x12121221,
    0x10102100, 0x10102101, 0x10102102, 0x10102201, 0x10112000, 0x10112101, 0x10112200, 0x10122001,
    0x10122202, 0x11102101, 0x11102200, 0x11102202, 0x11112001, 0x11112100, 0x11112101, 0x11112102,
    0x11112200, 0x11112201, 0x11122000, 0x11122002, 0x11122100, 0x11122101, 0x12102002, 0x12102201,
    0x12112000, 0x12112002, 0x12112101, 0x12112200, 0x12122001, 0x12122201, 0x10102011, 0x10102012,
    0x10102111, 0x10102212, 0x10112011, 0x10112110, 0x10112111, 0x10112112, 0x10112211, 0x10122111,
    0x11102011, 0x11102110, 0x11102111, 0x11102112, 0x11102211, 0x11112010, 0x11112011, 0x11112012,
    0x11112110, 0x11112111, 0x11112112, 0x11112210, 0x11112211, 0x11112212, 0x11122011, 0x11122110,
    0x11122111, 0x11122112, 0x11122211, 0x12102011, 0x12102111, 0x12102211, 0x12112011, 0x12112110,
    0x12112111, 0x12112112, 0x12112210, 0x12112211, 0x12122111, 0x10102120, 0x10102220, 0x10112121,
    0x10112222, 0x10122020, 0x10122121, 0x10122122, 0x10122221, 0x11102121, 0x11102220, 0x11102221,
    0x11112021, 0x11112121, 0x11112122, 0x11112220, 0x11112221, 0x11122022, 0x11122121, 0x11122220,
    0x11122222, 0x12102021, 0x12102222, 0x12112022, 0x12112121, 0x12112122, 0x12112220, 0x12112222,
    0x12122021, 0x10200101, 0x10210100, 0x10210102, 0x10210201, 0x10220101, 0x11200100, 0x11210000,
    0x11210101, 0x11210102, 0x11210200, 0x11210202, 0x11220001, 0x11220100, 0x11220102, 0x11220201,
    0x12200001, 0x12210102, 0x12220101, 0x10200011, 0x10200110, 0x10200112, 0x10200211, 0x10210012,
    0x10210111, 0x10220011, 0x10220012, 0x10220112, 0x10220211, 0x11200111, 0x11200211, 0x11210011,
    0x11210111, 0x11210112, 0x11210211, 0x11220111, 0x11220112, 0x11220212, 0x12200110, 0x12200212,
    0x12210012, 0x12210111, 0x12220011, 0x12220112, 0x12220211, 0x10210021, 0x10210122, 0x10210221,
    0x11200020, 0x11200021, 0x11200122, 0x11210121, 0x11210122, 0x11210220, 0x11220020, 0x12200121,
    0x12210021, 0x12210122, 0x12220121, 0x10211001, 0x10211002, 0x10211101, 0x10211102, 0x10211202,
    0x10221001, 0x10221102, 0x10221201, 0x11201000, 0x11201002, 0x11201101, 0x11201200, 0x11201202,
    0x11211001, 0x11211100, 0x11211101, 0x11211102, 0x11211201, 0x11211202, 0x11221000, 0x11221002,
    0x11221101, 0x12201100, 0x12201101, 0x12201201, 0x12211000, 0x12211002, 0x12211100, 0x12211101,
    0x12211102, 0x12211200, 0x12211202, 0x12221001, 0x12221100, 0x12221201, 0x10201111, 0x10201210,
    0x10201212, 0x10211011, 0x10211111, 0x10211112, 0x10211211, 0x11201110, 0x11201111, 0x11201112,
    0x11201211, 0x11211010, 0x11211011, 0x11211110, 0x11211111, 0x11211112, 0x11211211, 0x11221011,
    0x11221110, 0x11221111, 0x11221112, 0x11221211, 0x12201112, 0x12201211, 0x12201212, 0x12211011,
    0x12211111, 0x12211112, 0x12211211, 0x12211212, 0x12221012, 0x12221111, 0x12221112, 0x12221210,
    0x10201022, 0x10201221, 0x10211121, 0x10221020, 0x10221122, 0x10221220, 0x10221221, 0x11201020,
    0x11201121, 0x11201220, 0x11201222, 0x11211021, 0x11211120, 0x11211121, 0x11211122, 0x11211220,
    0x11211222, 0x11221020, 0x11221121, 0x11221220, 0x12201020, 0x12201022, 0x12201121, 0x12201222,
    0x12211120, 0x12211122, 0x12211220, 0x12211221, 0x12221020, 0x12221120, 0x12221122, 0x12221222,
    0x10212102, 0x10212201, 0x10222101, 0x11202001, 0x11212002, 0x11212101, 0x11212202, 0x11222001,
    0x11222201, 0x12202101, 0x12212001, 0x12212200, 0x12222102, 0x10202011, 0x10202110, 0x10212010,
    0x10212111, 0x10222011, 0x10222110, 0x10222112, 0x10222211, 0x11202010, 0x11202011, 0x11202111,
    0x11202112, 0x11202210, 0x11212011, 0x11212110, 0x11212111, 0x11212112, 0x11212211, 0x11222010,
    0x11222111, 0x11222212, 0x12202012, 0x12202110, 0x12202212, 0x12212111, 0x12222011, 0x12222110,
    0x12222111, 0x12222211, 0x10212021, 0x10212122, 0x10212220, 0x11202021, 0x11202120, 0x11202221,
    0x11212020, 0x11212121, 0x11212220, 0x11212222, 0x11222120, 0x11222121, 0x11222221, 0x12202122,
    0x12212120, 0x12212220, 0x12212222, 0x12222122, 0x20000000, 0x20000002, 0x20000200, 0x20000202,
    0x20020000, 0x20020002, 0x20020200, 0x20020202, 0x21000101, 0x21010000, 0x21010001, 0x21010100,
    0x21010102, 0x21010201, 0x21020101, 0x22000000, 0x22000002, 0x22000200, 0x22000202, 0x22010101,
    0x22020000, 0x22020002, 0x22020200, 0x22020202, 0x20000111, 0x20010011, 0x20010110, 0x20010112,
    0x20010211, 0x20020111, 0x21000011, 0x21000110, 0x21000211, 0x21010010, 0x21010012, 0x21010111,
    0x21010112, 0x21010210, 0x21010211, 0x21020110, 0x21020112, 0x21020211, 0x22000111, 0x22000211,
    0x22010110, 0x22010112, 0x22010211, 0x22020111, 0x20000020, 0x20000022, 0x20000220, 0x20000222,
    0x20010121, 0x20020020, 0x20020022, 0x20020220, 0x20020222, 0x21010021, 0x21010120, 0x21010221,
    0x21020121, 0x22000020, 0x22000022, 0x22000220, 0x22000222, 0x22010121, 0x22020020, 0x22020022,
    0x22020220, 0x22020222, 0x20011100, 0x20011201, 0x21001001, 0x21001100, 0x21011001, 0x21011101,
    0x21011202, 0x21021001, 0x21021100, 0x21021201, 0x22011100, 0x22011201, 0x20001011, 0x20001211,
    0x20011012, 0x20011111, 0x20011212, 0x20021112, 0x20021211, 0x21001010, 0x21001011, 0x21001111,
    0x21001210, 0x21011011, 0x21011110, 0x21011111, 0x21011112, 0x21011211, 0x21011212, 0x21021111,
    0x21021112, 0x21021210, 0x21021212, 0x22001011, 0x22001110, 0x22001112, 0x22001211, 0x22011010,
    0x22011012, 0x22011111, 0x22011210, 0x22021112, 0x20011021, 0x20011122, 0x20011221, 0x20021121,
    0x21001021, 0x21001120, 0x21001221, 0x21001222, 0x21011020, 0x21011121, 0x21011221, 0x21011222,
    0x21021021, 0x21021122, 0x21021222, 0x22001121, 0x22011021, 0x22011222, 0x22021120, 0x20002000,
    0x20002002, 0x20002200, 0x20002202, 0x20012101, 0x20022000, 0x20022002, 0x20022200, 0x20022202,
    0x21002001, 0x21002101, 0x21012001, 0x21012100, 0x21012201, 0x21022101, 0x21022201, 0x22002000,
    0x22002002, 0x22002200, 0x22002202, 0x22012101, 0x22022000, 0x22022002, 0x22022200, 0x22022202,
    0x20002111, 0x20002112, 0x20012011, 0x20012110, 0x20012112, 0x20022111, 0x21002011, 0x21002110,
    0x21002112, 0x21002211, 0x21012010, 0x21012012, 0x21012111, 0x21012212, 0x21022011, 0x21022110,
    0x22002111, 0x22012112, 0x22012211, 0x22022111, 0x20002020, 0x20002022, 0x20002220, 0x20002222,
    0x20012121, 0x20022020, 0x20022022, 0x20022220, 0x20022222, 0x21002121, 0x21012021, 0x21012120,
    0x21012122, 0x22002020, 0x22002022, 0x22002220, 0x22002222, 0x22012121, 0x22022020, 0x22022022,
    0x22022220, 0x22022222, 0x20100101, 0x20110001, 0x20110102, 0x20110200, 0x20110201, 0x20120101,
    0x21100001, 0x21100102, 0x21100201, 0x21110101, 0x21110200, 0x21110202, 0x21120201, 0x21120202,
    0x22100101, 0x22110001, 0x22110100, 0x22110102, 0x22110201, 0x22120101, 0x20100011, 0x20100110,
    0x20100112, 0x20100211, 0x20110010, 0x20110111, 0x20110210, 0x20110212, 0x20120011, 0x20120110,
    0x20120112, 0x20120211, 0x21100010, 0x21100111, 0x21110010, 0x21110011, 0x21110110, 0x21110111,
    0x21110112, 0x21110211, 0x21120012, 0x21120111, 0x22100110, 0x22100112, 0x22110012, 0x22110111,
    0x22110210, 0x22120011, 0x22120110, 0x22120112, 0x22120211, 0x20100121, 0x20110021, 0x20110120,
    0x20110221, 0x20120121, 0x21100120, 0x21100122, 0x21100221, 0x21110020, 0x21110022, 0x21110121,
    0x21110220, 0x21120122, 0x21120221, 0x22100121, 0x22110120, 0x22110122, 0x22120221, 0x20101001,
    0x20101100, 0x20101102, 0x20111000, 0x20111101, 0x20111200, 0x20121102, 0x21101000, 0x21101202,
    0x21111001, 0x21111100, 0x21111101, 0x21111102, 0x21111200, 0x21111201, 0x21121000, 0x21121001,
    0x21121002, 0x21121101, 0x22101100, 0x22101102, 0x22111002, 0x22111100, 0x22111101, 0x22111200,
    0x22121001, 0x22121201, 0x20101010, 0x20101111, 0x20101210, 0x20101212, 0x20111010, 0x20111011,
    0x20111110, 0x20111111, 0x20111112, 0x20111211, 0x20121011, 0x20121111, 0x20121211, 0x20121212,
    0x21101011, 0x21101110, 0x21101111, 0x21101112, 0x21101211, 0x21111010, 0x21111011, 0x21111012,
    0x21111110, 0x21111111, 0x21111112, 0x21111210, 0x21111211, 0x21111212, 0x21121011, 0x21121110,
    0x21121111, 0x21121112, 0x21121211, 0x22101011, 0x22101111, 0x22101210, 0x22111011, 0x22111012,
    0x22111110, 0x22111111, 0x22111112, 0x22111211, 0x22111212, 0x22121010, 0x22121012, 0x22121111,
    0x22121210, 0x22121212, 0x20101021, 0x20101120, 0x20111020, 0x20111121, 0x20111221, 0x20121020,
    0x20121122, 0x20121221, 0x21101121, 0x21101220, 0x21101221, 0x21111021, 0x21111022, 0x21111121,
    0x21111122, 0x21111221, 0x21121121, 0x21121220, 0x22101022, 0x22101120, 0x22101221, 0x22101222,
    0x22111022, 0x22111120, 0x22111121, 0x22121120, 0x22121122, 0x22121221, 0x20102101, 0x20112102,
    0x20112201, 0x20122101, 0x21102001, 0x21102102, 0x21112000, 0x21112002, 0x21112101, 0x21112102,
    0x21112202, 0x21122100, 0x21122101, 0x22102101, 0x22112001, 0x22112102, 0x22112201, 0x22122101,
    0x20102110, 0x20102112, 0x20102211, 0x20112010, 0x20112012, 0x20112111, 0x20112210, 0x20112212,
    0x20122010, 0x20122011, 0x20122110, 0x20122112, 0x21102010, 0x21102012, 0x21102111, 0x21102210,
    0x21102212, 0x21112011, 0x21112110, 0x21112111, 0x21112112, 0x21112211, 0x21122012, 0x21122111,
    0x21122112, 0x21122212, 0x22102011, 0x22102110, 0x22112010, 0x22112012, 0x22112111, 0x22112212,
    0x22122011, 0x22122112, 0x20102121, 0x20112121, 0x20122121, 0x21102120, 0x21102122, 0x21102221,
    0x21112020, 0x21112121, 0x21112220, 0x21122021, 0x22102121, 0x22112021, 0x22112120, 0x22112121,
    0x22112122, 0x20200000, 0x20200002, 0x20200200, 0x20200202, 0x20210101, 0x20220000, 0x20220002,
    0x20220200, 0x20220202, 0x21200101, 0x21210001, 0x21210100, 0x21210102, 0x21210201, 0x22200000,
    0x22200002, 0x22200200, 0x22200202, 0x22210101, 0x22220000, 0x22220002, 0x22220200, 0x22220202,
    0x20200111, 0x20200211, 0x20210011, 0x20210110, 0x20210112, 0x20210211, 0x20210212, 0x21200112,
    0x21200211, 0x21210011, 0x21210111, 0x21210210, 0x21210212, 0x21220011, 0x21220110, 0x22200111,
    0x22210010, 0x22210012, 0x22210112, 0x22210211, 0x20200022, 0x20200220, 0x20200222, 0x20210020,
    0x20210221, 0x20220022, 0x20220220, 0x20220222, 0x21200121, 0x21210021, 0x21210122, 0x21210221,
    0x21220121, 0x22200020, 0x22200022, 0x22200220, 0x22200222, 0x22210121, 0x22220020, 0x22220022,
    0x22220220, 0x22220222, 0x20211201, 0x20221101, 0x21201001, 0x21201100, 0x21211000, 0x21211100,
    0x21211101, 0x21211200, 0x21211202, 0x21221001, 0x21221101, 0x21221102, 0x21221200, 0x21221201,
    0x22201101, 0x20201112, 0x20201211, 0x20211010, 0x20211012, 0x20211111, 0x20211210, 0x20221112,
    0x20221211, 0x21201012, 0x21201111, 0x21211011, 0x21211110, 0x21211111, 0x21211112, 0x21211211,
    0x21221111, 0x21221212, 0x22201011, 0x22201110, 0x22201111, 0x22201112, 0x22201211, 0x22211012,
    0x22211111, 0x22211210, 0x20201121, 0x20211021, 0x20211122, 0x20211222, 0x20221021, 0x20221121,
    0x21201120, 0x21201122, 0x21201222, 0x21211022, 0x21211121, 0x21211122, 0x21211220, 0x21221020,
    0x21221022, 0x22201122, 0x22211020, 0x22211121, 0x22211122, 0x22211221, 0x22221021, 0x22221120,
    0x22221122, 0x20202000, 0x20202002, 0x20202200, 0x20202202, 0x20222000, 0x20222002, 0x20222200,
    0x20222202, 0x21212001, 0x21212100, 0x21212102, 0x21212201, 0x22202000, 0x22202002, 0x22202200,
    0x22202202, 0x22212101, 0x22222000, 0x22222002, 0x22222200, 0x22222202, 0x20202111, 0x20212110,
    0x20212211, 0x20222011, 0x20222111, 0x21202011, 0x21212010, 0x21212111, 0x21212212, 0x21222011,
    0x21222112, 0x21222211, 0x22212010, 0x22212112, 0x20202020, 0x20202022, 0x20202220, 0x20202222,
    0x20222020, 0x20222022, 0x20222220, 0x20222222, 0x21212021, 0x21212120, 0x21212122, 0x22202020,
    0x22202022, 0x22202220, 0x22202222, 0x22212121, 0x22222020, 0x22222022, 0x22222220, 0x22222222,
};
#endif

}

#ifdef __x86_64__

namespace {
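// Matrix multiplication of IQ1_S quantized weights with Q8_K quantized activations.
// Per super-block of 256 weights there are 32 grid indices (8 low bits in qs, 3 high
// bits packed into the qh words), and each 16-bit qh entry additionally carries a
// 3-bit sub-block scale and a delta sign bit.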
template <int nrc_y>
void mul_mat_iq1_s_q8_K(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
    GGML_ASSERT(n%QK_K == 0);
    Q8<nrc_y, block_q8_K> q8(info);
    __m256i qx[8];
    __m256i scales[4];
    __m256  acc[nrc_y] = {};
    auto delta_mask = _mm_set1_epi16(-32768); // -32768 == 0x8000; the signed literal avoids compiler overflow warnings
    __m256i shuffle0 = _mm256_set_epi64x(0x0302030203020302, 0x0100010001000100, 0x0302030203020302, 0x0100010001000100);
    for (int ix = 0; ix < nrc_x; ++ix) {
        auto iq1s = (const block_iq1_s *)((const char *)vx + ix*bx);
        for (int ibl = 0; ibl < n/QK_K; ++ibl) {
            float d = GGML_FP16_TO_FP32(iq1s[ibl].d);
            auto qhb = _mm_loadu_si128((const __m128i *)iq1s[ibl].qh);
            auto scales128 = _mm_and_si128(_mm_srli_epi16(qhb, 12), _mm_set1_epi16(7));
            scales128 = _mm_add_epi16(_mm_slli_epi16(scales128, 1), _mm_set1_epi16(1));
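            // iq1s_grid_us holds the ternary values shifted to {0,1,2}; the true weight is
            // scale*(g-1±0.125) with scale = 2s+1. Everything is kept 8x scaled in integer
            // arithmetic: the -1 shift and the ±0.125 delta combine into -7 (delta > 0) or
            // -9 (delta < 0) per block, applied below via the q8 block sums; the final
            // 0.125f at the store undoes the scaling.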
#ifdef HAVE_FANCY_SIMD
            auto mask = _mm_cmpeq_epi16_mask(_mm_and_si128(qhb, delta_mask), delta_mask);
            auto deltas128 = _mm_mask_blend_epi16(mask, _mm_set1_epi16(-7), _mm_set1_epi16(-9));
#else
            auto mask = _mm_cmpeq_epi16(_mm_and_si128(qhb, delta_mask), delta_mask);
            auto deltas128 = _mm_or_si128(_mm_and_si128(mask, _mm_set1_epi16(-9)), _mm_andnot_si128(mask, _mm_set1_epi16(-7)));
#endif
            deltas128 = _mm_mullo_epi16(scales128, deltas128);
            scales128 = _mm_slli_epi16(scales128, 3);
            auto deltas_l = _mm_unpacklo_epi16(deltas128, deltas128);
            auto deltas_h = _mm_unpackhi_epi16(deltas128, deltas128);
            auto deltas = MM256_SET_M128I(deltas_h, deltas_l); // blocks 0,0, 1,1, 2,2, ..., 7,7
            auto all_scales = MM256_SET_M128I(scales128, scales128);
            auto shuffle = shuffle0;
            for (int ib64 = 0; ib64 < QK_K/64; ++ib64) {
                scales[ib64] = _mm256_shuffle_epi8(all_scales, shuffle);
                shuffle = _mm256_add_epi8(shuffle, _mm256_set1_epi8(4));
            }
            const uint8_t  * qs = iq1s[ibl].qs;
            const uint16_t * qh = iq1s[ibl].qh;
            for (int ib = 0; ib < QK_K/32; ib += 2) {
                qx[ib+0] = _mm256_set_epi64x(iq1s_grid_us[qs[3] | ((qh[ib+0] >> 1) & 0x700)], iq1s_grid_us[qs[2] | ((qh[ib+0] << 2) & 0x700)],
                                             iq1s_grid_us[qs[1] | ((qh[ib+0] << 5) & 0x700)], iq1s_grid_us[qs[0] | ((qh[ib+0] << 8) & 0x700)]);
                qx[ib+1] = _mm256_set_epi64x(iq1s_grid_us[qs[7] | ((qh[ib+1] >> 1) & 0x700)], iq1s_grid_us[qs[6] | ((qh[ib+1] << 2) & 0x700)],
                                             iq1s_grid_us[qs[5] | ((qh[ib+1] << 5) & 0x700)], iq1s_grid_us[qs[4] | ((qh[ib+1] << 8) & 0x700)]);
                qs += 8;
            }
            for (int iy = 0; iy < nrc_y; ++iy) {
                auto bsums = q8.load_bsums(iy, ibl);
                auto sumi = _mm256_setzero_si256();
                for (int ib64 = 0; ib64 < QK_K/64; ++ib64) {
                    auto qy1 = q8.load_quants(iy, ibl, 2*ib64+0);
                    auto qy2 = q8.load_quants(iy, ibl, 2*ib64+1);
#ifdef HAVE_FANCY_SIMD
                    auto dot1 = _mm256_dpbusd_epi32(_mm256_setzero_si256(), qx[2*ib64+0], qy1);
                    auto dot2 = _mm256_dpbusd_epi32(_mm256_setzero_si256(), qx[2*ib64+1], qy2);
                    sumi = _mm256_dpwssd_epi32(sumi, scales[ib64], _mm256_packs_epi32(dot1, dot2));
#else
                    auto dot1 = _mm256_maddubs_epi16(qx[2*ib64+0], qy1);
                    auto dot2 = _mm256_maddubs_epi16(qx[2*ib64+1], qy2);
                    auto dot  = _mm256_add_epi16(_mm256_unpacklo_epi64(dot1, dot2), _mm256_unpackhi_epi64(dot1, dot2));
                    sumi = _mm256_add_epi32(sumi, _mm256_madd_epi16(scales[ib64], dot));
#endif
                }
#ifdef HAVE_FANCY_SIMD
                sumi = _mm256_dpwssd_epi32(sumi, bsums, deltas);
#else
                sumi = _mm256_add_epi32(sumi, _mm256_madd_epi16(bsums, deltas));
#endif
                acc[iy] = _mm256_fmadd_ps(_mm256_set1_ps(d*q8.scale(iy, ibl)), _mm256_cvtepi32_ps(sumi), acc[iy]);
            }
        }
        for (int iy = 0; iy < nrc_y; ++iy) {
            info.store(ix, iy, 0.125f*hsum_float_8(acc[iy]));
            acc[iy] = _mm256_setzero_ps();
        }
    }
}

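// Same scheme for IQ1_M: the fp16 super-block scale is reassembled from the top 4 bits
// of the four 16-bit scale words, each group of 16 weights has a 3-bit sub-block scale
// (used as 2*s+1), and each group of 8 weights selects its ±0.125 delta via bit 3 of
// the corresponding qh nibble. The deltas are folded directly into the int8 values as
// 8*g - 7 or 8*g - 9, so the dot product uses the sign_epi8 trick to keep the first
// _mm256_maddubs_epi16 argument unsigned.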
template <int nrc_y>
void mul_mat_iq1_m_q8_K(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
    GGML_ASSERT(n%QK_K == 0);
    Q8<nrc_y, block_q8_K> q8(info);
    __m256i qx[8];
    __m256  acc[nrc_y] = {};
    auto scale_shuffle = _mm256_set_epi64x(0x0706070607060706, 0x0504050405040504, 0x0302030203020302, 0x0100010001000100);
    auto delta_mask = _mm256_set_epi64x(0x8000, 0x0800, 0x0080, 0x0008);
    iq1m_scale_t scale;
    union { __m256i vec; int16_t val[16]; } helper;
    for (int ix = 0; ix < nrc_x; ++ix) {
        auto iq1m = (const block_iq1_m *)((const char *)vx + ix*bx);
        for (int ibl = 0; ibl < n/QK_K; ++ibl) {
            const uint16_t * sc = (const uint16_t *)iq1m[ibl].scales; // 4 x uint16_t, each containing 4 scales
            scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
            float d = GGML_FP16_TO_FP32(scale.f16);
            auto qs = iq1m[ibl].qs;
            auto qh = iq1m[ibl].qh;
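            // Expand the 16 3-bit sub-block scales into separate 16-bit lanes and turn them
            // into the actual scale values 2*s+1.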
            auto aux = _mm_loadl_epi64((const __m128i *)iq1m[ibl].scales);
            auto sc16 = _mm256_shuffle_epi8(MM256_SET_M128I(aux, aux), scale_shuffle);
            sc16 = _mm256_and_si256(sc16, _mm256_set1_epi64x(0x0e0001c000380007));
            sc16 = _mm256_mullo_epi16(sc16, _mm256_set1_epi64x(0x0001000800400200));
            helper.vec = _mm256_add_epi8(_mm256_srli_epi16(sc16, 8), _mm256_set1_epi16(1));
            for (int ib64 = 0; ib64 < QK_K/64; ++ib64) {
                qx[2*ib64+0] = _mm256_set_epi64x(iq1s_grid_us[qs[3] | (((uint16_t)qh[1] << 4) & 0x700)], iq1s_grid_us[qs[2] | (((uint16_t)qh[1] << 8) & 0x700)],
                                                 iq1s_grid_us[qs[1] | (((uint16_t)qh[0] << 4) & 0x700)], iq1s_grid_us[qs[0] | (((uint16_t)qh[0] << 8) & 0x700)]);
                qx[2*ib64+1] = _mm256_set_epi64x(iq1s_grid_us[qs[7] | (((uint16_t)qh[3] << 4) & 0x700)], iq1s_grid_us[qs[6] | (((uint16_t)qh[3] << 8) & 0x700)],
                                                 iq1s_grid_us[qs[5] | (((uint16_t)qh[2] << 4) & 0x700)], iq1s_grid_us[qs[4] | (((uint16_t)qh[2] << 8) & 0x700)]);
                //auto delta1 = _mm256_set_epi64x(qh[1] & 0x80 ? 0x0909090909090909 : 0x0707070707070707,
                //                                qh[1] & 0x08 ? 0x0909090909090909 : 0x0707070707070707,
                //                                qh[0] & 0x80 ? 0x0909090909090909 : 0x0707070707070707,
                //                                qh[0] & 0x08 ? 0x0909090909090909 : 0x0707070707070707);
                //auto delta2 = _mm256_set_epi64x(qh[3] & 0x80 ? 0x0909090909090909 : 0x0707070707070707,
                //                                qh[3] & 0x08 ? 0x0909090909090909 : 0x0707070707070707,
                //                                qh[2] & 0x80 ? 0x0909090909090909 : 0x0707070707070707,
                //                                qh[2] & 0x08 ? 0x0909090909090909 : 0x0707070707070707);
                auto qh16 = (const uint16_t *)qh;
                auto delta1 = _mm256_cmpeq_epi64(_mm256_and_si256(_mm256_set1_epi64x(qh16[0]), delta_mask), delta_mask);
                auto delta2 = _mm256_cmpeq_epi64(_mm256_and_si256(_mm256_set1_epi64x(qh16[1]), delta_mask), delta_mask);
                delta1 = _mm256_sub_epi8(_mm256_set1_epi8(8), _mm256_or_si256(delta1, _mm256_set1_epi8(1)));
                delta2 = _mm256_sub_epi8(_mm256_set1_epi8(8), _mm256_or_si256(delta2, _mm256_set1_epi8(1)));
                qx[2*ib64+0] = _mm256_sub_epi8(_mm256_slli_epi16(qx[2*ib64+0], 3), delta1);
                qx[2*ib64+1] = _mm256_sub_epi8(_mm256_slli_epi16(qx[2*ib64+1], 3), delta2);
                qs += 8;
                qh += 4;
            }
            for (int iy = 0; iy < nrc_y; ++iy) {
                auto sumi = _mm256_setzero_si256();
                for (int j = 0; j < 8; ++j) {
                    auto p = _mm256_maddubs_epi16(_mm256_sign_epi8(qx[j], qx[j]), _mm256_sign_epi8(q8.load_quants(iy, ibl, j), qx[j]));
                    sumi = _mm256_add_epi32(sumi, _mm256_madd_epi16(p, MM256_SET_M128I(_mm_set1_epi16(helper.val[2*j+1]), _mm_set1_epi16(helper.val[2*j+0]))));
                }
                acc[iy] = _mm256_fmadd_ps(_mm256_set1_ps(d*q8.scale(iy, ibl)), _mm256_cvtepi32_ps(sumi), acc[iy]);
            }
        }
        for (int iy = 0; iy < nrc_y; ++iy) {
            info.store(ix, iy, 0.125f*hsum_float_8(acc[iy]));
            acc[iy] = _mm256_setzero_ps();
        }
    }
}

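// IQ1_S with Q8_2_x4 activations (groups of 4 sub-blocks of 32 quants sharing one
// header). Here the ±0.125 deltas are handled in floating point, folded into the
// per-block scales and applied via the activation block sums from the header.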
template <int nrc_y>
void mul_mat_iq1_s_q8_2_x4(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
    GGML_ASSERT(n%QK_K == 0);
    Q8<nrc_y, block_q8_2_x4> q8(info);
    __m256i qx[4];
    __m256  scales[2];
    __m256  acc[nrc_y] = {};
    auto delta_mask = _mm_set1_epi16(-32768); // -32768 == 0x8000; the signed literal avoids compiler overflow warnings
    for (int ix = 0; ix < nrc_x; ++ix) {
        auto iq1s = (const block_iq1_s *)((const char *)vx + ix*bx);
        for (int ibl = 0; ibl < n/QK_K; ++ibl) {
            float d = GGML_FP16_TO_FP32(iq1s[ibl].d);
            auto qhb = _mm_loadu_si128((const __m128i *)iq1s[ibl].qh);
            auto scales128 = _mm_and_si128(_mm_srli_epi16(qhb, 12), _mm_set1_epi16(7));
            scales128 = _mm_add_epi16(_mm_slli_epi16(scales128, 1), _mm_set1_epi16(1));
            auto all_scales = _mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(scales128)));
#ifdef HAVE_FANCY_SIMD
            auto mask = _mm_cmpeq_epi16_mask(_mm_and_si128(qhb, delta_mask), delta_mask);
            auto deltas128 = _mm_mask_blend_epi16(mask, _mm_set1_epi16(-7), _mm_set1_epi16(-9));
#else
            auto mask = _mm_cmpeq_epi16(_mm_and_si128(qhb, delta_mask), delta_mask);
            auto deltas128 = _mm_or_si128(_mm_and_si128(mask, _mm_set1_epi16(-9)), _mm_andnot_si128(mask, _mm_set1_epi16(-7)));
#endif
            auto deltas = _mm256_mul_ps(all_scales, _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(deltas128)));
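            // The q8_2_x4 header stores the 4 sub-block scales in d[0..3] and the pre-scaled
            // block sums in d[4..7], all as bf16; shifting into the high half of a 32-bit
            // lane converts them to fp32.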
            for (int iy = 0; iy < nrc_y; ++iy) {
                auto my1 = _mm_cvtepu16_epi32(_mm_loadl_epi64((const __m128i *)(q8.y[iy][2*ibl+0].d + 4)));
                auto my2 = _mm_cvtepu16_epi32(_mm_loadl_epi64((const __m128i *)(q8.y[iy][2*ibl+1].d + 4)));
                auto my  = _mm256_castsi256_ps(_mm256_slli_epi32(MM256_SET_M128I(my2, my1), 16));
                acc[iy]  = _mm256_fmadd_ps(deltas, my, acc[iy]);
            }
            all_scales = _mm256_mul_ps(_mm256_set1_ps(8.f), all_scales);
            auto scales_l = _mm256_castps256_ps128(all_scales);
            auto scales_h = _mm256_extractf128_ps(all_scales, 1);
            scales[0] = _mm256_set_m128(scales_l, scales_l);
            scales[1] = _mm256_set_m128(scales_h, scales_h);
            const uint8_t  * qs = iq1s[ibl].qs;
            const uint16_t * qh = iq1s[ibl].qh;
            for (int i128 = 0; i128 < QK_K/128; ++i128) {
                qx[0] = _mm256_set_epi64x(iq1s_grid_us[qs[3] | ((qh[0] >> 1) & 0x700)], iq1s_grid_us[qs[2] | ((qh[0] << 2) & 0x700)],
                                          iq1s_grid_us[qs[1] | ((qh[0] << 5) & 0x700)], iq1s_grid_us[qs[0] | ((qh[0] << 8) & 0x700)]);
                qx[1] = _mm256_set_epi64x(iq1s_grid_us[qs[7] | ((qh[1] >> 1) & 0x700)], iq1s_grid_us[qs[6] | ((qh[1] << 2) & 0x700)],
                                          iq1s_grid_us[qs[5] | ((qh[1] << 5) & 0x700)], iq1s_grid_us[qs[4] | ((qh[1] << 8) & 0x700)]);
                qs += 8;
                qx[2] = _mm256_set_epi64x(iq1s_grid_us[qs[3] | ((qh[2] >> 1) & 0x700)], iq1s_grid_us[qs[2] | ((qh[2] << 2) & 0x700)],
                                          iq1s_grid_us[qs[1] | ((qh[2] << 5) & 0x700)], iq1s_grid_us[qs[0] | ((qh[2] << 8) & 0x700)]);
                qx[3] = _mm256_set_epi64x(iq1s_grid_us[qs[7] | ((qh[3] >> 1) & 0x700)], iq1s_grid_us[qs[6] | ((qh[3] << 2) & 0x700)],
                                          iq1s_grid_us[qs[5] | ((qh[3] << 5) & 0x700)], iq1s_grid_us[qs[4] | ((qh[3] << 8) & 0x700)]);
                qs += 8; qh += 4;
                for (int iy = 0; iy < nrc_y; ++iy) {
                    auto& ybl = q8.y[iy][2*ibl+i128];
                    auto sumi1 = _mm256_maddubs_epi16(qx[0], _mm256_loadu_si256((const __m256i *)ybl.qs+0));
                    auto sumi2 = _mm256_maddubs_epi16(qx[1], _mm256_loadu_si256((const __m256i *)ybl.qs+1));
                    auto sumi3 = _mm256_maddubs_epi16(qx[2], _mm256_loadu_si256((const __m256i *)ybl.qs+2));
                    auto sumi4 = _mm256_maddubs_epi16(qx[3], _mm256_loadu_si256((const __m256i *)ybl.qs+3));
                    // 0,0,1,1, 0,0,1,1, 0,0,1,1, 0,0,1,1 as int16_t
                    sumi1 = _mm256_add_epi16(_mm256_unpacklo_epi32(sumi1, sumi2), _mm256_unpackhi_epi32(sumi1, sumi2));
                    // 2,2,3,3, 2,2,3,3, 2,2,3,3, 2,2,3,3 as int16_t
                    sumi3 = _mm256_add_epi16(_mm256_unpacklo_epi32(sumi3, sumi4), _mm256_unpackhi_epi32(sumi3, sumi4));
                    sumi1 = _mm256_add_epi16(_mm256_unpacklo_epi64(sumi1, sumi3), _mm256_unpackhi_epi64(sumi1, sumi3));
                    // 0, 1, 2, 3, 0, 1, 2, 3 as int32_t
                    sumi1 = _mm256_madd_epi16(_mm256_set1_epi16(1), sumi1);
                    auto d4 = _mm_castsi128_ps(_mm_slli_epi32(_mm_cvtepu16_epi32(_mm_loadl_epi64((const __m128i *)ybl.d)), 16));
                    auto dy = _mm256_set_m128(d4, d4);
                    acc[iy] = _mm256_fmadd_ps(_mm256_mul_ps(scales[i128], dy), _mm256_cvtepi32_ps(sumi1), acc[iy]);
                }
            }
        }
        for (int iy = 0; iy < nrc_y; ++iy) {
            info.store(ix, iy, 0.125f*hsum_float_8(acc[iy]));
            acc[iy] = _mm256_setzero_ps();
        }
    }
}

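// IQ1_S_R4: IQ1_S with 4 interleaved rows. A row group starts with 4 fp16 row scales;
// each block of 4x32 weights then packs 16 qs index bytes and a 64-bit qh holding the
// four per-row 16-bit words with the high index bits, sub-block scale, and delta sign.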
template <int nrc_y>
static void mul_mat_iq1_s_r4_q8_1(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
    GGML_ASSERT(nrc_x%4 == 0);
    Q8<nrc_y, block_q8_K128> q8(info);
    int nb = n / 32;
    GGML_ASSERT(nb%4 == 0);
    __m256i qx[4];
    __m256  acc[nrc_y] = {};
    auto m1 = _mm256_set1_epi16(1);
    auto ms = _mm_set1_epi16(-32768);
    float d8[4*nrc_y];
    union { __m256i vec; uint16_t val[16]; } helper;
    struct aux_iq1_s_r4 {
        uint8_t  qs[16];
        uint64_t qh;
    };
    for (int ix = 0; ix < nrc_x; ix += 4) {
        auto dptr = (const ggml_half *)((const char *)vx + ix*bx);
        auto d1 = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i *)dptr));
        auto x = (const aux_iq1_s_r4 *)(dptr + 4);
        for (int ib = 0; ib < nb/4; ++ib) {
            for (int iy = 0; iy < nrc_y; ++iy) {
                auto bsums = _mm_cvtepi16_epi32(_mm_loadl_epi64((const __m128i *)q8.y[iy][ib].bsums));
                _mm_storeu_ps(d8 + 4*iy, _mm_mul_ps(_mm_set1_ps(q8.y[iy][ib].d), _mm_cvtepi32_ps(bsums)));
            }
            for (int k = 0; k < 4; ++k) {
                auto idxh = _mm256_set1_epi64x(x[4*ib+k].qh);
                auto sas = _mm256_castsi256_si128(idxh);
                auto scales4 = _mm_and_si128(_mm_srli_epi16(sas, 12), _mm_set1_epi16(7));
                scales4 = _mm_or_si128(_mm_slli_epi16(scales4, 1), _mm_set1_epi16(1));
                auto signs = _mm_or_si128(_mm_cmpeq_epi16(_mm_and_si128(sas, ms), ms), _mm256_castsi256_si128(m1));
                signs = _mm_add_epi16(_mm_set1_epi16(-8), signs);
                signs = _mm_mullo_epi16(signs, scales4);
                auto delta4 = _mm_mul_ps(_mm_set1_ps(0.0625f), _mm_cvtepi32_ps(_mm_cvtepi16_epi32(signs)));
                auto delta = _mm256_set_m128(delta4, delta4);
                scales4 = _mm_unpacklo_epi16(scales4, scales4); // 0,0, 1,1, 2,2, 3,3
                auto scales = MM256_SET_M128I(scales4, scales4);
                auto idxl = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i *)x[4*ib+k].qs));
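                // Shift each row's qh word so its three high index bits land in bits 8-10 of
                // the corresponding 16-bit lane; OR-ing with the low index bytes in idxl then
                // yields the full 11-bit grid indices.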
                idxh = _mm256_sllv_epi64(idxh, _mm256_set_epi64x(0, 2, 5, 8));
                idxh = _mm256_srlv_epi64(idxh, _mm256_set_epi64x(1, 0, 0, 0));
                helper.vec = _mm256_or_si256(idxl, _mm256_and_si256(_mm256_set1_epi16(0x0700), idxh));
                qx[0] = _mm256_set_epi64x(iq1s_grid_us[helper.val[ 9]], iq1s_grid_us[helper.val[ 8]],
                                          iq1s_grid_us[helper.val[ 1]], iq1s_grid_us[helper.val[ 0]]);
                qx[1] = _mm256_set_epi64x(iq1s_grid_us[helper.val[13]], iq1s_grid_us[helper.val[12]],
                                          iq1s_grid_us[helper.val[ 5]], iq1s_grid_us[helper.val[ 4]]);
                qx[2] = _mm256_set_epi64x(iq1s_grid_us[helper.val[11]], iq1s_grid_us[helper.val[10]],
                                          iq1s_grid_us[helper.val[ 3]], iq1s_grid_us[helper.val[ 2]]);
                qx[3] = _mm256_set_epi64x(iq1s_grid_us[helper.val[15]], iq1s_grid_us[helper.val[14]],
                                          iq1s_grid_us[helper.val[ 7]], iq1s_grid_us[helper.val[ 6]]);
                for (int iy = 0; iy < nrc_y; ++iy) {
                    auto y = _mm256_loadu_si256((const __m256i *)q8.y[iy][ib].qs + k);
#ifdef HAVE_FANCY_SIMD
                    // 0,0, 1,1, 0,0, 1,1 as int32_t
                    auto sumi1 = _mm256_dpbusd_epi32(_mm256_dpbusd_epi32(_mm256_setzero_si256(),
                                qx[0], _mm256_shuffle_epi32(y, 0x44)), qx[1], _mm256_shuffle_epi32(y, 0xee));
                    // 2,2, 3,3, 2,2, 3,3 as int32_t
                    auto sumi2 = _mm256_dpbusd_epi32(_mm256_dpbusd_epi32(_mm256_setzero_si256(),
                                qx[2], _mm256_shuffle_epi32(y, 0x44)), qx[3], _mm256_shuffle_epi32(y, 0xee));
                    auto sumi = _mm256_packs_epi32(sumi1, sumi2);
#else
                    // 4 x row 0, 4 x row 1, 4 x row 0, 4 x row 1
                    auto sumi1 = _mm256_add_epi16(_mm256_maddubs_epi16(qx[0], _mm256_shuffle_epi32(y, 0x44)),
                                                  _mm256_maddubs_epi16(qx[1], _mm256_shuffle_epi32(y, 0xee)));
                    // 4 x row 2, 4 x row 3, 4 x row 2, 4 x row 3
                    auto sumi2 = _mm256_add_epi16(_mm256_maddubs_epi16(qx[2], _mm256_shuffle_epi32(y, 0x44)),
                                                  _mm256_maddubs_epi16(qx[3], _mm256_shuffle_epi32(y, 0xee)));
                    // 0,0, 1,1, 0,0, 1,1  as int32_t
                    sumi1 = _mm256_madd_epi16(m1, sumi1);
                    // 2,2, 3,3, 2,2, 3,3  as int32_t
                    sumi2 = _mm256_madd_epi16(m1, sumi2);
                    // 0,0, 1,1, 2,2, 3,3, 0,0, 1,1, 2,2, 3,3 as int16_t
                    auto sumi = _mm256_packs_epi32(sumi1, sumi2);
#endif
                    sumi = _mm256_madd_epi16(scales, sumi);
                    acc[iy] = _mm256_fmadd_ps(_mm256_set1_ps(q8.y[iy][ib].d), _mm256_cvtepi32_ps(sumi), acc[iy]);
                    acc[iy] = _mm256_fmadd_ps(_mm256_set1_ps(d8[4*iy+k]), delta, acc[iy]);
                }
            }
        }
        for (int iy = 0; iy < nrc_y; ++iy) {
            auto sumf = _mm_add_ps(_mm256_castps256_ps128(acc[iy]), _mm256_extractf128_ps(acc[iy], 1));
            info.store(ix, iy, _mm_mul_ps(d1, sumf));
            acc[iy] = _mm256_setzero_ps();
        }
    }
}

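// IQ1_M_R4: IQ1_M with 4 interleaved rows. The 4 fp16 row scales at the start of the
// row group are pre-multiplied by 0.125f here instead of scaling at the store; 4-bit
// sub-block scales come from the scales field, and the per-8-weight deltas (bit 3 of
// the qh nibbles) are folded into the int8 values as 8*g - 7 or 8*g - 9.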
template <int nrc_y>
static void mul_mat_iq1_m_r4_q8_0(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
    GGML_ASSERT(nrc_x%4 == 0);
    Q8<nrc_y, block_q8_K128> q8(info);
    int nb = n / 32;
    GGML_ASSERT(nb%4 == 0);
    auto shuffle0 = _mm256_set_epi64x(0x0909090909090909, 0x0808080808080808, 0x0101010101010101, 0x0000000000000000);
    auto step = _mm256_set1_epi8(2);
#ifndef HAVE_FANCY_SIMD
    auto m1 = _mm256_set1_epi16(1);
#endif
    __m256i qx[4];
    __m256  acc[nrc_y] = {};
    __m256i isum[nrc_y] = {};
    auto ms = _mm_set1_epi8(0x08);
    union { __m256i vec; uint16_t val[16]; } helper;
    for (int ix= 0; ix < nrc_x; ix += 4) {
        auto dptr = (const ggml_half *)((const char *)vx + ix*bx);
        auto d1 = _mm_mul_ps(_mm_set1_ps(0.125f), _mm_cvtph_ps(_mm_loadl_epi64((const __m128i *)dptr)));
        auto x = (const block_iq1_m_r4 *)(dptr + 4);
        for (int ib = 0; ib < nb/4; ++ib) {
            for (int k = 0; k < 4; ++k) {
                auto qh = (const uint32_t *)x[4*ib+k].qh;
                auto idxh = _mm_set_epi32(qh[1] >> 4, qh[1], qh[0] >> 4, qh[0]);
                auto scales4 = _mm_set1_epi32(((const uint32_t *)x[4*ib+k].scales)[0]);
                scales4 = _mm_and_si128(_mm_srlv_epi32(scales4, _mm_set_epi32(4, 0, 4, 0)), _mm_set1_epi8(0xf));
                scales4 = _mm_cvtepu8_epi16(scales4);
                auto scales = MM256_SET_M128I(_mm_unpackhi_epi16(scales4, scales4), _mm_unpacklo_epi16(scales4, scales4));

                auto signs128 = _mm_or_si128(_mm_cmpeq_epi8(_mm_and_si128(idxh, ms), ms), _mm_set1_epi8(1));
                signs128 = _mm_add_epi8(_mm_set1_epi8(-8), signs128);
                auto signs = MM256_SET_M128I(signs128, signs128);
                auto idxl = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i *)x[4*ib+k].qs));
                idxh = _mm_and_si128(idxh, _mm_set1_epi8(0x07));
                helper.vec = _mm256_or_si256(idxl, _mm256_slli_epi16(_mm256_cvtepu8_epi16(idxh), 8));
                qx[0] = _mm256_set_epi64x(iq1s_grid_us[helper.val[ 9]], iq1s_grid_us[helper.val[ 8]],
                                          iq1s_grid_us[helper.val[ 1]], iq1s_grid_us[helper.val[ 0]]);
                qx[1] = _mm256_set_epi64x(iq1s_grid_us[helper.val[13]], iq1s_grid_us[helper.val[12]],
                                          iq1s_grid_us[helper.val[ 5]], iq1s_grid_us[helper.val[ 4]]);
                qx[2] = _mm256_set_epi64x(iq1s_grid_us[helper.val[11]], iq1s_grid_us[helper.val[10]],
                                          iq1s_grid_us[helper.val[ 3]], iq1s_grid_us[helper.val[ 2]]);
                qx[3] = _mm256_set_epi64x(iq1s_grid_us[helper.val[15]], iq1s_grid_us[helper.val[14]],
                                          iq1s_grid_us[helper.val[ 7]], iq1s_grid_us[helper.val[ 6]]);
                qx[0] = _mm256_add_epi8(_mm256_slli_epi16(qx[0], 3), _mm256_shuffle_epi8(signs, shuffle0));
                auto shuffle = _mm256_add_epi8(shuffle0, step);
                qx[2] = _mm256_add_epi8(_mm256_slli_epi16(qx[2], 3), _mm256_shuffle_epi8(signs, shuffle));
                shuffle = _mm256_add_epi8(shuffle, step);
                qx[1] = _mm256_add_epi8(_mm256_slli_epi16(qx[1], 3), _mm256_shuffle_epi8(signs, shuffle));
                shuffle = _mm256_add_epi8(shuffle, step);
                qx[3] = _mm256_add_epi8(_mm256_slli_epi16(qx[3], 3), _mm256_shuffle_epi8(signs, shuffle));
                auto s0 = _mm256_sign_epi8(qx[0], qx[0]);
                auto s1 = _mm256_sign_epi8(qx[1], qx[1]);
                auto s2 = _mm256_sign_epi8(qx[2], qx[2]);
                auto s3 = _mm256_sign_epi8(qx[3], qx[3]);
                for (int iy = 0; iy < nrc_y; ++iy) {
                    auto y = _mm256_loadu_si256((const __m256i *)q8.y[iy][ib].qs + k);
                    auto y1 = _mm256_shuffle_epi32(y, 0x44);
                    auto y2 = _mm256_shuffle_epi32(y, 0xee);
#ifdef HAVE_FANCY_SIMD
                    // 0,0, 1,1, 0,0, 1,1 as int32_t
                    auto sumi1 = _mm256_dpbusd_epi32(_mm256_dpbusd_epi32(_mm256_setzero_si256(),
                                s0, _mm256_sign_epi8(y1, qx[0])), s1, _mm256_sign_epi8(y2, qx[1]));
                    // 2,2, 3,3, 2,2, 3,3 as int32_t
                    auto sumi2 = _mm256_dpbusd_epi32(_mm256_dpbusd_epi32(_mm256_setzero_si256(),
                                s2, _mm256_sign_epi8(y1, qx[2])), s3, _mm256_sign_epi8(y2, qx[3]));
                    auto sumi = _mm256_packs_epi32(sumi1, sumi2);
#else
                    // 4 x row 0, 4 x row 1, 4 x row 0, 4 x row 1
                    auto sumi1 = _mm256_add_epi16(_mm256_maddubs_epi16(s0, _mm256_sign_epi8(y1, qx[0])),
                                                  _mm256_maddubs_epi16(s1, _mm256_sign_epi8(y2, qx[1])));
                    // 4 x row 2, 4 x row 3, 4 x row 2, 4 x row 3
                    auto sumi2 = _mm256_add_epi16(_mm256_maddubs_epi16(s2, _mm256_sign_epi8(y1, qx[2])),
                                                  _mm256_maddubs_epi16(s3, _mm256_sign_epi8(y2, qx[3])));
                    // 0,0, 1,1, 0,0, 1,1  as int32_t
                    sumi1 = _mm256_madd_epi16(m1, sumi1);
                    // 2,2, 3,3, 2,2, 3,3  as int32_t
                    sumi2 = _mm256_madd_epi16(m1, sumi2);
                    // 0,0, 1,1, 2,2, 3,3, 0,0, 1,1, 2,2, 3,3 as int16_t
                    auto sumi = _mm256_packs_epi32(sumi1, sumi2);
#endif
                    isum[iy] = _mm256_add_epi32(isum[iy], _mm256_madd_epi16(scales, sumi));
                }
            }
            for (int iy = 0; iy < nrc_y; ++iy) {
                acc[iy] = _mm256_fmadd_ps(_mm256_set1_ps(q8.y[iy][ib].d), _mm256_cvtepi32_ps(isum[iy]), acc[iy]);
                isum[iy] = _mm256_setzero_si256();
            }
        }
        for (int iy = 0; iy < nrc_y; ++iy) {
            auto sumf = _mm_add_ps(_mm256_castps256_ps128(acc[iy]), _mm256_extractf128_ps(acc[iy], 1));
            info.store(ix, iy, _mm_mul_ps(d1, sumf));
            acc[iy] = _mm256_setzero_ps();
        }
    }
}

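// Helper for the BitNet kernels below: an activation row prepared as 8 floats
// (4 per-block scales followed by 4 correction terms) and then the int8 quants.
// The corrections are subtracted after the integer dot product (see the fmsub at
// the end of the kernels) to undo the +1 offset of the unsigned ternary values.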
template <int nrc> struct Q8_K64 {

    constexpr static int nrc_y = nrc;

    Q8_K64(const DataInfo& info) {
        for (int iy = 0; iy < nrc_y; ++iy) {
            const float * dptr = (const float *)info.src1_row(iy);
            std::memcpy(d + 8*iy, dptr, 8*sizeof(float));
            y[iy] = (const int8_t *)(dptr + 8);
        }
    }

    inline __m256i load_quants(int iy, int i, int j) const { return _mm256_loadu_si256((const __m256i*)y[iy] + 4*i + j); }
    inline __m128  scale(int iy) const { return _mm_loadu_ps(d + 8*iy); }
    inline __m128  minus(int iy) const { return _mm_loadu_ps(d + 8*iy + 4); }

    float d[8*nrc_y];
    const int8_t * y[nrc_y];
};

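// Unpacks a block_iq1_bn: 64 ternary weights packed into 13 bytes (1.625 bpw).
// Bytes 0..11 carry 5 ternary digits each and byte 12 the remaining 4. A digit is
// recovered by multiplying the byte by the appropriate power-of-3 constant from
// mult[] and taking _mm256_mulhi_epu16 with 3, which leaves a value in {0,1,2}
// in each 16-bit lane; the lanes are then narrowed back to bytes.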
struct DequantizerIQ1BN {
    const __m256i m1_8   = _mm256_set1_epi8(1);
    static __m256i load_shuffle(int i) {
        static const uint8_t data[128] = {
            0, 255, 0, 255, 0, 255, 0, 255, 0, 255,  1, 255,  1, 255,  1, 255,  1, 255,  1, 255,  2, 255,  2, 255,  2, 255,  2, 255,  2, 255, 12, 255,
            3, 255, 3, 255, 3, 255, 3, 255, 3, 255,  4, 255,  4, 255,  4, 255,  4, 255,  4, 255,  5, 255,  5, 255,  5, 255,  5, 255,  5, 255, 12, 255,
            6, 255, 6, 255, 6, 255, 6, 255, 6, 255,  7, 255,  7, 255,  7, 255,  7, 255,  7, 255,  8, 255,  8, 255,  8, 255,  8, 255,  8, 255, 12, 255,
            9, 255, 9, 255, 9, 255, 9, 255, 9, 255, 10, 255, 10, 255, 10, 255, 10, 255, 10, 255, 11, 255, 11, 255, 11, 255, 11, 255, 11, 255, 12, 255,
        };
        return _mm256_loadu_si256((const __m256i*)data + i);
    }
    const __m256i shuff[4] = { load_shuffle(0), load_shuffle(1), load_shuffle(2), load_shuffle(3) };
    const __m256i mult[4]  = {
            _mm256_set_epi64x(0x5100010003000900, 0x1b00510001000300, 0x09001b0051000100, 0x030009001b005100),
            _mm256_set_epi64x(0x1b00010003000900, 0x1b00510001000300, 0x09001b0051000100, 0x030009001b005100),
            _mm256_set_epi64x(0x0900010003000900, 0x1b00510001000300, 0x09001b0051000100, 0x030009001b005100),
            _mm256_set_epi64x(0x0300010003000900, 0x1b00510001000300, 0x09001b0051000100, 0x030009001b005100),
    };
    const __m256i m3 = _mm256_set1_epi16(3);
#if defined HAVE_FANCY_SIMD && defined __AVX512VBMI__
    const __m256i bmask = _mm256_set_epi8(62, 60, 58, 56, 54, 52, 50, 48, 46, 44, 42, 40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0);
#endif

    IQK_ALWAYS_INLINE void prepare_iq1bn_quants(const block_iq1_bn * x, __m256i& v1, __m256i& v2) const {
        auto data128 = _mm_loadu_si128((const __m128i *)x);  // Note: we load 16 instead of 13 bytes!
        auto data = MM256_SET_M128I(data128, data128);
        auto val1 = _mm256_mulhi_epu16(_mm256_mullo_epi16(_mm256_shuffle_epi8(data, shuff[0]), mult[0]), m3);
        auto val2 = _mm256_mulhi_epu16(_mm256_mullo_epi16(_mm256_shuffle_epi8(data, shuff[1]), mult[1]), m3);
        auto val3 = _mm256_mulhi_epu16(_mm256_mullo_epi16(_mm256_shuffle_epi8(data, shuff[2]), mult[2]), m3);
        auto val4 = _mm256_mulhi_epu16(_mm256_mullo_epi16(_mm256_shuffle_epi8(data, shuff[3]), mult[3]), m3);
#if defined HAVE_FANCY_SIMD && defined __AVX512VBMI__
        v1 = _mm256_permutex2var_epi8(val1, bmask, val2);
        v2 = _mm256_permutex2var_epi8(val3, bmask, val4);
#else
        v1 = _mm256_permute4x64_epi64(_mm256_packs_epi16(val1, val2), 216);
        v2 = _mm256_permute4x64_epi64(_mm256_packs_epi16(val3, val4), 216);
#endif
    }

};

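// BitNet b1.58 kernel: each row starts with a single fp16 row scale followed by the
// iq1_bn blocks. The ternary values enter the dot product as {0,1,2}; the Q8_K64
// "minus" terms shift the result back to the {-1,0,1} interpretation.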
template <int nrc_y>
IQK_NOINLINE void mul_mat_iq1bn_q8_K64(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
    const int nb = n / QK_IQ1BN;
    Q8_K64<nrc_y> q8(info);
    DequantizerIQ1BN deq;
    __m256i accd[nrc_y];
    __m256i val[4];

#ifndef HAVE_FANCY_SIMD
    const auto m1_16  = _mm256_set1_epi16(1);
#endif

    const block_iq1_bn * x;
    const char * cx0 = (const char *)vx;
    float scale;
    ggml_half d16;

    for (int ix = 0; ix < nrc_x; ++ix) {

        const char * cx = cx0 + ix*bx;
        std::memcpy(&d16, cx, sizeof(d16));
        scale = GGML_FP16_TO_FP32(d16);
        cx += sizeof(d16);
        x = (const block_iq1_bn *)cx;

        if constexpr (nrc_y == 1) {
            __m256i acc1 = _mm256_setzero_si256(), acc2 = _mm256_setzero_si256();
            for (int i = 0; i < nb/2; ++i) {
                deq.prepare_iq1bn_quants(x + 2*i + 0, val[0], val[1]);
                deq.prepare_iq1bn_quants(x + 2*i + 1, val[2], val[3]);
#ifdef HAVE_FANCY_SIMD
                acc1 = _mm256_dpbusd_epi32(_mm256_dpbusd_epi32(acc1, val[0], q8.load_quants(0, i, 0)), val[1], q8.load_quants(0, i, 1));
                acc2 = _mm256_dpbusd_epi32(_mm256_dpbusd_epi32(acc2, val[2], q8.load_quants(0, i, 2)), val[3], q8.load_quants(0, i, 3));
#else
                auto dot1 = _mm256_add_epi16(_mm256_maddubs_epi16(val[0], q8.load_quants(0, i, 0)),
                                             _mm256_maddubs_epi16(val[1], q8.load_quants(0, i, 1)));
                auto dot2 = _mm256_add_epi16(_mm256_maddubs_epi16(val[2], q8.load_quants(0, i, 2)),
                                             _mm256_maddubs_epi16(val[3], q8.load_quants(0, i, 3)));
                acc1 = _mm256_add_epi32(acc1, _mm256_madd_epi16(m1_16, dot1));
                acc2 = _mm256_add_epi32(acc2, _mm256_madd_epi16(m1_16, dot2));
#endif
            }
            accd[0] = _mm256_add_epi32(acc1, acc2);
        }
        else {

            for (int iy = 0; iy < nrc_y; ++iy) accd[iy] = _mm256_setzero_si256();

            for (int i = 0; i < nb/2; ++i) {

                deq.prepare_iq1bn_quants(x + 2*i + 0, val[0], val[1]);
                deq.prepare_iq1bn_quants(x + 2*i + 1, val[2], val[3]);

                for (int iy = 0; iy < nrc_y; ++iy) {
#ifdef HAVE_FANCY_SIMD
                    accd[iy]  = _mm256_dpbusd_epi32(_mm256_dpbusd_epi32(_mm256_dpbusd_epi32(_mm256_dpbusd_epi32(accd[iy],
                                        val[0], q8.load_quants(iy, i, 0)),
                                        val[1], q8.load_quants(iy, i, 1)),
                                        val[2], q8.load_quants(iy, i, 2)),
                                        val[3], q8.load_quants(iy, i, 3));
#else
                    auto dot1 = _mm256_add_epi16(_mm256_maddubs_epi16(val[0], q8.load_quants(iy, i, 0)),
                                                 _mm256_maddubs_epi16(val[1], q8.load_quants(iy, i, 1)));
                    auto dot2 = _mm256_add_epi16(_mm256_maddubs_epi16(val[2], q8.load_quants(iy, i, 2)),
                                                 _mm256_maddubs_epi16(val[3], q8.load_quants(iy, i, 3)));
                    dot1 = _mm256_madd_epi16(m1_16, _mm256_add_epi16(dot1, dot2));
                    accd[iy] = _mm256_add_epi32(dot1, accd[iy]);
#endif
                }
            }
        }
        int i = 2*(nb/2);
        if (i < nb) {
            deq.prepare_iq1bn_quants(x + i, val[0], val[1]);
            for (int iy = 0; iy < nrc_y; ++iy) {
#ifdef HAVE_FANCY_SIMD
                accd[iy] = _mm256_dpbusd_epi32(_mm256_dpbusd_epi32(accd[iy],
                            val[0], q8.load_quants(iy, i/2, 0)), val[1], q8.load_quants(iy, i/2, 1));
#else
                auto dot = _mm256_madd_epi16(m1_16, _mm256_add_epi16(_mm256_maddubs_epi16(val[0], q8.load_quants(iy, i/2, 0)),
                                                                     _mm256_maddubs_epi16(val[1], q8.load_quants(iy, i/2, 1))));
                accd[iy] = _mm256_add_epi32(dot, accd[iy]);
#endif
            }
        }

        for (int iy = 0; iy < nrc_y; ++iy) {
            auto vd = q8.scale(iy);
            auto sumi = _mm_add_epi32(_mm256_castsi256_si128(accd[iy]), _mm256_extractf128_si256(accd[iy], 1));
            auto sumf = _mm_fmsub_ps(vd, _mm_cvtepi32_ps(sumi), q8.minus(iy));
            info.store(ix, iy, scale*hsum_float_4(sumf));
        }

    }
}

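// IQ2_BN: the same ternary values stored as plain 2-bit fields, 4 weights per byte.
// prepare4() unpacks two consecutive blocks (2x64 weights), prepare2() a single one.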
struct DequantizeIQ2BN final : public BaseDequantizer<block_iq2_bn, true> {
    DequantizeIQ2BN(const void * vx, size_t bx) : BaseDequantizer(vx, bx) {}

    IQK_ALWAYS_INLINE void prepare4(int i, __m256i * val) const {
        auto q2bits_1 = _mm256_loadu_si256((const __m256i *)x[2*i].qs);
        auto q2bits_2 = _mm256_srli_epi16(q2bits_1, 2);
        make2(_mm256_permute2x128_si256(q2bits_1, q2bits_2, 0x20), val+0);
        make2(_mm256_permute2x128_si256(q2bits_1, q2bits_2, 0x31), val+2);
    }
    IQK_ALWAYS_INLINE void make2(__m256i q2_1, __m256i * val) const {
        val[0] = _mm256_and_si256(q2_1, mask2);
        val[1] = _mm256_and_si256(_mm256_srli_epi16(q2_1, 4), mask2);
    }
    IQK_ALWAYS_INLINE void prepare2(int i, __m256i * val) const {
        auto q2bits_1 = _mm_loadu_si128((const __m128i *)x[i].qs);
        make2(MM256_SET_M128I(_mm_srli_epi16(q2bits_1, 2), q2bits_1), val);
    }
    const __m256i m1_8   = _mm256_set1_epi8(1);
    const __m256i mf_8   = _mm256_set1_epi8(16);
    const __m256i mask2  = _mm256_set1_epi8(0x03);
    const __m256i mask3  = _mm256_set1_epi8(0x30);
};

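// Same structure as the iq1_bn kernel above, using the cheaper 2-bit unpacking.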
template <int nrc_y>
IQK_NOINLINE void mul_mat_iq2bn_q8_K64(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
    const int nb = n / QK_IQ1BN;
    Q8_K64<nrc_y> q8(info);
    DequantizeIQ2BN deq(vx, bx);
    __m256i  accd[nrc_y];
    __m256i  val[4];

#ifndef HAVE_FANCY_SIMD
    const auto m1_16  = _mm256_set1_epi16(1);
#endif

    for (int ix = 0; ix < nrc_x; ++ix) {

        deq.new_row(ix);

        if constexpr (nrc_y == 1) {
            __m256i acc[2] = {};
            for (int i = 0; i < nb/2; ++i) {
                deq.prepare4(i, val);
#ifdef HAVE_FANCY_SIMD
                acc[0] = _mm256_dpbusd_epi32(_mm256_dpbusd_epi32(acc[0], val[0], q8.load_quants(0, i, 0)),
                                                                         val[1], q8.load_quants(0, i, 1));
                acc[1] = _mm256_dpbusd_epi32(_mm256_dpbusd_epi32(acc[1], val[2], q8.load_quants(0, i, 2)),
                                                                         val[3], q8.load_quants(0, i, 3));
#else
                auto dot1 = _mm256_add_epi16(_mm256_maddubs_epi16(val[0], q8.load_quants(0, i, 0)),
                                             _mm256_maddubs_epi16(val[1], q8.load_quants(0, i, 1)));
                auto dot2 = _mm256_add_epi16(_mm256_maddubs_epi16(val[2], q8.load_quants(0, i, 2)),
                                             _mm256_maddubs_epi16(val[3], q8.load_quants(0, i, 3)));
                acc[0] = _mm256_add_epi32(acc[0], _mm256_madd_epi16(m1_16, dot1));
                acc[1] = _mm256_add_epi32(acc[1], _mm256_madd_epi16(m1_16, dot2));
#endif
            }
            accd[0] = _mm256_add_epi32(acc[0], acc[1]);
        }
        else {

            for (int iy = 0; iy < nrc_y; ++iy) accd[iy] = _mm256_setzero_si256();

            for (int i = 0; i < nb/2; ++i) {
                deq.prepare4(i, val);
                for (int iy = 0; iy < nrc_y; ++iy) {
#ifdef HAVE_FANCY_SIMD
                    accd[iy] = _mm256_dpbusd_epi32(_mm256_dpbusd_epi32(_mm256_dpbusd_epi32(_mm256_dpbusd_epi32(accd[iy],
                                        val[0], q8.load_quants(iy, i, 0)), val[1], q8.load_quants(iy, i, 1)),
                                        val[2], q8.load_quants(iy, i, 2)), val[3], q8.load_quants(iy, i, 3));
#else
                    auto dot = _mm256_madd_epi16(m1_16, _mm256_add_epi16(
                                _mm256_add_epi16(_mm256_maddubs_epi16(val[0], q8.load_quants(iy, i, 0)),
                                                 _mm256_maddubs_epi16(val[1], q8.load_quants(iy, i, 1))),
                                _mm256_add_epi16(_mm256_maddubs_epi16(val[2], q8.load_quants(iy, i, 2)),
                                                 _mm256_maddubs_epi16(val[3], q8.load_quants(iy, i, 3)))));
                    accd[iy] = _mm256_add_epi32(dot, accd[iy]);
#endif
                }
            }
        }
        int i = 2*(nb/2);
        if (i < nb) {
            deq.prepare2(i, val);
            for (int iy = 0; iy < nrc_y; ++iy) {
#ifdef HAVE_FANCY_SIMD
                accd[iy] = _mm256_dpbusd_epi32(_mm256_dpbusd_epi32(accd[iy], val[0], q8.load_quants(iy, i/2, 0)),
                                                                             val[1], q8.load_quants(iy, i/2, 1));
#else
                auto dot = _mm256_madd_epi16(m1_16, _mm256_add_epi16(_mm256_maddubs_epi16(val[0], q8.load_quants(iy, i/2, 0)),
                                                                     _mm256_maddubs_epi16(val[1], q8.load_quants(iy, i/2, 1))));
                accd[iy] = _mm256_add_epi32(dot, accd[iy]);
#endif
            }
        }

        for (int iy = 0; iy < nrc_y; ++iy) {
            auto vd = q8.scale(iy);
            auto sumi = _mm_add_epi32(_mm256_castsi256_si128(accd[iy]), _mm256_extractf128_si256(accd[iy], 1));
            auto sumf = _mm_fmsub_ps(vd, _mm_cvtepi32_ps(sumi), q8.minus(iy));
            info.store(ix, iy, deq.d*hsum_float_4(sumf));
        }
    }
}

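// IQ2_BN_R4: 4 interleaved rows with 4 float row scales at the start of the row group.
// For nrc_y > 4 the two 32-byte halves of each block are processed in two separate
// passes to halve the number of live accumulators; otherwise both halves are handled
// in a single pass with 2*nrc_y accumulators. The -q8.sum_row(iy) term applies the
// -1 shift of the ternary values.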
template <int nrc_y>
static void mul_mat_iq2_bn_r4_q8_k16_avx2(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
    if (nrc_x%4) {
        printf("%s: %d is not a multiple of 4\n", __func__, nrc_x);
        GGML_ABORT("fatal error");
    }
    Q8_16<nrc_y> q8(info);
    auto m3 = _mm256_set1_epi8(0x3);
    auto m1 = _mm256_set1_epi16(1);
    int nb = n / QK_IQ1BN;
    __m256i qx[4];
    if constexpr (nrc_y > 4) {
    __m256i acc[nrc_y] = {};
    __m128  sum4[nrc_y];
    for (int ix = 0; ix < nrc_x; ix += 4) {
        const float * dptr = (const float *)((const char *)vx + ix*bx);
        auto dl = _mm_loadu_ps(dptr);
        const uint8_t * iq2l = (const uint8_t *)(dptr + 4);
        for (int ib = 0; ib < nb; ++ib) {
            auto bits = _mm256_loadu_si256((const __m256i *)iq2l + 2*ib+0);
            qx[0] = _mm256_and_si256(bits, m3);
            qx[1] = _mm256_and_si256(_mm256_srli_epi16(bits, 2), m3);
            qx[2] = _mm256_and_si256(_mm256_srli_epi16(bits, 4), m3);
            qx[3] = _mm256_and_si256(_mm256_srli_epi16(bits, 6), m3);
            for (int iy = 0; iy < nrc_y; ++iy) {
                auto y = q8.load_quants(iy, 2*ib+0);
                auto sumi1 = _mm256_add_epi16(_mm256_maddubs_epi16(qx[0], _mm256_shuffle_epi32(y, 0x00)),
                                              _mm256_maddubs_epi16(qx[1], _mm256_shuffle_epi32(y, 0x55)));
                auto sumi2 = _mm256_add_epi16(_mm256_maddubs_epi16(qx[2], _mm256_shuffle_epi32(y, 0xaa)),
                                              _mm256_maddubs_epi16(qx[3], _mm256_shuffle_epi32(y, 0xff)));
                acc[iy] = _mm256_add_epi32(acc[iy], _mm256_madd_epi16(m1, _mm256_add_epi16(sumi1, sumi2)));
            }
        }
        for (int iy = 0; iy < nrc_y; ++iy) {
            auto dy = q8.scale(iy);
            auto sumf1 = _mm256_cvtepi32_ps(acc[iy]);
            auto s4 = _mm_mul_ps(_mm256_extractf128_ps(sumf1, 0), _mm_mul_ps(dl, _mm_shuffle_ps(dy, dy, 0x00)));
            s4 = _mm_fmadd_ps(_mm256_extractf128_ps(sumf1, 1), _mm_mul_ps(dl, _mm_shuffle_ps(dy, dy, 0x55)), s4);
            sum4[iy] = _mm_fmadd_ps(dl, _mm_set1_ps(-q8.sum_row(iy)), s4);
            acc[iy] = _mm256_setzero_si256();
        }
        for (int ib = 0; ib < nb; ++ib) {
            auto bits = _mm256_loadu_si256((const __m256i *)iq2l + 2*ib+1);
            qx[0] = _mm256_and_si256(bits, m3);
            qx[1] = _mm256_and_si256(_mm256_srli_epi16(bits, 2), m3);
            qx[2] = _mm256_and_si256(_mm256_srli_epi16(bits, 4), m3);
            qx[3] = _mm256_and_si256(_mm256_srli_epi16(bits, 6), m3);
            for (int iy = 0; iy < nrc_y; ++iy) {
                auto y = q8.load_quants(iy, 2*ib+1);
                auto sumi1 = _mm256_add_epi16(_mm256_maddubs_epi16(qx[0], _mm256_shuffle_epi32(y, 0x00)),
                                              _mm256_maddubs_epi16(qx[1], _mm256_shuffle_epi32(y, 0x55)));
                auto sumi2 = _mm256_add_epi16(_mm256_maddubs_epi16(qx[2], _mm256_shuffle_epi32(y, 0xaa)),
                                              _mm256_maddubs_epi16(qx[3], _mm256_shuffle_epi32(y, 0xff)));
                acc[iy] = _mm256_add_epi32(acc[iy], _mm256_madd_epi16(m1, _mm256_add_epi16(sumi1, sumi2)));
            }
        }
        for (int iy = 0; iy < nrc_y; ++iy) {
            auto dy = q8.scale(iy);
            auto sumf1 = _mm256_cvtepi32_ps(acc[iy]);
            auto s4 = _mm_fmadd_ps(_mm256_extractf128_ps(sumf1, 0), _mm_mul_ps(dl, _mm_shuffle_ps(dy, dy, 0xaa)), sum4[iy]);
            s4 = _mm_fmadd_ps(_mm256_extractf128_ps(sumf1, 1), _mm_mul_ps(dl, _mm_shuffle_ps(dy, dy, 0xff)), s4);
            info.store(ix, iy, s4);
            acc[iy] = _mm256_setzero_si256();
        }
    }
    } else {
    __m256i acc[2*nrc_y] = {};
    for (int ix = 0; ix < nrc_x; ix += 4) {
        const float * dptr = (const float *)((const char *)vx + ix*bx);
        auto dl = _mm_loadu_ps(dptr);
        const uint8_t * iq2l = (const uint8_t *)(dptr + 4);
        for (int ib = 0; ib < nb; ++ib) {
            auto bits = _mm256_loadu_si256((const __m256i *)iq2l + 2*ib+0);
            qx[0] = _mm256_and_si256(bits, m3);
            qx[1] = _mm256_and_si256(_mm256_srli_epi16(bits, 2), m3);
            qx[2] = _mm256_and_si256(_mm256_srli_epi16(bits, 4), m3);
            qx[3] = _mm256_and_si256(_mm256_srli_epi16(bits, 6), m3);
            for (int iy = 0; iy < nrc_y; ++iy) {
                auto y = q8.load_quants(iy, 2*ib+0);
                auto sumi1 = _mm256_add_epi16(_mm256_maddubs_epi16(qx[0], _mm256_shuffle_epi32(y, 0x00)),
                                              _mm256_maddubs_epi16(qx[1], _mm256_shuffle_epi32(y, 0x55)));
                auto sumi2 = _mm256_add_epi16(_mm256_maddubs_epi16(qx[2], _mm256_shuffle_epi32(y, 0xaa)),
                                              _mm256_maddubs_epi16(qx[3], _mm256_shuffle_epi32(y, 0xff)));
                acc[2*iy+0] = _mm256_add_epi32(acc[2*iy+0], _mm256_madd_epi16(m1, _mm256_add_epi16(sumi1, sumi2)));
            }
            bits = _mm256_loadu_si256((const __m256i *)iq2l + 2*ib+1);
            qx[0] = _mm256_and_si256(bits, m3);
            qx[1] = _mm256_and_si256(_mm256_srli_epi16(bits, 2), m3);
            qx[2] = _mm256_and_si256(_mm256_srli_epi16(bits, 4), m3);
            qx[3] = _mm256_and_si256(_mm256_srli_epi16(bits, 6), m3);
            for (int iy = 0; iy < nrc_y; ++iy) {
                auto y = q8.load_quants(iy, 2*ib+1);
                auto sumi1 = _mm256_add_epi16(_mm256_maddubs_epi16(qx[0], _mm256_shuffle_epi32(y, 0x00)),
                                              _mm256_maddubs_epi16(qx[1], _mm256_shuffle_epi32(y, 0x55)));
                auto sumi2 = _mm256_add_epi16(_mm256_maddubs_epi16(qx[2], _mm256_shuffle_epi32(y, 0xaa)),
                                              _mm256_maddubs_epi16(qx[3], _mm256_shuffle_epi32(y, 0xff)));
                acc[2*iy+1] = _mm256_add_epi32(acc[2*iy+1], _mm256_madd_epi16(m1, _mm256_add_epi16(sumi1, sumi2)));
            }
        }
        for (int iy = 0; iy < nrc_y; ++iy) {
            auto dy = q8.scale(iy);
            auto sumf1 = _mm256_cvtepi32_ps(acc[2*iy+0]);
            auto sumf2 = _mm256_cvtepi32_ps(acc[2*iy+1]);
            auto sum4 = _mm_mul_ps(_mm256_extractf128_ps(sumf1, 0), _mm_mul_ps(dl, _mm_shuffle_ps(dy, dy, 0x00)));
            sum4 = _mm_fmadd_ps(_mm256_extractf128_ps(sumf1, 1), _mm_mul_ps(dl, _mm_shuffle_ps(dy, dy, 0x55)), sum4);
            sum4 = _mm_fmadd_ps(_mm256_extractf128_ps(sumf2, 0), _mm_mul_ps(dl, _mm_shuffle_ps(dy, dy, 0xaa)), sum4);
            sum4 = _mm_fmadd_ps(_mm256_extractf128_ps(sumf2, 1), _mm_mul_ps(dl, _mm_shuffle_ps(dy, dy, 0xff)), sum4);
            sum4 = _mm_fmadd_ps(dl, _mm_set1_ps(-q8.sum_row(iy)), sum4);
            info.store(ix, iy, sum4);
            acc[2*iy+0] = acc[2*iy+1] = _mm256_setzero_si256();
        }
    }
    }
}


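// Matrix multiplication kernels for IQ2_BN_R4 (4 interleaved rows) x Q8_K16. With
// AVX512-VNNI we process 8 rows per iteration via _mm512_dpbusd_epi32 (the 2-bit quants
// are the unsigned operand), with a 4-row tail; for nrc_y == 1 the AVX2 kernel above is
// used instead.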
#ifdef HAVE_FANCY_SIMD
template <int nrc_y>
static void mul_mat_iq2_bn_r4_q8_k16(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
    if (nrc_x%4) {
        printf("%s: %d is not a multiple of 4\n", __func__, nrc_x);
        GGML_ABORT("fatal error");
    }
    if constexpr (nrc_y == 1) {
        mul_mat_iq2_bn_r4_q8_k16_avx2<1>(n, vx, bx, info, nrc_x);
    } else {
    Q8_16<nrc_y> q8(info);
    auto m3 = _mm512_set1_epi8(0x3);
    int nb = n / QK_IQ1BN;
    __m512i acc[2*nrc_y] = {};
    __m512i qx[8];
    for (int ix = 0; ix < nrc_x/8; ++ix) {
        const float * dptr1 = (const float *)((const char *)vx + (8*ix+0)*bx);
        const float * dptr2 = (const float *)((const char *)vx + (8*ix+4)*bx);
        auto dl = _mm_loadu_ps(dptr1);
        auto dh = _mm_loadu_ps(dptr2);
        const uint8_t * iq2l = (const uint8_t *)(dptr1 + 4);
        const uint8_t * iq2h = (const uint8_t *)(dptr2 + 4);
        for (int ib = 0; ib < nb; ++ib) {
            auto bits_l = _mm512_loadu_si512((const __m512i *)iq2l + ib);
            auto bits_h = _mm512_loadu_si512((const __m512i *)iq2h + ib);
            qx[0] = _mm512_and_si512(bits_l, m3);
            qx[1] = _mm512_and_si512(bits_h, m3);
            qx[2] = _mm512_and_si512(_mm512_srli_epi16(bits_l, 2), m3);
            qx[3] = _mm512_and_si512(_mm512_srli_epi16(bits_h, 2), m3);
            qx[4] = _mm512_and_si512(_mm512_srli_epi16(bits_l, 4), m3);
            qx[5] = _mm512_and_si512(_mm512_srli_epi16(bits_h, 4), m3);
            qx[6] = _mm512_and_si512(_mm512_srli_epi16(bits_l, 6), m3);
            qx[7] = _mm512_and_si512(_mm512_srli_epi16(bits_h, 6), m3);
            for (int iy = 0; iy < nrc_y; ++iy) {
                auto y = q8.load_quants64(iy, ib);
                auto sy = _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0x00));
                acc[2*iy+0] = _mm512_dpbusd_epi32(acc[2*iy+0], qx[0], sy);
                acc[2*iy+1] = _mm512_dpbusd_epi32(acc[2*iy+1], qx[1], sy);
                sy = _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0x55));
                acc[2*iy+0] = _mm512_dpbusd_epi32(acc[2*iy+0], qx[2], sy);
                acc[2*iy+1] = _mm512_dpbusd_epi32(acc[2*iy+1], qx[3], sy);
                sy = _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0xaa));
                acc[2*iy+0] = _mm512_dpbusd_epi32(acc[2*iy+0], qx[4], sy);
                acc[2*iy+1] = _mm512_dpbusd_epi32(acc[2*iy+1], qx[5], sy);
                sy = _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0xff));
                acc[2*iy+0] = _mm512_dpbusd_epi32(acc[2*iy+0], qx[6], sy);
                acc[2*iy+1] = _mm512_dpbusd_epi32(acc[2*iy+1], qx[7], sy);
            }
        }
        for (int iy = 0; iy < nrc_y; ++iy) {
            auto dy = q8.scale(iy);
            __m128 sum4;
            for (int k = 0; k < 2; ++k) {
                const auto& dx = k == 0 ? dl : dh;
                auto sumf = _mm512_cvtepi32_ps(acc[2*iy+k]);
                sum4 = _mm_mul_ps  (_mm512_extractf32x4_ps(sumf, 0), _mm_mul_ps(dx, _mm_shuffle_ps(dy, dy, 0x00)));
                sum4 = _mm_fmadd_ps(_mm512_extractf32x4_ps(sumf, 1), _mm_mul_ps(dx, _mm_shuffle_ps(dy, dy, 0x55)), sum4);
                sum4 = _mm_fmadd_ps(_mm512_extractf32x4_ps(sumf, 2), _mm_mul_ps(dx, _mm_shuffle_ps(dy, dy, 0xaa)), sum4);
                sum4 = _mm_fmadd_ps(_mm512_extractf32x4_ps(sumf, 3), _mm_mul_ps(dx, _mm_shuffle_ps(dy, dy, 0xff)), sum4);
                sum4 = _mm_fmadd_ps(dx, _mm_set1_ps(-q8.sum_row(iy)), sum4);
                info.store(8*ix + 4*k, iy, sum4);
            }
            acc[2*iy+0] = acc[2*iy+1] = _mm512_setzero_si512();
        }
    }
    if (int ix = 8*(nrc_x/8); ix < nrc_x) {
        const float * dptr = (const float *)((const char *)vx + ix*bx);
        auto dl = _mm_loadu_ps(dptr);
        const uint8_t * iq2l = (const uint8_t *)(dptr + 4);
        for (int ib = 0; ib < nb; ++ib) {
            auto bits_l = _mm512_loadu_si512((const __m512i *)iq2l + ib);
            qx[0] = _mm512_and_si512(bits_l, m3);
            qx[1] = _mm512_and_si512(_mm512_srli_epi16(bits_l, 2), m3);
            qx[2] = _mm512_and_si512(_mm512_srli_epi16(bits_l, 4), m3);
            qx[3] = _mm512_and_si512(_mm512_srli_epi16(bits_l, 6), m3);
            for (int iy = 0; iy < nrc_y; ++iy) {
                auto y = q8.load_quants64(iy, ib);
                acc[iy] = _mm512_dpbusd_epi32(acc[iy], qx[0], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0x00)));
                acc[iy] = _mm512_dpbusd_epi32(acc[iy], qx[1], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0x55)));
                acc[iy] = _mm512_dpbusd_epi32(acc[iy], qx[2], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0xaa)));
                acc[iy] = _mm512_dpbusd_epi32(acc[iy], qx[3], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0xff)));
            }
        }
        for (int iy = 0; iy < nrc_y; ++iy) {
            auto dy = q8.scale(iy);
            auto sumf = _mm512_cvtepi32_ps(acc[iy]);
            auto sum4 = _mm_mul_ps(_mm512_extractf32x4_ps(sumf, 0), _mm_mul_ps(dl, _mm_shuffle_ps(dy, dy, 0x00)));
            sum4 = _mm_fmadd_ps(_mm512_extractf32x4_ps(sumf, 1), _mm_mul_ps(dl, _mm_shuffle_ps(dy, dy, 0x55)), sum4);
            sum4 = _mm_fmadd_ps(_mm512_extractf32x4_ps(sumf, 2), _mm_mul_ps(dl, _mm_shuffle_ps(dy, dy, 0xaa)), sum4);
            sum4 = _mm_fmadd_ps(_mm512_extractf32x4_ps(sumf, 3), _mm_mul_ps(dl, _mm_shuffle_ps(dy, dy, 0xff)), sum4);
            sum4 = _mm_fmadd_ps(dl, _mm_set1_ps(-q8.sum_row(iy)), sum4);
            info.store(ix, iy, sum4);
        }
    }
    }
}
#else
template <int nrc_y>
static void mul_mat_iq2_bn_r4_q8_k16(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
    if (nrc_x%4) {
        printf("%s: %d is not a multiple of 4\n", __func__, nrc_x);
        GGML_ABORT("fatal error");
    }
    mul_mat_iq2_bn_r4_q8_k16_avx2<nrc_y>(n, vx, bx, info, nrc_x);
}
#endif

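// Repacking of IQ1_S to Q8_K_R8 (8 interleaved rows), or Q8_K_R16 (16 rows) with AVX512.
// The grid values are decoded to 8*(g ± 0.125) with g in {-1, 0, 1} and requantized via
// convert_to_q8_k_r8(); the final XOR with -128 converts the int8 quants to the unsigned
// range expected as the first operand of the dpbusd-based kernels.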
void iqk_convert_iq1_s_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int nrc_x) {
#ifdef HAVE_FANCY_SIMD
    constexpr int k_nr = 16;
    using block_q8_k_r = block_q8_k_r16;
#else
    constexpr int k_nr = 8;
    using block_q8_k_r = block_q8_k_r8;
#endif

    GGML_ASSERT(n%QK_K == 0);
    GGML_ASSERT(nrc_x%k_nr == 0);

    int nb = n/QK_K;

    const block_iq1_s * x8[k_nr];

    block_q8_k_r * y = (block_q8_k_r *)vy;

    int16_t ls[16];

    uint32_t block[8];

    __m256i qx[8];

    for (int ix = 0; ix < nrc_x; ix += k_nr) {
        for (int k = 0; k < k_nr; ++k) x8[k] = (const block_iq1_s *)((const char *)vx + (ix + k)*bx);
        for (int i = 0; i < nb; ++i) {
            for (int k = 0; k < k_nr; ++k) {
                float d = 0.125f * GGML_FP16_TO_FP32(x8[k][i].d);
                auto qs = x8[k][i].qs;
                auto qh = x8[k][i].qh;
                __m256i value;
                for (int ib32 = 0; ib32 < 8; ++ib32) {
                    ls[2*ib32 + 0] = (2*((qh[ib32] >> 12) & 7) + 1);
                    ls[2*ib32 + 1] = ls[2*ib32 + 0];
                    value = _mm256_set_epi64x(iq1s_grid[qs[3] | ((qh[ib32] >> 1) & 0x700)], iq1s_grid[qs[2] | ((qh[ib32] << 2) & 0x700)],
                                              iq1s_grid[qs[1] | ((qh[ib32] << 5) & 0x700)], iq1s_grid[qs[0] | ((qh[ib32] << 8) & 0x700)]);
                    value = _mm256_slli_epi16(_mm256_add_epi8(value, _mm256_set1_epi8(1)), 3);
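                    // g+1 in {0,1,2} shifted left by 3 gives {0,8,16} (the 16-bit shift is
                    // safe since the bytes are at most 2); adding -7/-9 below yields
                    // 8*(g ± 0.125), with the factor 1/8 already folded into d above.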
                    int8_t delta = qh[ib32] & 0x8000 ? -9 : -7;
                    value = _mm256_add_epi8(value, _mm256_set1_epi8(delta));
                    qx[ib32] = value;
                    qs += 4;
                }
                float dnew = convert_to_q8_k_r8<k_nr>(k, 1.f/126, qx, ls, block, y[i].qs);
                y[i].d[k] = GGML_FP32_TO_FP16(d*dnew);
            }
#ifdef HAVE_FANCY_SIMD
            for (int l = 0; l < 64; ++l) {
                auto v = _mm512_xor_si512(_mm512_loadu_si512((const __m512i *)y[i].qs + l), _mm512_set1_epi8(-128));
                _mm512_storeu_si512((__m512i *)y[i].qs + l, v);
            }
#endif
        }
        y += nb;
    }
}

void iqk_convert_iq1_m_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int nrc_x) {
#ifdef HAVE_FANCY_SIMD
    constexpr int k_nr = 16;
    using block_q8_k_r = block_q8_k_r16;
#else
    constexpr int k_nr = 8;
    using block_q8_k_r = block_q8_k_r8;
#endif
    GGML_ASSERT(n%QK_K == 0);
    GGML_ASSERT(nrc_x%k_nr == 0);

    int nb = n/QK_K;

    const block_iq1_m * x8[k_nr];

    block_q8_k_r * y = (block_q8_k_r *)vy;

    int16_t ls[16];

    uint32_t block[8];

    __m256i qx[8];

    auto mask = _mm256_setr_epi32(0x00000008, 0x00000008, 0x00000080, 0x00000080, 0x00080000, 0x00080000, 0x00800000, 0x00800000);

    for (int ix = 0; ix < nrc_x; ix += k_nr) {
        for (int k = 0; k < k_nr; ++k) x8[k] = (const block_iq1_m *)((const char *)vx + (ix + k)*bx);
        for (int i = 0; i < nb; ++i) {
            for (int k = 0; k < k_nr; ++k) {
                const uint16_t * sc = (const uint16_t *)x8[k][i].scales;
                iq1m_scale_t scale;
                scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
                float d = 0.125f * GGML_FP16_TO_FP32(scale.f16);
                auto qs = x8[k][i].qs;
                auto qh = x8[k][i].qh;
                __m256i value;
                for (int ib32 = 0; ib32 < 8; ++ib32) {
                    ls[2*ib32 + 0] = (2*((sc[ib32/2] >> (6*(ib32%2)+0)) & 0x7) + 1);
                    ls[2*ib32 + 1] = (2*((sc[ib32/2] >> (6*(ib32%2)+3)) & 0x7) + 1);
                    value = _mm256_set_epi64x(iq1s_grid[qs[3] | ((qh[1] << 4) & 0x700)], iq1s_grid[qs[2] | ((qh[1] << 8) & 0x700)],
                                              iq1s_grid[qs[1] | ((qh[0] << 4) & 0x700)], iq1s_grid[qs[0] | ((qh[0] << 8) & 0x700)]);
                    value = _mm256_slli_epi16(_mm256_add_epi8(value, _mm256_set1_epi8(1)), 3);

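                    // Branch-free version of the commented-out scalar code below: the qh
                    // bits 0x08/0x80 select a delta of 9 (bit set) or 7 per group of 8 values.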
                    auto delta_mask = _mm256_cmpeq_epi32(_mm256_and_si256(_mm256_set1_epi32(qh[0] | qh[1] << 16), mask), mask);
                    auto delta = _mm256_add_epi8(_mm256_set1_epi8(7), _mm256_and_si256(delta_mask, _mm256_set1_epi8(2)));
                    qx[ib32] = _mm256_sub_epi8(value, delta);

                    //int64_t delta1 = qh[0] & 0x08 ? 0x0909090909090909 : 0x0707070707070707;
                    //int64_t delta2 = qh[0] & 0x80 ? 0x0909090909090909 : 0x0707070707070707;
                    //int64_t delta3 = qh[1] & 0x08 ? 0x0909090909090909 : 0x0707070707070707;
                    //int64_t delta4 = qh[1] & 0x80 ? 0x0909090909090909 : 0x0707070707070707;
                    //value = _mm256_sub_epi8(value, _mm256_set_epi64x(delta4, delta3, delta2, delta1));
                    //qx[ib32] = value;
                    qs += 4;
                    qh += 2;
                }
                float dnew = convert_to_q8_k_r8<k_nr>(k, 1.f/126, qx, ls, block, y[i].qs);
                y[i].d[k] = GGML_FP32_TO_FP16(d*dnew);
            }
#ifdef HAVE_FANCY_SIMD
            for (int l = 0; l < 64; ++l) {
                auto v = _mm512_xor_si512(_mm512_loadu_si512((const __m512i *)y[i].qs + l), _mm512_set1_epi8(-128));
                _mm512_storeu_si512((__m512i *)y[i].qs + l, v);
            }
#endif
        }
        y += nb;
    }
}

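// Repacking of IQ1_S to Q8_0_R8: 8 interleaved rows with one fp16 scale per 32 quants.
// The quants are stored as 8*(g ± 0.125), and the 1/8 is folded into the stored scales
// via the 0.125f factor below.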
void iqk_convert_iq1_s_q8_0_r8(int n, const void * vx, size_t bx, void * vy, int nrc_x) {
    GGML_ASSERT(n%QK_K == 0);
    GGML_ASSERT(nrc_x%8 == 0);

    int nb = n/QK_K;

    const block_iq1_s * x8[8];

    block_q8_0_r8 * y = (block_q8_0_r8 *)vy;

    ggml_half dh[8];
    uint16_t all_ls[64];

    uint32_t block[8];

    for (int ix = 0; ix < nrc_x; ix += 8) {
        for (int k = 0; k < 8; ++k) x8[k] = (const block_iq1_s *)((const char *)vx + (ix + k)*bx);
        for (int i = 0; i < nb; ++i) {
            for (int k = 0; k < 8; ++k) {
                dh[k] = x8[k][i].d;
                auto qs = x8[k][i].qs;
                auto qh = x8[k][i].qh;
                __m256i value;
                for (int ib32 = 0; ib32 < 8; ++ib32) {
                    all_ls[8*ib32 + k] = (2*((qh[ib32] >> 12) & 7) + 1);
                    value = _mm256_set_epi64x(iq1s_grid[qs[3] | ((qh[ib32] >> 1) & 0x700)], iq1s_grid[qs[2] | ((qh[ib32] << 2) & 0x700)],
                                              iq1s_grid[qs[1] | ((qh[ib32] << 5) & 0x700)], iq1s_grid[qs[0] | ((qh[ib32] << 8) & 0x700)]);
                    value = _mm256_slli_epi16(_mm256_add_epi8(value, _mm256_set1_epi8(1)), 3);
                    int8_t delta = qh[ib32] & 0x8000 ? -9 : -7;
                    value = _mm256_add_epi8(value, _mm256_set1_epi8(delta));
                    _mm256_storeu_si256((__m256i *)block, value);
                    auto q8 = (uint32_t *)y[ib32].qs;
                    for (int l = 0; l < 4; ++l) {
                        q8[8*l + k +  0] = block[l + 0];
                        q8[8*l + k + 32] = block[l + 4];
                    }
                    qs += 4;
                }
            }
            auto vd = _mm256_mul_ps(_mm256_set1_ps(0.125f), _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)dh)));
            for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
                auto iscales16 = _mm_loadu_si128((const __m128i *)all_ls + ib32);
                auto iscales32 = _mm256_cvtepi16_epi32(iscales16);
                auto scales = _mm256_mul_ps(vd, _mm256_cvtepi32_ps(iscales32));
                _mm_storeu_si128((__m128i *)y[ib32].d, _mm256_cvtps_ph(scales, _MM_FROUND_TO_NEAREST_INT));
            }
            y += QK_K/32;
        }
    }
}

} // namespace

bool iqk_set_kernels_1bit(int ne00, int typeA, int typeB, std::array<mul_mat_t, IQK_MAX_NY>& funcs, mul_mat_t& func16) {

    auto expected_typeB = GGML_TYPE_Q8_K128;
    auto actual_typeB   = ggml_type(typeB);

    func16 = nullptr;

    switch (typeA) {
        case GGML_TYPE_IQ1_S:
            if (ne00%QK_K != 0) return false;
            if (actual_typeB == GGML_TYPE_Q8_2_X4) {
                IQK_SET_MUL_MAT_FUNCTIONS(mul_mat_iq1_s_q8_2_x4, funcs);
                expected_typeB = GGML_TYPE_Q8_2_X4;
            } else {
                IQK_SET_MUL_MAT_FUNCTIONS(mul_mat_iq1_s_q8_K, funcs);
#ifdef HAVE_FANCY_SIMD
                func16 = mul_mat_iq1_s_q8_K<16>;
#endif
                expected_typeB = GGML_TYPE_Q8_K;
            }
            break;
        case GGML_TYPE_IQ1_S_R4:
            if (ne00%128 != 0) return false;
            IQK_SET_MUL_MAT_FUNCTIONS(mul_mat_iq1_s_r4_q8_1, funcs);
#ifdef HAVE_FANCY_SIMD
            func16 = mul_mat_iq1_s_r4_q8_1<16>;
#endif
            break;
        case GGML_TYPE_IQ1_M:
            if (ne00%QK_K != 0) return false;
            IQK_SET_MUL_MAT_FUNCTIONS(mul_mat_iq1_m_q8_K, funcs);
            expected_typeB = GGML_TYPE_Q8_K;
            break;
        case GGML_TYPE_IQ1_M_R4:
            if (ne00%128 != 0) return false;
            IQK_SET_MUL_MAT_FUNCTIONS(mul_mat_iq1_m_r4_q8_0, funcs);
#ifdef HAVE_FANCY_SIMD
            func16 = mul_mat_iq1_m_r4_q8_0<16>;
#endif
            break;
        case GGML_TYPE_IQ1_BN:
            if (ne00 % QK_IQ1BN != 0) return false;
            IQK_SET_MUL_MAT_FUNCTIONS(mul_mat_iq1bn_q8_K64, funcs);
            expected_typeB = GGML_TYPE_Q8_K64;
            break;
        case GGML_TYPE_IQ2_BN:
            if (ne00 % QK_IQ1BN != 0) return false;
            IQK_SET_MUL_MAT_FUNCTIONS(mul_mat_iq2bn_q8_K64, funcs);
            expected_typeB = GGML_TYPE_Q8_K64;
            break;
        case GGML_TYPE_IQ2_BN_R4:
            if (ne00 % QK_IQ1BN != 0) return false;
            IQK_SET_MUL_MAT_FUNCTIONS(mul_mat_iq2_bn_r4_q8_k16, funcs);
            expected_typeB = GGML_TYPE_Q8_K16;
            break;

        default:
            return false;
    }

    return actual_typeB == expected_typeB;

}

bool iqk_convert_1bit_q80_r8(int type, int n, const void * vx, size_t bx, void * vy, int nrc_x) {
    if (n%QK_K != 0 || nrc_x%8 != 0) return false;
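    // Note: with HAVE_FANCY_SIMD the converters additionally require nrc_x%16 == 0
    // (asserted inside iqk_convert_iq1_s_q8_k_r8 / iqk_convert_iq1_m_q8_k_r8).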
    switch (ggml_type(type)) {
        case GGML_TYPE_IQ1_S: iqk_convert_iq1_s_q8_k_r8(n, vx, bx, vy, nrc_x); break;
        case GGML_TYPE_IQ1_M: iqk_convert_iq1_m_q8_k_r8(n, vx, bx, vy, nrc_x); break;
        default: return false;
    }
    return true;
}

#else
// -------------------------------- __aarch64__

namespace {

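// Q8_K64: each quantized activation row starts with an 8-float header -- 4 block scales
// followed by 4 matching scale*sum terms ("minus"), used below to undo the +1 bias of
// the unsigned ternary quants.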
template <int nrc> struct Q8_K64 {

    constexpr static int nrc_y = nrc;

    Q8_K64(const DataInfo& info) {
        for (int iy = 0; iy < nrc_y; ++iy) {
            auto dptr = (const float *)info.src1_row(iy);
            std::memcpy(d + 8*iy, dptr, 8*sizeof(float));
            y[iy] = (const int8_t *)(dptr + 8);
        }
    }

    inline int8x16x4_t load_quants64(int iy, int i, int j) const { return vld1q_s8_x4(y[iy] + 128*i + 64*j); }
    inline int8x16x2_t load_quants(int iy, int i, int j) const { return vld1q_s8_x2(y[iy] + 128*i + 32*j); }
    inline float32x4_t scale(int iy) const { return vld1q_f32(d + 8*iy); }
    inline float32x4_t minus(int iy) const { return vld1q_f32(d + 8*iy + 4); }

    float d[8*nrc_y];
    const int8_t * y[nrc_y];
};

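// Decoder for IQ1_BN: each byte packs 5 ternary values, and the multipliers 81/27/9/3/1
// pick them out by bringing the wanted digit into the top bits of the 8-bit product,
// from which (3*v) >> 8 recovers a value in {0, 1, 2}. The latter is computed without
// 8-bit overflow as
//     vshrq_n_u8(vhaddq_u8(v, vshrq_n_u8(v, 1)), 6)   ==   (3*v) >> 8   for any uint8 v.
// The 16th lane of each shuffle reads byte 12, which supplies the last value of each group.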
struct DequantizerIQ1BN {
    const uint8x16_t m1 = vdupq_n_u8(1);

    static inline uint8x16x4_t load_shuffles() {
        static const uint8_t data[64] = {0, 0, 0, 0, 0,  1,  1,  1,  1,  1,  2,  2,  2,  2,  2, 12,
                                         3, 3, 3, 3, 3,  4,  4,  4,  4,  4,  5,  5,  5,  5,  5, 12,
                                         6, 6, 6, 6, 6,  7,  7,  7,  7,  7,  8,  8,  8,  8,  8, 12,
                                         9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 12};
        return vld1q_u8_x4(data);
    }
    static inline uint8x16x4_t load_mult() {
        static const uint8_t data[64] = {81, 27, 9, 3, 1, 81, 27, 9, 3, 1, 81, 27, 9, 3, 1, 81,
                                         81, 27, 9, 3, 1, 81, 27, 9, 3, 1, 81, 27, 9, 3, 1, 27,
                                         81, 27, 9, 3, 1, 81, 27, 9, 3, 1, 81, 27, 9, 3, 1,  9,
                                         81, 27, 9, 3, 1, 81, 27, 9, 3, 1, 81, 27, 9, 3, 1,  3};
        return vld1q_u8_x4(data);
    }
    const uint8x16x4_t shuff = load_shuffles();
    const uint8x16x4_t mult  = load_mult();

    IQK_ALWAYS_INLINE void prepare_iq1bn_quants(const block_iq1_bn * x, int8x16x4_t& v) const {
        auto data = vld1q_u8((const uint8_t *)x);
        for (int k = 0; k < 4; ++k) {
            auto val = vmulq_u8(vqtbl1q_u8(data, shuff.val[k]), mult.val[k]);
            val = vshrq_n_u8(vhaddq_u8(val, vshrq_n_u8(val, 1)), 6);
            v.val[k] = vsubq_s8(vreinterpretq_s8_u8(val), m1);
        }
    }

    IQK_ALWAYS_INLINE void prepare_iq1bn_quants_nosub(const block_iq1_bn * x, int8x16x4_t& v) const {
        auto data = vld1q_u8((const uint8_t *)x);
        for (int k = 0; k < 4; ++k) {
            auto val = vmulq_u8(vqtbl1q_u8(data, shuff.val[k]), mult.val[k]);
            v.val[k] = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(val, vshrq_n_u8(val, 1)), 6));
        }
    }
};

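// IQ1_BN x Q8_K64 matrix multiplication. The "nosub" decoding keeps the quants unsigned
// in {0,1,2}; the +1 bias relative to the ternary weights {-1,0,1} is removed at the end
// via the precomputed q8.minus() terms.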
template <int nrc_y>
static void mul_mat_iq1bn_q8_K64(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
    const int nb = n / QK_IQ1BN;

    Q8_K64<nrc_y> q8(info);
    DequantizerIQ1BN deq;

    int32x4_t   accd[nrc_y];
    int8x16x4_t v1, v2;

    float scale;
    ggml_half d16;
    char * c16 = (char *)&d16;

    for (int ix = 0; ix < nrc_x; ++ix) {

        const char * cx = ((const char *)vx + ix*bx);
        c16[0] = cx[0]; c16[1] = cx[1];
        //std::memcpy(&d16, cx, sizeof(d16));
        cx += sizeof(d16);
        scale = GGML_FP16_TO_FP32(d16);

        const block_iq1_bn * x = (const block_iq1_bn *)cx;

        if constexpr (nrc_y == 1) {
            int32x4_t acc[4] = {};
            for (int i = 0; i < nb/2; ++i) {
                deq.prepare_iq1bn_quants_nosub(x+2*i+0, v1);
                auto q = q8.load_quants64(0, i, 0);
                for (int j = 0; j < 4; ++j) acc[j] = ggml_vdotq_s32(acc[j], q.val[j], v1.val[j]);
                deq.prepare_iq1bn_quants_nosub(x+2*i+1, v2);
                q = q8.load_quants64(0, i, 1);
                for (int j = 0; j < 4; ++j) acc[j] = ggml_vdotq_s32(acc[j], q.val[j], v2.val[j]);
            }
            accd[0] = vaddq_s32(vaddq_s32(acc[0], acc[1]), vaddq_s32(acc[2], acc[3]));
        }
        else {

            for (int iy = 0; iy < nrc_y; ++iy) accd[iy] = vdupq_n_s32(0);

            for (int i = 0; i < nb/2; ++i) {

                deq.prepare_iq1bn_quants_nosub(x+2*i+0, v1);
                deq.prepare_iq1bn_quants_nosub(x+2*i+1, v2);

                for (int iy = 0; iy < nrc_y; ++iy) {
                    auto q = q8.load_quants(iy, i, 0);
                    accd[iy] = ggml_vdotq_s32(ggml_vdotq_s32(accd[iy], q.val[0], v1.val[0]), q.val[1], v1.val[1]);
                    q = q8.load_quants(iy, i, 1);
                    accd[iy] = ggml_vdotq_s32(ggml_vdotq_s32(accd[iy], q.val[0], v1.val[2]), q.val[1], v1.val[3]);
                    q = q8.load_quants(iy, i, 2);
                    accd[iy] = ggml_vdotq_s32(ggml_vdotq_s32(accd[iy], q.val[0], v2.val[0]), q.val[1], v2.val[1]);
                    q = q8.load_quants(iy, i, 3);
                    accd[iy] = ggml_vdotq_s32(ggml_vdotq_s32(accd[iy], q.val[0], v2.val[2]), q.val[1], v2.val[3]);
                }
            }
        }
        int i = 2*(nb/2);
        if (i < nb) {
            deq.prepare_iq1bn_quants_nosub(x+i, v1);
            if constexpr (nrc_y == 1) {
                auto q = q8.load_quants64(0, i/2, 0); // need all 64 quants here; load_quants() returns only 32
                for (int j = 0; j < 4; ++j) {
                    accd[0] = ggml_vdotq_s32(accd[0], q.val[j], v1.val[j]);
                }
            } else {
                for (int iy = 0; iy < nrc_y; ++iy) {
                    auto q = q8.load_quants(iy, i/2, 0);
                    accd[iy] = ggml_vdotq_s32(ggml_vdotq_s32(accd[iy], q.val[0], v1.val[0]), q.val[1], v1.val[1]);
                    q = q8.load_quants(iy, i/2, 1);
                    accd[iy] = ggml_vdotq_s32(ggml_vdotq_s32(accd[iy], q.val[0], v1.val[2]), q.val[1], v1.val[3]);
                }
            }
        }

        for (int iy = 0; iy < nrc_y; ++iy) {
            info.store(ix, iy, -scale * vaddvq_f32(vfmsq_f32(q8.minus(iy), q8.scale(iy), vcvtq_f32_s32(accd[iy]))));
        }

    }
}

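// Q8_16: each activation row starts with a 5-float header -- 4 block scales plus the
// scaled row sum used to correct for the +1 bias of the unsigned quants.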
template <int nrc> struct Q8_16 {

    constexpr static int nrc_y = nrc;

    Q8_16(const DataInfo& info) {
        for (int iy = 0; iy < nrc_y; ++iy) {
            auto ptr = (const float *)info.src1_row(iy);
            std::memcpy(d + 5*iy, ptr, 5*sizeof(float));
            y[iy] = (const int8_t *)(ptr + 5);
        }
    }

    inline int8x16x4_t load_quants(int iy, int i) const { return vld1q_s8_x4(y[iy] + 64*i); }
    inline int8x16x2_t load_quants_32(int iy, int i) const { return vld1q_s8_x2(y[iy] + 32*i); }
    inline float scale(int iy, int k) const { return d[5*iy+k]; }
    inline float sum_row(int iy) const { return d[5*iy + 4]; }
    inline float32x4_t scale(int iy) const { return vld1q_f32(d + 5*iy); }

    float d[5*nrc_y];
    const int8_t * y[nrc_y];
};

template <int nrc_y>
static IQK_NOINLINE void mul_mat_iq2_bn_r4_q8_k16(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
    if (nrc_x%4) {
        printf("%s: %d is not a multiple of 4\n", __func__, nrc_x);
        GGML_ABORT("fatal error");
    }
    Q8_16<nrc_y> q8(info);
    auto m3 = vdupq_n_u8(0x3);
    int nb = n / QK_IQ1BN;
    if constexpr (nrc_y == 1) {
        auto mc = vdupq_n_u8(0xc);
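        // The 0xc mask keeps the 2-bit quants shifted left by 2 (i.e. scaled by 4);
        // the 0.25f factor applied to sumf2 below compensates.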
        int32x4_t acc[8];
        for (int ix = 0; ix < nrc_x; ix += 4) {
            for (int k = 0; k < 8; ++k) acc[k] = vdupq_n_s32(0);
            const float * dptr = (const float *)((const char *)vx + ix*bx);
            auto dl = vld1q_f32(dptr);
            const uint8_t * iq2 = (const uint8_t *)(dptr + 4);
            for (int ib = 0; ib < nb; ++ib) {
                auto y = q8.load_quants(0, ib);
                for (int j = 0; j < 4; ++j) {
                    auto bits1 = vld1q_u8(iq2 + 64*ib + 16*j);
                    auto bits2 = vshrq_n_u8(bits1, 4);
                    acc[2*j+0] = vdotq_laneq_s32(acc[2*j+0], vandq_u8(bits1, m3), y.val[j], 0);
                    acc[2*j+1] = vdotq_laneq_s32(acc[2*j+1], vandq_u8(bits1, mc), y.val[j], 1);
                    acc[2*j+0] = vdotq_laneq_s32(acc[2*j+0], vandq_u8(bits2, m3), y.val[j], 2);
                    acc[2*j+1] = vdotq_laneq_s32(acc[2*j+1], vandq_u8(bits2, mc), y.val[j], 3);
                }
            }
            auto dy = vmulq_f32(dl, vdupq_n_f32(q8.scale(0, 0)));
            auto sumf1 = vmulq_f32(  vcvtq_f32_s32(acc[0]), dy);
            auto sumf2 = vmulq_f32(  vcvtq_f32_s32(acc[1]), dy);
            dy = vmulq_f32(dl, vdupq_n_f32(q8.scale(0, 1)));
            sumf1 = vfmaq_f32(sumf1, vcvtq_f32_s32(acc[2]), dy);
            sumf2 = vfmaq_f32(sumf2, vcvtq_f32_s32(acc[3]), dy);
            dy = vmulq_f32(dl, vdupq_n_f32(q8.scale(0, 2)));
            sumf1 = vfmaq_f32(sumf1, vcvtq_f32_s32(acc[4]), dy);
            sumf2 = vfmaq_f32(sumf2, vcvtq_f32_s32(acc[5]), dy);
            dy = vmulq_f32(dl, vdupq_n_f32(q8.scale(0, 3)));
            sumf1 = vfmaq_f32(sumf1, vcvtq_f32_s32(acc[6]), dy);
            sumf2 = vfmaq_f32(sumf2, vcvtq_f32_s32(acc[7]), dy);
            auto sumf = vfmaq_f32(sumf1, vdupq_n_f32(0.25f), sumf2);
            sumf = vfmaq_f32(sumf, dl, vdupq_n_f32(-q8.sum_row(0)));
            info.store(ix, 0, sumf);
        }
    } else {
        int32x4_t acc[4*nrc_y] = {};
        uint8x16_t qx[8];
        for (int ix = 0; ix < nrc_x; ix += 4) {
            const float * dptr = (const float *)((const char *)vx + ix*bx);
            auto dl = vld1q_f32(dptr);
            const uint8_t * iq2 = (const uint8_t *)(dptr + 4);
            for (int ib = 0; ib < nb; ++ib) {
                auto bits = vld1q_u8_x2(iq2 + 64*ib);
                qx[0] = vandq_u8(bits.val[0], m3);
                qx[1] = vandq_u8(vshrq_n_u8(bits.val[0], 2), m3);
                qx[2] = vandq_u8(vshrq_n_u8(bits.val[0], 4), m3);
                qx[3] = vshrq_n_u8(bits.val[0], 6);
                qx[4] = vandq_u8(bits.val[1], m3);
                qx[5] = vandq_u8(vshrq_n_u8(bits.val[1], 2), m3);
                qx[6] = vandq_u8(vshrq_n_u8(bits.val[1], 4), m3);
                qx[7] = vshrq_n_u8(bits.val[1], 6);
                for (int iy = 0; iy < nrc_y; ++iy) {
                    auto y = q8.load_quants_32(iy, 2*ib+0);
                    acc[4*iy + 0] = vdotq_laneq_s32(acc[4*iy + 0], qx[0], y.val[0], 0);
                    acc[4*iy + 0] = vdotq_laneq_s32(acc[4*iy + 0], qx[1], y.val[0], 1);
                    acc[4*iy + 0] = vdotq_laneq_s32(acc[4*iy + 0], qx[2], y.val[0], 2);
                    acc[4*iy + 0] = vdotq_laneq_s32(acc[4*iy + 0], qx[3], y.val[0], 3);
                    acc[4*iy + 1] = vdotq_laneq_s32(acc[4*iy + 1], qx[4], y.val[1], 0);
                    acc[4*iy + 1] = vdotq_laneq_s32(acc[4*iy + 1], qx[5], y.val[1], 1);
                    acc[4*iy + 1] = vdotq_laneq_s32(acc[4*iy + 1], qx[6], y.val[1], 2);
                    acc[4*iy + 1] = vdotq_laneq_s32(acc[4*iy + 1], qx[7], y.val[1], 3);
                }
                bits = vld1q_u8_x2(iq2 + 64*ib + 32);
                qx[0] = vandq_u8(bits.val[0], m3);
                qx[1] = vandq_u8(vshrq_n_u8(bits.val[0], 2), m3);
                qx[2] = vandq_u8(vshrq_n_u8(bits.val[0], 4), m3);
                qx[3] = vshrq_n_u8(bits.val[0], 6);
                qx[4] = vandq_u8(bits.val[1], m3);
                qx[5] = vandq_u8(vshrq_n_u8(bits.val[1], 2), m3);
                qx[6] = vandq_u8(vshrq_n_u8(bits.val[1], 4), m3);
                qx[7] = vshrq_n_u8(bits.val[1], 6);
                for (int iy = 0; iy < nrc_y; ++iy) {
                    auto y = q8.load_quants_32(iy, 2*ib+1);
                    acc[4*iy + 2] = vdotq_laneq_s32(acc[4*iy + 2], qx[0], y.val[0], 0);
                    acc[4*iy + 2] = vdotq_laneq_s32(acc[4*iy + 2], qx[1], y.val[0], 1);
                    acc[4*iy + 2] = vdotq_laneq_s32(acc[4*iy + 2], qx[2], y.val[0], 2);
                    acc[4*iy + 2] = vdotq_laneq_s32(acc[4*iy + 2], qx[3], y.val[0], 3);
                    acc[4*iy + 3] = vdotq_laneq_s32(acc[4*iy + 3], qx[4], y.val[1], 0);
                    acc[4*iy + 3] = vdotq_laneq_s32(acc[4*iy + 3], qx[5], y.val[1], 1);
                    acc[4*iy + 3] = vdotq_laneq_s32(acc[4*iy + 3], qx[6], y.val[1], 2);
                    acc[4*iy + 3] = vdotq_laneq_s32(acc[4*iy + 3], qx[7], y.val[1], 3);
                }
            }
            for (int iy = 0; iy < nrc_y; ++iy) {
                auto dy = q8.scale(iy);
                float32x4_t sumf = vmulq_f32(vcvtq_f32_s32(acc[4*iy+0]), vmulq_laneq_f32(dl, dy, 0));
                sumf = vfmaq_f32(sumf, vcvtq_f32_s32(acc[4*iy+1]), vmulq_laneq_f32(dl, dy, 1));
                sumf = vfmaq_f32(sumf, vcvtq_f32_s32(acc[4*iy+2]), vmulq_laneq_f32(dl, dy, 2));
                sumf = vfmaq_f32(sumf, vcvtq_f32_s32(acc[4*iy+3]), vmulq_laneq_f32(dl, dy, 3));
                sumf = vfmaq_f32(sumf, dl, vdupq_n_f32(-q8.sum_row(iy)));
                info.store(ix, iy, sumf);
                acc[4*iy+0] = acc[4*iy+1] = acc[4*iy+2] = acc[4*iy+3] = vdupq_n_s32(0);
            }
        }
    }
}

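// IQ2_BN x Q8_K64 matrix multiplication: four 2-bit quants per byte, unpacked with shifts
// and the 0x3 mask, with the same minus()-based bias correction as the IQ1_BN kernel above.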
template <int nrc_y>
static void mul_mat_iq2bn_q8_K64(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
    const int nb = n / QK_IQ1BN;

    Q8_K64<nrc_y> q8(info);

    int32x4_t   accd[nrc_y];

    const auto mask2  = vdupq_n_s8(3);

    for (int ix = 0; ix < nrc_x; ++ix) {

        const float * dptr = (const float *)((const char *)vx + ix*bx);
        const float d = *dptr;
        const block_iq2_bn * x = (const block_iq2_bn *)(dptr + 1);

        if constexpr (nrc_y == 1) {
            int8x16x4_t v1;
            int32x4_t acc[4] = {};
            for (int i = 0; i < nb/2; ++i) {
                for (int j = 0; j < 2; ++j) {
                    auto q = q8.load_quants64(0, i, j);
                    auto q2bits = vld1q_u8(x[2*i+j].qs);
                    v1.val[0] = vandq_s8(q2bits, mask2);
                    v1.val[1] = vandq_s8(vshrq_n_u8(q2bits, 2), mask2);
                    v1.val[2] = vandq_s8(vshrq_n_u8(q2bits, 4), mask2);
                    v1.val[3] = vshrq_n_u8(q2bits, 6);
                    acc[0] = ggml_vdotq_s32(acc[0], q.val[0], v1.val[0]);
                    acc[1] = ggml_vdotq_s32(acc[1], q.val[1], v1.val[1]);
                    acc[2] = ggml_vdotq_s32(acc[2], q.val[2], v1.val[2]);
                    acc[3] = ggml_vdotq_s32(acc[3], q.val[3], v1.val[3]);
                }
            }
            accd[0] = vaddq_s32(vaddq_s32(acc[0], acc[1]), vaddq_s32(acc[2], acc[3]));
        } else {
            int8x16x4_t v1, v2;
            for (int iy = 0; iy < nrc_y; ++iy) accd[iy] = vdupq_n_s32(0);
            for (int i = 0; i < nb/2; ++i) {
                auto q2bits = vld1q_u8(x[2*i+0].qs);
                v1.val[0] = vandq_s8(q2bits, mask2);
                v1.val[1] = vandq_s8(vshrq_n_u8(q2bits, 2), mask2);
                v1.val[2] = vandq_s8(vshrq_n_u8(q2bits, 4), mask2);
                v1.val[3] = vshrq_n_u8(q2bits, 6);
                q2bits = vld1q_u8(x[2*i+1].qs);
                v2.val[0] = vandq_s8(q2bits, mask2);
                v2.val[1] = vandq_s8(vshrq_n_u8(q2bits, 2), mask2);
                v2.val[2] = vandq_s8(vshrq_n_u8(q2bits, 4), mask2);
                v2.val[3] = vshrq_n_u8(q2bits, 6);
                for (int iy = 0; iy < nrc_y; ++iy) {
                    auto q = q8.load_quants(iy, i, 0);
                    accd[iy] = ggml_vdotq_s32(ggml_vdotq_s32(accd[iy], q.val[0], v1.val[0]), q.val[1], v1.val[1]);
                    q = q8.load_quants(iy, i, 1);
                    accd[iy] = ggml_vdotq_s32(ggml_vdotq_s32(accd[iy], q.val[0], v1.val[2]), q.val[1], v1.val[3]);
                    q = q8.load_quants(iy, i, 2);
                    accd[iy] = ggml_vdotq_s32(ggml_vdotq_s32(accd[iy], q.val[0], v2.val[0]), q.val[1], v2.val[1]);
                    q = q8.load_quants(iy, i, 3);
                    accd[iy] = ggml_vdotq_s32(ggml_vdotq_s32(accd[iy], q.val[0], v2.val[2]), q.val[1], v2.val[3]);
                }
            }
        }
        int i = 2*(nb/2);
        if (i < nb) {
            auto q2bits = vld1q_u8(x[i].qs);
            int8x16x4_t v1;
            v1.val[0] = vandq_s8(q2bits, mask2);
            v1.val[1] = vandq_s8(vshrq_n_u8(q2bits, 2), mask2);
            v1.val[2] = vandq_s8(vshrq_n_u8(q2bits, 4), mask2);
            v1.val[3] = vshrq_n_u8(q2bits, 6);
            for (int iy = 0; iy < nrc_y; ++iy) {
                auto q = q8.load_quants(iy, i/2, 0);
                accd[iy] = ggml_vdotq_s32(ggml_vdotq_s32(accd[iy], q.val[0], v1.val[0]), q.val[1], v1.val[1]);
                q = q8.load_quants(iy, i/2, 1);
                accd[iy] = ggml_vdotq_s32(ggml_vdotq_s32(accd[iy], q.val[0], v1.val[2]), q.val[1], v1.val[3]);
            }
        }

        for (int iy = 0; iy < nrc_y; ++iy) {
            info.store(ix, iy, -d*vaddvq_f32(vfmsq_f32(q8.minus(iy), q8.scale(iy), vcvtq_f32_s32(accd[iy]))));
        }
    }
}

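// IQ1_S_R4 x Q8_1 matrix multiplication over 4 interleaved rows. iq1s_grid_us_neon packs
// two quants per byte (one per nibble), unpacked below with the shift-by-4 and 0x03 mask;
// the per-block delta term enters through the q8 bsums (d8).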
template <int nrc_y>
static void mul_mat_iq1_s_r4_q8_1(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
    GGML_ASSERT(nrc_x%4 == 0);
    Q8<nrc_y, block_q8_K128> q8(info);
    int nb = n / 32;
    GGML_ASSERT(nb%4 == 0);
    uint8x16_t qx[8];
    float32x4_t acc[nrc_y] = {};
    auto ms = vdup_n_u16(0x8000);
    auto mask = vdupq_n_s8(0x03);
    float d8[4*nrc_y];
    for (int ix = 0; ix < nrc_x; ix += 4) {
        auto dptr = (const ggml_half *)((const char *)vx + ix*bx);
        auto d1 = vcvt_f32_f16(vld1_f16((const float16_t *)dptr));
        auto x = (const block_iq1_s_r4 *)(dptr + 4);
        for (int ib = 0; ib < nb/4; ++ib) {
            for (int iy = 0; iy < nrc_y; ++iy) {
                auto scales = vcvtq_f32_s32(vmovl_s16(vld1_s16(q8.y[iy][ib].bsums)));
                vst1q_f32(d8+4*iy, vmulq_f32(vdupq_n_f32(q8.y[iy][ib].d), scales));
            }
            for (int k = 0; k < 4; ++k) {
                auto sas = vld1_u16(x[4*ib+k].qh);
                auto scales4 = vand_u16(vshr_n_u16(sas, 12), vdup_n_u16(7));
                scales4 = vorr_u16(vshl_n_u16(scales4, 1), vdup_n_u16(1));
                auto signs = vreinterpret_s16_u16(vorr_u16(vceq_u16(vand_u16(sas, ms), ms), vdup_n_u16(1)));
                signs = vadd_s16(vdup_n_s16(-8), signs);
                auto delta4 = vmulq_f32(vdupq_n_f32(0.125f), vcvtq_f32_s32(vmull_s16(signs, scales4)));
                qx[0] = vreinterpretq_u8_u32(uint32x4_t{iq1s_grid_us_neon[x[4*ib+k].qs[ 0] | ((x[4*ib+k].qh[0] << 8) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[ 1] | ((x[4*ib+k].qh[1] << 8) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[ 2] | ((x[4*ib+k].qh[2] << 8) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[ 3] | ((x[4*ib+k].qh[3] << 8) & 0x0700)]});
                qx[2] = vreinterpretq_u8_u32(uint32x4_t{iq1s_grid_us_neon[x[4*ib+k].qs[ 4] | ((x[4*ib+k].qh[0] << 5) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[ 5] | ((x[4*ib+k].qh[1] << 5) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[ 6] | ((x[4*ib+k].qh[2] << 5) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[ 7] | ((x[4*ib+k].qh[3] << 5) & 0x0700)]});
                qx[4] = vreinterpretq_u8_u32(uint32x4_t{iq1s_grid_us_neon[x[4*ib+k].qs[ 8] | ((x[4*ib+k].qh[0] << 2) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[ 9] | ((x[4*ib+k].qh[1] << 2) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[10] | ((x[4*ib+k].qh[2] << 2) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[11] | ((x[4*ib+k].qh[3] << 2) & 0x0700)]});
                qx[6] = vreinterpretq_u8_u32(uint32x4_t{iq1s_grid_us_neon[x[4*ib+k].qs[12] | ((x[4*ib+k].qh[0] >> 1) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[13] | ((x[4*ib+k].qh[1] >> 1) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[14] | ((x[4*ib+k].qh[2] >> 1) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[15] | ((x[4*ib+k].qh[3] >> 1) & 0x0700)]});
                qx[1] = vandq_u8(vshrq_n_u8(qx[0], 4), mask); qx[0] = vandq_u8(qx[0], mask);
                qx[3] = vandq_u8(vshrq_n_u8(qx[2], 4), mask); qx[2] = vandq_u8(qx[2], mask);
                qx[5] = vandq_u8(vshrq_n_u8(qx[4], 4), mask); qx[4] = vandq_u8(qx[4], mask);
                qx[7] = vandq_u8(vshrq_n_u8(qx[6], 4), mask); qx[6] = vandq_u8(qx[6], mask);
                auto scales = vmovl_u16(scales4);
                for (int iy = 0; iy < nrc_y; ++iy) {
                    auto y = vld1q_s8_x2(q8.y[iy][ib].qs + 32*k);
                    auto sumi = vdupq_n_s32(0);
                    sumi = vdotq_laneq_s32(sumi, vreinterpretq_s8_u8(qx[0]), y.val[0], 0);
                    sumi = vdotq_laneq_s32(sumi, vreinterpretq_s8_u8(qx[1]), y.val[0], 1);
                    sumi = vdotq_laneq_s32(sumi, vreinterpretq_s8_u8(qx[2]), y.val[0], 2);
                    sumi = vdotq_laneq_s32(sumi, vreinterpretq_s8_u8(qx[3]), y.val[0], 3);
                    sumi = vdotq_laneq_s32(sumi, vreinterpretq_s8_u8(qx[4]), y.val[1], 0);
                    sumi = vdotq_laneq_s32(sumi, vreinterpretq_s8_u8(qx[5]), y.val[1], 1);
                    sumi = vdotq_laneq_s32(sumi, vreinterpretq_s8_u8(qx[6]), y.val[1], 2);
                    sumi = vdotq_laneq_s32(sumi, vreinterpretq_s8_u8(qx[7]), y.val[1], 3);
                    sumi = vmulq_s32(scales, sumi);
                    acc[iy] = vfmaq_f32(acc[iy], vdupq_n_f32(q8.y[iy][ib].d), vcvtq_f32_s32(sumi));
                    acc[iy] = vfmaq_f32(acc[iy], vdupq_n_f32(d8[4*iy+k]), delta4);
                }
            }
        }
        for (int iy = 0; iy < nrc_y; ++iy) {
            info.store(ix, iy, vmulq_f32(d1, acc[iy]));
            acc[iy] = vdupq_n_f32(0.f);
        }
    }
}

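// IQ1_M_R4 x Q8_0 matrix multiplication over 4 interleaved rows. The per-8 deltas are
// derived from the qh bits (ms = 0x08) and applied via the tbl-broadcast signs, turning
// the grid values into 8*(g ± 0.125); the 1/8 is folded into d1.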
template <int nrc_y>
static void mul_mat_iq1_m_r4_q8_0(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
    GGML_ASSERT(nrc_x%4 == 0);
    Q8<nrc_y, block_q8_K128> q8(info);
    int nb = n / 32;
    GGML_ASSERT(nb%4 == 0);
    int8x16_t qx[8];
    float32x4_t acc[nrc_y] = {};
    int32x4_t isum[nrc_y] = {};
    auto shuffle0 = uint32x4_t{0x00000000, 0x01010101, 0x02020202, 0x03030303};
    auto step = vdupq_n_u8(4);
    auto ms = vdupq_n_u8(0x08);
    auto mask = vdupq_n_s8(0x18);
    for (int ix = 0; ix < nrc_x; ix += 4) {
        auto dptr = (const ggml_half *)((const char *)vx + ix*bx);
        auto d1 = vmulq_f32(vdupq_n_f32(0.125f), vcvt_f32_f16(vld1_f16((const float16_t *)dptr)));
        auto x = (const block_iq1_m_r4 *)(dptr + 4);
        for (int ib = 0; ib < nb/4; ++ib) {
            for (int k = 0; k < 4; ++k) {
                auto scales4 = vdup_n_u32(((const uint32_t *)x[4*ib+k].scales)[0]);
                scales4 = vand_u8(vshl_u32(scales4, int32x2_t{0, -4}), vdup_n_u8(0xf));
                auto scales16 = vmovl_u8(scales4);
                auto scales1 = vmovl_u16(vget_low_u16(scales16));
                auto scales2 = vmovl_u16(vget_high_u16(scales16));
                auto qh = (const uint32_t *)x[4*ib+k].qh;
                auto idxh = uint32x4_t{qh[0], qh[0] >> 4, qh[1], qh[1] >> 4};
                auto signs = vreinterpretq_s8_u8(vorrq_u8(vceqq_u8(vandq_u8(idxh, ms), ms), vdupq_n_u8(1)));
                signs = vaddq_s8(signs, vdupq_n_s8(-8));
                qx[0] = vreinterpretq_s8_u32(uint32x4_t{iq1s_grid_us_neon[x[4*ib+k].qs[ 0] | ((x[4*ib+k].qh[0] << 8) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[ 1] | ((x[4*ib+k].qh[1] << 8) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[ 2] | ((x[4*ib+k].qh[2] << 8) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[ 3] | ((x[4*ib+k].qh[3] << 8) & 0x0700)]});
                qx[2] = vreinterpretq_s8_u32(uint32x4_t{iq1s_grid_us_neon[x[4*ib+k].qs[ 4] | ((x[4*ib+k].qh[0] << 4) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[ 5] | ((x[4*ib+k].qh[1] << 4) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[ 6] | ((x[4*ib+k].qh[2] << 4) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[ 7] | ((x[4*ib+k].qh[3] << 4) & 0x0700)]});
                qx[4] = vreinterpretq_s8_u32(uint32x4_t{iq1s_grid_us_neon[x[4*ib+k].qs[ 8] | ((x[4*ib+k].qh[4] << 8) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[ 9] | ((x[4*ib+k].qh[5] << 8) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[10] | ((x[4*ib+k].qh[6] << 8) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[11] | ((x[4*ib+k].qh[7] << 8) & 0x0700)]});
                qx[6] = vreinterpretq_s8_u32(uint32x4_t{iq1s_grid_us_neon[x[4*ib+k].qs[12] | ((x[4*ib+k].qh[4] << 4) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[13] | ((x[4*ib+k].qh[5] << 4) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[14] | ((x[4*ib+k].qh[6] << 4) & 0x0700)],
                                                        iq1s_grid_us_neon[x[4*ib+k].qs[15] | ((x[4*ib+k].qh[7] << 4) & 0x0700)]});
                auto shuffle = shuffle0;
                for (int j = 0; j < 4; ++j) {
                    auto s = vqtbl1q_s8(signs, shuffle);
                    qx[2*j+1] = vaddq_s8(s, vandq_s8(vshrq_n_s8(qx[2*j+0], 1), mask));
                    qx[2*j+0] = vaddq_s8(s, vandq_s8(vshlq_n_s8(qx[2*j+0], 3), mask));
                    shuffle = vaddq_u8(shuffle, step);
                }
                for (int iy = 0; iy < nrc_y; ++iy) {
                    auto y = vld1q_s8_x2(q8.y[iy][ib].qs + 32*k);
                    auto sumi1 = vdupq_n_s32(0);
                    auto sumi2 = vdupq_n_s32(0);
                    sumi1 = vdotq_laneq_s32(sumi1, qx[0], y.val[0], 0);
                    sumi1 = vdotq_laneq_s32(sumi1, qx[1], y.val[0], 1);
                    sumi1 = vdotq_laneq_s32(sumi1, qx[2], y.val[0], 2);
                    sumi1 = vdotq_laneq_s32(sumi1, qx[3], y.val[0], 3);
                    sumi2 = vdotq_laneq_s32(sumi2, qx[4], y.val[1], 0);
                    sumi2 = vdotq_laneq_s32(sumi2, qx[5], y.val[1], 1);
                    sumi2 = vdotq_laneq_s32(sumi2, qx[6], y.val[1], 2);
                    sumi2 = vdotq_laneq_s32(sumi2, qx[7], y.val[1], 3);
                    isum[iy] = vmlaq_s32(vmlaq_s32(isum[iy], sumi1, scales1), sumi2, scales2);
                }
            }
            for (int iy = 0; iy < nrc_y; ++iy) {
                acc[iy] = vfmaq_f32(acc[iy], vdupq_n_f32(q8.y[iy][ib].d), vcvtq_f32_s32(isum[iy]));
                isum[iy] = vdupq_n_s32(0);
            }
        }
        for (int iy = 0; iy < nrc_y; ++iy) {
            info.store(ix, iy, vmulq_f32(d1, acc[iy]));
            acc[iy] = vdupq_n_f32(0.f);
        }
    }
}

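// Special case of mul_mat_iq1_s_r4_q8_1 for a single activation row: uses full 8-byte
// iq1s_grid entries with plain dot products and accumulates the IQ1S_DELTA contribution
// separately in acc[1] from the q8 bsums.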
void mul_mat_iq1_s_r4_q8_1_1(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
    GGML_ASSERT(nrc_x%4 == 0);
    Q8<1, block_q8_K128> q8(info);
    int nb = n / 32;
    GGML_ASSERT(nb%4 == 0);
    int8x16_t qx[8];
    float32x4_t acc[2] = {};
    int32x4_t isum[8];
    auto ms = vdup_n_u16(0x8000);
    for (int ix = 0; ix < nrc_x; ix += 4) {
        auto dptr = (const ggml_half *)((const char *)vx + ix*bx);
        auto d1 = vcvt_f32_f16(vld1_f16((const float16_t *)dptr));
        auto x = (const block_iq1_s_r4 *)(dptr + 4);
        for (int ib = 0; ib < nb/4; ++ib) {
            auto scale_yd = vdupq_n_f32(q8.y[0][ib].d);
            auto scale_ym = vmulq_f32(scale_yd, vcvtq_f32_s32(vmovl_s16(vld1_s16(q8.y[0][ib].bsums))));
            for (int k = 0; k < 4; ++k) {
                auto sas = vld1_u16(x[4*ib+k].qh);
                auto scales4 = vand_u16(vshr_n_u16(sas, 12), vdup_n_u16(7));
                scales4 = vorr_u16(vshl_n_u16(scales4, 1), vdup_n_u16(1));
                auto signs = vreinterpret_s16_u16(vorr_u16(vceq_u16(vand_u16(sas, ms), ms), vdup_n_u16(1)));
                isum[k+4] = vmull_s16(signs, scales4);
                qx[0] = vreinterpretq_s8_u64(uint64x2_t{iq1s_grid[x[4*ib+k].qs[ 0] | ((x[4*ib+k].qh[0] << 8) & 0x0700)],
                                                        iq1s_grid[x[4*ib+k].qs[ 4] | ((x[4*ib+k].qh[0] << 5) & 0x0700)]});
                qx[1] = vreinterpretq_s8_u64(uint64x2_t{iq1s_grid[x[4*ib+k].qs[ 8] | ((x[4*ib+k].qh[0] << 2) & 0x0700)],
                                                        iq1s_grid[x[4*ib+k].qs[12] | ((x[4*ib+k].qh[0] >> 1) & 0x0700)]});
                qx[2] = vreinterpretq_s8_u64(uint64x2_t{iq1s_grid[x[4*ib+k].qs[ 1] | ((x[4*ib+k].qh[1] << 8) & 0x0700)],
                                                        iq1s_grid[x[4*ib+k].qs[ 5] | ((x[4*ib+k].qh[1] << 5) & 0x0700)]});
                qx[3] = vreinterpretq_s8_u64(uint64x2_t{iq1s_grid[x[4*ib+k].qs[ 9] | ((x[4*ib+k].qh[1] << 2) & 0x0700)],
                                                        iq1s_grid[x[4*ib+k].qs[13] | ((x[4*ib+k].qh[1] >> 1) & 0x0700)]});
                qx[4] = vreinterpretq_s8_u64(uint64x2_t{iq1s_grid[x[4*ib+k].qs[ 2] | ((x[4*ib+k].qh[2] << 8) & 0x0700)],
                                                        iq1s_grid[x[4*ib+k].qs[ 6] | ((x[4*ib+k].qh[2] << 5) & 0x0700)]});
                qx[5] = vreinterpretq_s8_u64(uint64x2_t{iq1s_grid[x[4*ib+k].qs[10] | ((x[4*ib+k].qh[2] << 2) & 0x0700)],
                                                        iq1s_grid[x[4*ib+k].qs[14] | ((x[4*ib+k].qh[2] >> 1) & 0x0700)]});
                qx[6] = vreinterpretq_s8_u64(uint64x2_t{iq1s_grid[x[4*ib+k].qs[ 3] | ((x[4*ib+k].qh[3] << 8) & 0x0700)],
                                                        iq1s_grid[x[4*ib+k].qs[ 7] | ((x[4*ib+k].qh[3] << 5) & 0x0700)]});
                qx[7] = vreinterpretq_s8_u64(uint64x2_t{iq1s_grid[x[4*ib+k].qs[11] | ((x[4*ib+k].qh[3] << 2) & 0x0700)],
                                                        iq1s_grid[x[4*ib+k].qs[15] | ((x[4*ib+k].qh[3] >> 1) & 0x0700)]});
                auto scales = vmovl_u16(scales4);
                auto y = vld1q_s8_x2(q8.y[0][ib].qs + 32*k);
                auto sumi1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), qx[0], y.val[0]), qx[1], y.val[1]);
                auto sumi2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), qx[2], y.val[0]), qx[3], y.val[1]);
                auto sumi3 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), qx[4], y.val[0]), qx[5], y.val[1]);
                auto sumi4 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), qx[6], y.val[0]), qx[7], y.val[1]);
                sumi1 = vpaddq_s32(sumi1, sumi2);
                sumi3 = vpaddq_s32(sumi3, sumi4);
                isum[k] = vmulq_s32(scales, vpaddq_s32(sumi1, sumi3));
            }
            acc[0] = vfmaq_laneq_f32(acc[0], vcvtq_f32_s32(isum[0]), scale_yd, 0);
            acc[0] = vfmaq_laneq_f32(acc[0], vcvtq_f32_s32(isum[1]), scale_yd, 1);
            acc[0] = vfmaq_laneq_f32(acc[0], vcvtq_f32_s32(isum[2]), scale_yd, 2);
            acc[0] = vfmaq_laneq_f32(acc[0], vcvtq_f32_s32(isum[3]), scale_yd, 3);
            acc[1] = vfmaq_laneq_f32(acc[1], vcvtq_f32_s32(isum[4]), scale_ym, 0);
            acc[1] = vfmaq_laneq_f32(acc[1], vcvtq_f32_s32(isum[5]), scale_ym, 1);
            acc[1] = vfmaq_laneq_f32(acc[1], vcvtq_f32_s32(isum[6]), scale_ym, 2);
            acc[1] = vfmaq_laneq_f32(acc[1], vcvtq_f32_s32(isum[7]), scale_ym, 3);
        }
        info.store(ix, 0, vmulq_f32(d1, vfmaq_f32(acc[0], acc[1], vdupq_n_f32(IQ1S_DELTA))));
        acc[0] = acc[1] = vdupq_n_f32(0.f);
    }
}

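// IQ1_S x Q8_K matrix multiplication for the non-interleaved layout. Scales are
// premultiplied by 8 and the per-block delta signs folded into deltas128; the final
// 0.125f restores the overall scale.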
template <int nrc_y>
void mul_mat_iq1_s_q8_K(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
    GGML_ASSERT(n%QK_K == 0);
    Q8<nrc_y, block_q8_K> q8(info);
    int8x16_t qx[16];
    int32x4_t scales[2];
    int16x4_t deltas[2];
    float32x4_t acc[nrc_y] = {};
    auto delta_mask = vdupq_n_u16(0x8000);
    for (int ix = 0; ix < nrc_x; ++ix) {
        auto iq1s = (const block_iq1_s *)((const char *)vx + ix*bx);
        for (int ibl = 0; ibl < n/QK_K; ++ibl) {
            float d = GGML_FP16_TO_FP32(iq1s[ibl].d);
            auto qhb = vld1q_u16(iq1s[ibl].qh);
            auto scales128 = vandq_u16(vshrq_n_u16(qhb, 12), vdupq_n_u16(7));
            scales128 = vaddq_u16(vshlq_n_u16(scales128, 1), vdupq_n_u16(1));
            auto mask = vceqq_u16(vandq_u16(qhb, delta_mask), delta_mask);
            // Note: we explicitly assume IQ1S_DELTA = 0.125
            auto deltas128 = vsubq_s16(vbicq_s16(scales128, mask), vandq_s16(scales128, mask));
            //auto deltas128 = vorrq_s16(vandq_s16(vdupq_n_s16(-1), mask), vbicq_s16(vdupq_n_s16(1), mask));
            //deltas128 = vmulq_s16(scales128, deltas128);
            scales128 = vshlq_n_u16(scales128, 3);
            auto qs = iq1s[ibl].qs;
            auto qh = iq1s[ibl].qh;
            for (int ib64 = 0; ib64 < QK_K/64; ++ib64) {
                qx[4*ib64+0] = vreinterpretq_s8_u64(uint64x2_t{iq1s_grid[qs[0] | ((qh[2*ib64+0] << 8) & 0x700)], iq1s_grid[qs[1] | ((qh[2*ib64+0] << 5) & 0x700)]});
                qx[4*ib64+1] = vreinterpretq_s8_u64(uint64x2_t{iq1s_grid[qs[2] | ((qh[2*ib64+0] << 2) & 0x700)], iq1s_grid[qs[3] | ((qh[2*ib64+0] >> 1) & 0x700)]});
                qx[4*ib64+2] = vreinterpretq_s8_u64(uint64x2_t{iq1s_grid[qs[4] | ((qh[2*ib64+1] << 8) & 0x700)], iq1s_grid[qs[5] | ((qh[2*ib64+1] << 5) & 0x700)]});
                qx[4*ib64+3] = vreinterpretq_s8_u64(uint64x2_t{iq1s_grid[qs[6] | ((qh[2*ib64+1] << 2) & 0x700)], iq1s_grid[qs[7] | ((qh[2*ib64+1] >> 1) & 0x700)]});
                qs += 8;
            }
            scales[0] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16 (scales128)));
            scales[1] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales128)));
            deltas[0] = vget_low_s16 (deltas128);
            deltas[1] = vget_high_s16(deltas128);
            for (int iy = 0; iy < nrc_y; ++iy) {
                auto bsums = q8.load_bsums8(iy, ibl);
                auto sumi = vdupq_n_s32(0);
                sumi = vmlal_s16(sumi, deltas[0], vget_low_s16 (bsums));
                sumi = vmlal_s16(sumi, deltas[1], vget_high_s16(bsums));
                for (int k = 0; k < QK_K/128; ++k) {
                    auto qy = q8.load_quants_64(iy, ibl, 2*k+0);
                    auto dot1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), qx[8*k+0], qy.val[0]), qx[8*k+1], qy.val[1]);
                    auto dot2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), qx[8*k+2], qy.val[2]), qx[8*k+3], qy.val[3]);
                    auto dot12 = vpaddq_s32(dot1, dot2);
                    qy = q8.load_quants_64(iy, ibl, 2*k+1);
                    auto dot3 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), qx[8*k+4], qy.val[0]), qx[8*k+5], qy.val[1]);
                    auto dot4 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), qx[8*k+6], qy.val[2]), qx[8*k+7], qy.val[3]);
                    auto dot34 = vpaddq_s32(dot3, dot4);
                    auto dot = vpaddq_s32(dot12, dot34);
                    sumi = vmlaq_s32(sumi, dot, scales[k]);
                }
                acc[iy] = vfmaq_f32(acc[iy], vdupq_n_f32(d*q8.scale(iy, ibl)), vcvtq_f32_s32(sumi));
            }
        }
        for (int iy = 0; iy < nrc_y; ++iy) {
            info.store(ix, iy, 0.125f*vaddvq_f32(acc[iy]));
            acc[iy] = vdupq_n_f32(0);
        }
    }
}

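// IQ1_M x Q8_K matrix multiplication. The block fp16 scale is reassembled from the top
// nibbles of the four 16-bit scale words; per-8 deltas come from the qh bits checked
// against delta_mask.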
template <int nrc_y>
void mul_mat_iq1_m_q8_K(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
    GGML_ASSERT(n%QK_K == 0);
    Q8<nrc_y, block_q8_K> q8(info);
    int8x16x2_t qx[8];
    int32x4x4_t scales;
    float32x4_t acc[nrc_y] = {};
    uint8x16x2_t scale_shuffle = {vreinterpretq_u8_u64(uint64x2_t{0x0100010001000100, 0x0302030203020302}),
                                  vreinterpretq_u8_u64(uint64x2_t{0x0504050405040504, 0x0706070607060706})};
    uint64x2x2_t delta_mask = {uint64x2_t{0x0008, 0x0080}, uint64x2_t{0x0800, 0x8000}};
    iq1m_scale_t block_scale;
    for (int ix = 0; ix < nrc_x; ++ix) {
        auto iq1m = (const block_iq1_m *)((const char *)vx + ix*bx);
        for (int ibl = 0; ibl < n/QK_K; ++ibl) {
            const uint16_t * sc = (const uint16_t *)iq1m[ibl].scales; // 4 x uint16_t, each containing 4 scales
            block_scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
            float d = GGML_FP16_TO_FP32(block_scale.f16);
            auto qs = iq1m[ibl].qs;
            auto qh = iq1m[ibl].qh;
            auto aux8 = vld1_u8(iq1m[ibl].scales);
            auto aux16 = vcombine_u8(aux8, aux8);
            uint16x8x2_t sc16 = { vreinterpretq_u16_u8(vqtbl1q_u8(aux16, scale_shuffle.val[0])), vreinterpretq_u16_u8(vqtbl1q_u8(aux16, scale_shuffle.val[1])) };
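            // Each 16-bit lane holds a 3-bit scale at bit position 0, 3, 6 or 9; the mask
            // isolates it and the per-lane multiplier moves it to bits 9..11, so after the
            // >> 8 and +1 below every lane becomes 2*scale + 1.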
            sc16.val[0] = vmulq_u16(vandq_u16(sc16.val[0], vdupq_n_u64(0x0e0001c000380007)), vdupq_n_u64(0x0001000800400200));
            sc16.val[1] = vmulq_u16(vandq_u16(sc16.val[1], vdupq_n_u64(0x0e0001c000380007)), vdupq_n_u64(0x0001000800400200));
            sc16.val[0] = vaddq_u16(vshrq_n_u16(sc16.val[0], 8), vdupq_n_u16(1));
            sc16.val[1] = vaddq_u16(vshrq_n_u16(sc16.val[1], 8), vdupq_n_u16(1));
            scales.val[0] = vmovl_s16(vget_low_s16 (sc16.val[0]));
            scales.val[1] = vmovl_s16(vget_high_s16(sc16.val[0]));
            scales.val[2] = vmovl_s16(vget_low_s16 (sc16.val[1]));
            scales.val[3] = vmovl_s16(vget_high_s16(sc16.val[1]));
            for (int ib64 = 0; ib64 < QK_K/64; ++ib64) {
                qx[2*ib64+0] = {vreinterpretq_s8_u64(uint64x2_t{iq1s_grid_us[qs[0] | (((uint16_t)qh[0] << 8) & 0x700)],
                                                                iq1s_grid_us[qs[1] | (((uint16_t)qh[0] << 4) & 0x700)]}),
                                vreinterpretq_s8_u64(uint64x2_t{iq1s_grid_us[qs[2] | (((uint16_t)qh[1] << 8) & 0x700)],
                                                                iq1s_grid_us[qs[3] | (((uint16_t)qh[1] << 4) & 0x700)]})};
                qx[2*ib64+1] = {vreinterpretq_s8_u64(uint64x2_t{iq1s_grid_us[qs[4] | (((uint16_t)qh[2] << 8) & 0x700)],
                                                                iq1s_grid_us[qs[5] | (((uint16_t)qh[2] << 4) & 0x700)]}),
                                vreinterpretq_s8_u64(uint64x2_t{iq1s_grid_us[qs[6] | (((uint16_t)qh[3] << 8) & 0x700)],
                                                                iq1s_grid_us[qs[7] | (((uint16_t)qh[3] << 4) & 0x700)]})};
                auto qh16 = (const uint16_t *)qh;
                auto h1 = vdupq_n_u64(qh16[0]);
                auto h2 = vdupq_n_u64(qh16[1]);
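                // delta is 9 when the qh bit is set and 7 otherwise, so the shifted grid
                // value 8*(g+1) - delta equals 8*g -/+ 1, i.e. 8*(g -/+ 0.125); the 0.125f
                // applied at the end restores the scale.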
                auto delta1 = vsubq_s8(vdupq_n_s8(8), vorrq_s8(vreinterpretq_s8_u64(vceqq_u64(vandq_u64(h1, delta_mask.val[0]), delta_mask.val[0])), vdupq_n_s8(1)));
                auto delta2 = vsubq_s8(vdupq_n_s8(8), vorrq_s8(vreinterpretq_s8_u64(vceqq_u64(vandq_u64(h1, delta_mask.val[1]), delta_mask.val[1])), vdupq_n_s8(1)));
                qx[2*ib64+0].val[0] = vsubq_s8(vshlq_n_s8(qx[2*ib64+0].val[0], 3), delta1);
                qx[2*ib64+0].val[1] = vsubq_s8(vshlq_n_s8(qx[2*ib64+0].val[1], 3), delta2);
                delta1 = vsubq_s8(vdupq_n_s8(8), vorrq_s8(vreinterpretq_s8_u64(vceqq_u64(vandq_u64(h2, delta_mask.val[0]), delta_mask.val[0])), vdupq_n_s8(1)));
                delta2 = vsubq_s8(vdupq_n_s8(8), vorrq_s8(vreinterpretq_s8_u64(vceqq_u64(vandq_u64(h2, delta_mask.val[1]), delta_mask.val[1])), vdupq_n_s8(1)));
                qx[2*ib64+1].val[0] = vsubq_s8(vshlq_n_s8(qx[2*ib64+1].val[0], 3), delta1);
                qx[2*ib64+1].val[1] = vsubq_s8(vshlq_n_s8(qx[2*ib64+1].val[1], 3), delta2);
                qs += 8;
                qh += 4;
            }
            for (int iy = 0; iy < nrc_y; ++iy) {
                auto sumi = vdupq_n_s32(0);
                for (int j = 0; j < 4; ++j) {
                    auto y1 = q8.load_quants(iy, ibl, 2*j+0);
                    auto dot1 = ggml_vdotq_s32(vdupq_n_s32(0), qx[2*j+0].val[0], y1.val[0]);
                    auto dot2 = ggml_vdotq_s32(vdupq_n_s32(0), qx[2*j+0].val[1], y1.val[1]);
                    auto y2 = q8.load_quants(iy, ibl, 2*j+1);
                    auto dot3 = ggml_vdotq_s32(vdupq_n_s32(0), qx[2*j+1].val[0], y2.val[0]);
                    auto dot4 = ggml_vdotq_s32(vdupq_n_s32(0), qx[2*j+1].val[1], y2.val[1]);
                    auto dot  = vpaddq_s32(vpaddq_s32(dot1, dot2), vpaddq_s32(dot3, dot4));
                    sumi = vmlaq_s32(sumi, dot, scales.val[j]);
                }
                acc[iy] = vfmaq_f32(acc[iy], vdupq_n_f32(d*q8.scale(iy, ibl)), vcvtq_f32_s32(sumi));
            }
        }
        for (int iy = 0; iy < nrc_y; ++iy) {
            info.store(ix, iy, 0.125f*vaddvq_f32(acc[iy]));
            acc[iy] = vdupq_n_f32(0.f);
        }
    }
}
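
// Reference sketch (not used by the kernels above): scalar form of the iq1_m block-scale
// reassembly performed in the kernel and again in iqk_convert_iq1_m_q8_k_r8 below. The
// name iq1m_block_scale_ref is ours; the bit gathering matches the shift/mask expression
// applied to sc[0..3].
inline float iq1m_block_scale_ref(const uint16_t * sc) {
    iq1m_scale_t bs;
    bs.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
    return GGML_FP16_TO_FP32(bs.f16);
}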

// Applies the per-16-quant scales to qx, finds the block maximum, and repacks the result
// into the r8-interleaved q8_k layout: q8_k points at row k of the block, block is a
// 32-byte scratch buffer, and each uint32 (4 quants) is stored with stride 8 to interleave
// the 8 rows. Returns the new per-row block scale (0 for an all-zero block).
inline float convert_to_q8_k_r8(float d0, const int8x16x2_t * qx, const int8_t * scales, uint32_t * block, uint32_t * q8_k) {
    auto max_i16 = vdupq_n_u16(0);
    int16x8x4_t q[8];
    for (int ib32 = 0; ib32 < 8; ++ib32) {
        auto scale_l = vdup_n_s8(scales[2*ib32+0]);
        auto scale_h = vdup_n_s8(scales[2*ib32+1]);
        q[ib32].val[0] = vmull_s8(scale_l, vget_low_s8 (qx[ib32].val[0]));
        q[ib32].val[1] = vmull_s8(scale_l, vget_high_s8(qx[ib32].val[0]));
        q[ib32].val[2] = vmull_s8(scale_h, vget_low_s8 (qx[ib32].val[1]));
        q[ib32].val[3] = vmull_s8(scale_h, vget_high_s8(qx[ib32].val[1]));
        max_i16 = vmaxq_u16(max_i16, vmaxq_u16(vreinterpretq_u16_s16(vabsq_s16(q[ib32].val[0])), vreinterpretq_u16_s16(vabsq_s16(q[ib32].val[1]))));
        max_i16 = vmaxq_u16(max_i16, vmaxq_u16(vreinterpretq_u16_s16(vabsq_s16(q[ib32].val[2])), vreinterpretq_u16_s16(vabsq_s16(q[ib32].val[3]))));
    }
    uint16_t imax = vmaxvq_u16(max_i16);
    if (!imax) {
        for (int ib32 = 0; ib32 < 8; ++ib32) for (int l = 0; l < 8; ++l) q8_k[64*ib32 + 8*l] = 0;
        return 0.f;
    }
    float dnew = float(imax) * d0;
    //auto max_u32 = vmaxq_u32(vmovl_u16(vget_low_u16(max_i16)), vmovl_u16(vget_high_u16(max_i16)));
    //auto max_f32 = vcvtq_f32_u32(max_u32);
    //auto dnew = vmaxvq_f32(max_f32) * d0;
    bool needs_scaling = true;
    if (dnew <= 1.f) {
        dnew = 1.f; needs_scaling = false;
    }
    auto scale = vdupq_n_f32(1/dnew);
    for (int ib32 = 0; ib32 < 8; ++ib32) {
        if (needs_scaling) {
            for (int l = 0; l < 4; ++l) {
                auto i1 = vcvtnq_s32_f32(vmulq_f32(scale, vcvtq_f32_s32(vmovl_s16(vget_low_s16 (q[ib32].val[l])))));
                auto i2 = vcvtnq_s32_f32(vmulq_f32(scale, vcvtq_f32_s32(vmovl_s16(vget_high_s16(q[ib32].val[l])))));
                q[ib32].val[l] = vcombine_s16(vmovn_s32(i1), vmovn_s32(i2));
            }
        }
        for (int l = 0; l < 2; ++l) {
            auto s8 = vcombine_s8(vmovn_s16(q[ib32].val[2*l+0]), vmovn_s16(q[ib32].val[2*l+1]));
            vst1q_s8((int8_t *)block + 16*l, s8);
        }
        auto qb = q8_k + 64*ib32;
        for (int l = 0; l < 8; ++l) {
            qb[8*l] = block[l];
        }
    }
    return dnew;
}
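
// Reference sketch of the block-scale logic in convert_to_q8_k_r8 (scalar, illustration
// only; q8_k_r8_block_scale_ref is a name we introduce): the new scale grows linearly
// with the block maximum and is clamped to 1 when the values already fit int8 without
// rescaling.
inline float q8_k_r8_block_scale_ref(uint16_t imax, float d0) {
    if (!imax) return 0.f;              // all-zero block
    float dnew = float(imax) * d0;      // d0 = 1/126 in the converters below
    return dnew <= 1.f ? 1.f : dnew;    // no rescaling needed for small blocks
}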

// Converts 8 rows of iq1_s to the interleaved q8_k_r8 format.
void iqk_convert_iq1_s_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int nrc_x) {
    GGML_ASSERT(n%QK_K == 0);
    GGML_ASSERT(nrc_x%8 == 0);

    int nb = n/QK_K;

    const block_iq1_s * x8[8];

    block_q8_k_r8 * y = (block_q8_k_r8 *)vy;

    int8_t ls[16];

    uint32_t block[8];

    int8x16x2_t qx[8];

    for (int ix = 0; ix < nrc_x; ix += 8) {
        for (int k = 0; k < 8; ++k) x8[k] = (const block_iq1_s *)((const char *)vx + (ix + k)*bx);
        for (int i = 0; i < nb; ++i) {
            for (int k = 0; k < 8; ++k) {
                float d = 0.125f * GGML_FP16_TO_FP32(x8[k][i].d);
                auto qs = x8[k][i].qs;
                auto qh = x8[k][i].qh;
                int8x16x2_t value;
                for (int ib32 = 0; ib32 < 8; ++ib32) {
                    ls[2*ib32 + 0] = ls[2*ib32 + 1] = (2*((qh[ib32] >> 12) & 7) + 1); // 3-bit scale in bits 12..14, mapped to the odd values 1..15
                    value.val[0] = vreinterpretq_s8_u64(uint64x2_t{iq1s_grid[qs[0] | ((qh[ib32] << 8) & 0x700)], iq1s_grid[qs[1] | ((qh[ib32] << 5) & 0x700)]});
                    value.val[1] = vreinterpretq_s8_u64(uint64x2_t{iq1s_grid[qs[2] | ((qh[ib32] << 2) & 0x700)], iq1s_grid[qs[3] | ((qh[ib32] >> 1) & 0x700)]});
                    value.val[0] = vshlq_n_s8(vaddq_s8(value.val[0], vdupq_n_s8(1)), 3);
                    value.val[1] = vshlq_n_s8(vaddq_s8(value.val[1], vdupq_n_s8(1)), 3);
                    auto delta = vdupq_n_s8((qh[ib32] & 0x8000) ? -9 : -7); // bit 15 selects the sign: (g+1)*8 - 9 = 8*g - 1, (g+1)*8 - 7 = 8*g + 1
                    qx[ib32].val[0] = vaddq_s8(value.val[0], delta);
                    qx[ib32].val[1] = vaddq_s8(value.val[1], delta);
                    qs += 4;
                }
                float dnew = convert_to_q8_k_r8(1.f/126, qx, ls, block, (uint32_t *)y[i].qs + k); // d0 = 1/126 keeps the rescaled maximum at 126, within int8 range
                y[i].d[k] = GGML_FP32_TO_FP16(d*dnew);
            }
        }
        y += nb;
    }
}
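
// Scalar sketch of the iq1_s decoding used above (illustration only; iq1s_decode8_ref is
// our name): gidx is the 11-bit grid index already assembled from 8 bits of qs and 3 bits
// of qh, delta_bit is bit 15 of the corresponding qh word.
inline void iq1s_decode8_ref(uint16_t gidx, bool delta_bit, int8_t * out) {
    const int8_t * g = (const int8_t *)(iq1s_grid + (gidx & 0x7ff));
    int delta = delta_bit ? -9 : -7;    // (g+1)*8 + delta = 8*g - 1 or 8*g + 1
    for (int j = 0; j < 8; ++j) out[j] = (int8_t)(((g[j] + 1) << 3) + delta);
}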

// Converts 8 rows of iq1_m to the interleaved q8_k_r8 format.
void iqk_convert_iq1_m_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int nrc_x) {
    GGML_ASSERT(n%QK_K == 0);
    GGML_ASSERT(nrc_x%8 == 0);

    int nb = n/QK_K;

    const block_iq1_m * x8[8];

    block_q8_k_r8 * y = (block_q8_k_r8 *)vy;

    int8_t ls[16];

    uint32_t block[8];

    int8x16x2_t qx[8];

    // bits 3 and 7 of qh[0] (low pair) and of qh[1] (high pair): per-8-quant delta sign bits
    uint32x4x2_t mask = {uint32x4_t{0x00000008, 0x00000008, 0x00000080, 0x00000080}, uint32x4_t{0x00080000, 0x00080000, 0x00800000, 0x00800000}};

    for (int ix = 0; ix < nrc_x; ix += 8) {
        for (int k = 0; k < 8; ++k) x8[k] = (const block_iq1_m *)((const char *)vx + (ix + k)*bx);
        for (int i = 0; i < nb; ++i) {
            for (int k = 0; k < 8; ++k) {
                const uint16_t * sc = (const uint16_t *)x8[k][i].scales;
                iq1m_scale_t scale;
                scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
                float d = 0.125f * GGML_FP16_TO_FP32(scale.f16);
                auto qs = x8[k][i].qs;
                auto qh = x8[k][i].qh;
                int8x16x2_t value;
                for (int ib32 = 0; ib32 < 8; ++ib32) {
                    ls[2*ib32 + 0] = (2*((sc[ib32/2] >> (6*(ib32%2)+0)) & 0x7) + 1);
                    ls[2*ib32 + 1] = (2*((sc[ib32/2] >> (6*(ib32%2)+3)) & 0x7) + 1);
                    //value = _mm256_set_epi64x(iq1s_grid[qs[3] | ((qh[1] << 4) & 0x700)], iq1s_grid[qs[2] | ((qh[1] << 8) & 0x700)],
                    //                          iq1s_grid[qs[1] | ((qh[0] << 4) & 0x700)], iq1s_grid[qs[0] | ((qh[0] << 8) & 0x700)]);
                    value.val[0] = vreinterpretq_s8_u64(uint64x2_t{iq1s_grid[qs[0] | ((qh[0] << 8) & 0x700)], iq1s_grid[qs[1] | ((qh[0] << 4) & 0x700)]});
                    value.val[1] = vreinterpretq_s8_u64(uint64x2_t{iq1s_grid[qs[2] | ((qh[1] << 8) & 0x700)], iq1s_grid[qs[3] | ((qh[1] << 4) & 0x700)]});
                    value.val[0] = vshlq_n_s8(vaddq_s8(value.val[0], vdupq_n_s8(1)), 3);
                    value.val[1] = vshlq_n_s8(vaddq_s8(value.val[1], vdupq_n_s8(1)), 3);

                    auto aux = vdupq_n_u32(qh[0] | qh[1] << 16);
                    uint32x4x2_t delta_mask{ vceqq_u32(vandq_u32(aux, mask.val[0]), mask.val[0]), vceqq_u32(vandq_u32(aux, mask.val[1]), mask.val[1]) };
                    int8x16x2_t delta{ vaddq_s8(vdupq_n_s8(7), vandq_s8(vdupq_n_s8(2), vreinterpretq_s8_u32(delta_mask.val[0]))),
                                       vaddq_s8(vdupq_n_s8(7), vandq_s8(vdupq_n_s8(2), vreinterpretq_s8_u32(delta_mask.val[1]))) };
                    qx[ib32].val[0] = vsubq_s8(value.val[0], delta.val[0]);
                    qx[ib32].val[1] = vsubq_s8(value.val[1], delta.val[1]);

                    qs += 4;
                    qh += 2;
                }
                float dnew = convert_to_q8_k_r8(1.f/126, qx, ls, block, (uint32_t *)y[i].qs + k);
                y[i].d[k] = GGML_FP32_TO_FP16(d*dnew);
            }
        }
        y += nb;
    }
}
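
// Scalar view of the delta selection in iqk_convert_iq1_m_q8_k_r8 above (illustration
// only; iq1m_delta_ref is our name): bit 3 of a qh byte carries the delta sign for
// quants 0..7 of its 16-quant half, bit 7 for quants 8..15. A set bit selects delta 9
// (value 8*g - 1), a clear bit delta 7 (value 8*g + 1).
inline int iq1m_delta_ref(uint8_t qh_byte, int upper_half) {
    return (qh_byte & (upper_half ? 0x80 : 0x08)) ? 9 : 7;
}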

} // namespace

bool iqk_set_kernels_1bit(int ne00, int typeA, int typeB, std::array<mul_mat_t, IQK_MAX_NY>& funcs, mul_mat_t& func16) {

    auto expected_Btype = GGML_TYPE_Q8_K128;

    func16 = nullptr;

    switch (typeA) {
        case GGML_TYPE_IQ1_BN:
            if (ne00 % QK_IQ1BN != 0) return false;
            IQK_SET_MUL_MAT_FUNCTIONS(mul_mat_iq1bn_q8_K64, funcs);
            expected_Btype = GGML_TYPE_Q8_K64;
            break;
        case GGML_TYPE_IQ2_BN:
            if (ne00 % QK_IQ1BN != 0) return false;
            IQK_SET_MUL_MAT_FUNCTIONS(mul_mat_iq2bn_q8_K64, funcs);
            expected_Btype = GGML_TYPE_Q8_K64;
            break;
        case GGML_TYPE_IQ2_BN_R4:
            if (ne00 % QK_IQ1BN != 0) return false;
            funcs[0] = mul_mat_iq2_bn_r4_q8_k16<1>;
            funcs[1] = mul_mat_iq2_bn_r4_q8_k16<2>;
            funcs[2] = mul_mat_iq2_bn_r4_q8_k16<3>;
            funcs[3] = mul_mat_iq2_bn_r4_q8_k16<4>;
            funcs[4] = mul_mat_iq2_bn_r4_q8_k16<5>;
            //funcs[5] = mul_mat_iq2_bn_r4_q8_k16<6>;
            //funcs[6] = mul_mat_iq2_bn_r4_q8_k16<7>;
            //funcs[7] = mul_mat_iq2_bn_r4_q8_k16<8>;
            expected_Btype = GGML_TYPE_Q8_K16;
            break;
        case GGML_TYPE_IQ1_S:
            if (ne00%QK_K != 0) return false;
            IQK_SET_MUL_MAT_FUNCTIONS(mul_mat_iq1_s_q8_K, funcs);
            func16 = mul_mat_iq1_s_q8_K<16>;
            expected_Btype = GGML_TYPE_Q8_K;
            break;
        case GGML_TYPE_IQ1_S_R4:
            if (ne00%128 != 0) return false;
            IQK_SET_MUL_MAT_FUNCTIONS(mul_mat_iq1_s_r4_q8_1, funcs);
            funcs[0] = mul_mat_iq1_s_r4_q8_1_1;
            func16 = mul_mat_iq1_s_r4_q8_1<16>;
            expected_Btype = GGML_TYPE_Q8_K128;
            break;
        case GGML_TYPE_IQ1_M:
            if (ne00%QK_K != 0) return false;
            IQK_SET_MUL_MAT_FUNCTIONS(mul_mat_iq1_m_q8_K, funcs);
            func16 = mul_mat_iq1_m_q8_K<16>;
            expected_Btype = GGML_TYPE_Q8_K;
            break;
        case GGML_TYPE_IQ1_M_R4:
            if (ne00%128 != 0) return false;
            IQK_SET_MUL_MAT_FUNCTIONS(mul_mat_iq1_m_r4_q8_0, funcs);
            func16 = mul_mat_iq1_m_r4_q8_0<16>;
            expected_Btype = GGML_TYPE_Q8_K128;
            break;
        default:
            return false;
    }

    return ggml_type(typeB) == expected_Btype;

}
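
// Hypothetical usage sketch (the caller below is illustrative, not part of this file):
// the dispatcher fills `funcs` with kernels indexed by the number of right-hand-side
// columns and returns whether typeB matches the activation type the selected kernels
// expect.
//
//     std::array<mul_mat_t, IQK_MAX_NY> funcs;
//     mul_mat_t func16 = nullptr;
//     if (!iqk_set_kernels_1bit(ne00, GGML_TYPE_IQ1_S, GGML_TYPE_Q8_K, funcs, func16)) {
//         // unsupported typeA, bad row size, or typeB mismatch
//     }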

bool iqk_convert_1bit_q80_r8(int type, int n, const void * vx, size_t bx, void * vy, int nrc_x) {
    if (n%QK_K != 0 || nrc_x%8 != 0) return false;
    switch (ggml_type(type)) {
        case GGML_TYPE_IQ1_S: iqk_convert_iq1_s_q8_k_r8(n, vx, bx, vy, nrc_x); break;
        case GGML_TYPE_IQ1_M: iqk_convert_iq1_m_q8_k_r8(n, vx, bx, vy, nrc_x); break;
        default: return false;
    }
    return true;
}

#endif

#endif // IQK_IMPLEMENT
