#include "hex.h"
#include "../arch/asm.h"

OPEN_JLIB_NS


uwl HEX::encode (void* dst_buf, const void* src_buf, uwl len, bool lower_case) noexcept {
  // Encode `len` raw bytes from src_buf as 2*len hex characters in dst_buf,
  // high nibble first; returns the number of characters written (2*len).
  // Output is not NUL-terminated; dst_buf must hold at least 2*len bytes.
  auto tr = lower_case ? "0123456789abcdef" : "0123456789ABCDEF";
  auto src = (const u8*)src_buf;
  auto dst = (c8*)dst_buf;
#if __AVX2__
  // tbl: the 16-entry nibble->char table replicated into both 128-bit lanes
  // (VPSHUFB indexes within a lane); msk: 0x0F in every byte.
  auto tbl = AVX2_VBROADCAST(I128, u8v32, 0, tr);
  auto msk = AVX2_VPBROADCAST(B, u8v32, 0xF);
  // Main SIMD loop: 32 input bytes -> 64 hex chars per iteration.
  for (auto end = src + (len & ~31); src < end; src += 32, dst += 64) {
    u8v32 v0, v1, v2;
    // v1 <- high nibbles (word shift + mask), v0 <- low nibbles; both are
    // translated through tbl, then interleaved high-char-first by the
    // unpacks.  The two VPERM2I128s undo the lane-local ordering of
    // VPUNPCK{L,H}BW so the 64 chars land in source order.
    asm volatile (R"(
      VLDDQU (%[src]), %[v0]
      VPSRLW $4, %[v0], %[v1]
      VPAND %[msk], %[v0], %[v0]
      VPAND %[msk], %[v1], %[v1]
      VPSHUFB %[v0], %[tbl], %[v0]
      VPSHUFB %[v1], %[tbl], %[v1]
      VPUNPCKHBW %[v0], %[v1], %[v2]
      VPUNPCKLBW %[v0], %[v1], %[v1]
      VPERM2I128 $0x20, %[v2], %[v1], %[v0]
      VMOVUPS %[v0], (%[dst])
      VPERM2I128 $0x31, %[v2], %[v1], %[v0]
      VMOVUPS %[v0], 32(%[dst])
      )"
      :[v0]"=&x"(v0), [v1]"=&x"(v1), [v2]"=&x"(v2)
      :[dst]"r"(dst), [src]"r"(src), [tbl]"x"(tbl), [msk]"x"(msk)
      :"memory"
    );
  }
  // 128-bit tail: 8 input bytes -> 16 chars, using the low halves of
  // tbl/msk via the %x operand modifier.  Leaves len < 8 for the scalar
  // loop below.
  for (len &= 31; len >= 8; len -= 8, src += 8, dst += 16) {
    u8v16 v0, v1;
    asm volatile (R"(
      VMOVQ (%[src]), %[v0]
      VPSRLW $4, %[v0], %[v1]
      VPAND %x[msk], %[v0], %[v0]
      VPAND %x[msk], %[v1], %[v1]
      VPUNPCKLBW %[v0], %[v1], %[v0]
      VPSHUFB %[v0], %x[tbl], %[v0]
      VMOVUPS %[v0], (%[dst])
      )"
      :[v0]"=&x"(v0), [v1]"=&x"(v1)
      :[dst]"r"(dst), [src]"r"(src), [tbl]"x"(tbl), [msk]"x"(msk)
      :"memory"
    );
  }
#elif __aarch64__
  // tbl: the 16-byte nibble->char table; msk: 0x0F in every byte.
  auto tbl = A64_LD1(u8v16, 0, tr);
  auto msk = A64_DUP(u8v16, 16B, w, 0xF);
  // Main SIMD loop: 16 input bytes -> 32 hex chars; src/dst advance via
  // the post-indexed LD1/ST1 inside the asm (hence the "+r" constraints).
  for (auto end = src + (len & ~15); src < end;) {
    u8v16 v0, v1, v2;
    // USHR extracts high nibbles per byte (no mask needed for the shift
    // result); TBL maps both nibble vectors to chars; ZIP1/ZIP2
    // interleave them high-char-first in source order.
    asm volatile (R"(
      LD1 { %[v0].16B }, [%[src]], #16
      USHR %[v1].16B, %[v0].16B, #4
      AND %[v0].16B, %[v0].16B, %[msk].16B
      TBL %[v0].16B, { %[tbl].16B }, %[v0].16B
      TBL %[v1].16B, { %[tbl].16B }, %[v1].16B
      ZIP1 %[v2].16B, %[v1].16B, %[v0].16B
      ST1 { %[v2].16B }, [%[dst]], #16
      ZIP2 %[v2].16B, %[v1].16B, %[v0].16B
      ST1 { %[v2].16B }, [%[dst]], #16
      )"
      :[v0]"=&w"(v0), [v1]"=&w"(v1), [v2]"=&w"(v2), [dst]"+r"(dst), [src]"+r"(src)
      :[tbl]"w"(tbl), [msk]"w"(msk)
      :"memory"
    );
  }
  // 4-byte tail: 4 input bytes -> 8 chars through the same table.
  // Leaves len < 4 for the scalar loop below.
  for (len &= 15; len >= 4; len -= 4) {
    u8v16 v0, v1;
    asm volatile (R"(
      LD1 { %[v0].S }[0], [%[src]], #4
      USHR %[v1].8B, %[v0].8B, #4
      AND %[v0].8B, %[v0].8B, %[msk].8B
      TBL %[v0].8B, { %[tbl].16B }, %[v0].8B
      TBL %[v1].8B, { %[tbl].16B }, %[v1].8B
      ZIP1 %[v0].8B, %[v1].8B, %[v0].8B
      ST1 { %[v0].D }[0], [%[dst]], #8
      )"
      :[v0]"=&w"(v0), [v1]"=&w"(v1), [dst]"+r"(dst), [src]"+r"(src)
      :[tbl]"w"(tbl), [msk]"w"(msk)
      :"memory"
    );
  }
#endif
  // Scalar tail (and the whole job on targets without a SIMD path above).
  for (auto end = src + len; src < end; ++src) {
    *dst++ = tr[*src >> 4];
    *dst++ = tr[*src & 0xF];
  }
  return dst - (c8*)dst_buf;  // == 2 * original len
}


uwl HEX::decode (void* dst_buf, const void* src_buf, uwl len) noexcept {
  // Decode `len` hex characters (upper- or lower-case) from src_buf into
  // len/2 bytes at dst_buf.  Returns the number of bytes written (len/2),
  // or 0 when `len` is odd.  Input is NOT validated: characters outside
  // [0-9A-Fa-f] decode to unspecified byte values rather than an error.
  if (len & 1)
    return 0;
  auto src = (const c8*)src_buf;
  auto dst = (u8*)dst_buf;
#if __AVX2__
  // Constants for the branchless per-byte char->nibble mapping:
  //   ch > 'a'-1 ? ch - ('a'-10) : ch > 'A'-1 ? ch - ('A'-10) : ch - '0'
  // Already-converted nibbles (0..15) never exceed '0'-1, so the later
  // stages leave them untouched.
  auto a = AVX2_VPBROADCAST(B, u8v32, 'a' - 1);
  auto a_ = AVX2_VPBROADCAST(B, u8v32, 'a' - 10);
  auto A = AVX2_VPBROADCAST(B, u8v32, 'A' - 1);
  auto A_ = AVX2_VPBROADCAST(B, u8v32, 'A' - 10);
  auto d = AVX2_VPBROADCAST(B, u8v32, '0' - 1);
  auto d_ = AVX2_VPBROADCAST(B, u8v32, '0' - 0);
  // Main SIMD loop: 32 chars -> 16 bytes.  After the three compare/sub/
  // blend stages each byte holds its nibble value; the word shifts fuse
  // each char pair into (hi<<4)|lo and VPACKUSWB narrows words to bytes
  // (VEXTRACTI128 first flattens the two 128-bit lanes into order).
  for (auto end = src + (len & ~31); src < end; src += 32, dst += 16) {
    u8v32 v0, v1, v2;
    asm volatile (R"(
      VLDDQU (%[src]), %[v0]
      VPCMPGTB %[a], %[v0], %[v1]
      VPSUBB %[a_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPCMPGTB %[A], %[v0], %[v1]
      VPSUBB %[A_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPCMPGTB %[d], %[v0], %[v1]
      VPSUBB %[d_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPSLLW $8, %[v0], %[v1]
      VPSRLW $4, %[v1], %[v1]
      VPSRLW $8, %[v0], %[v0]
      VPOR %[v1], %[v0], %[v0]
      VEXTRACTI128 $1, %[v0], %x[v1]
      VPACKUSWB %x[v1], %x[v0], %x[v0]
      VMOVUPS %x[v0], (%[dst])
      )"
      :[v0]"=&x"(v0), [v1]"=&x"(v1), [v2]"=&x"(v2)
      :[dst]"r"(dst), [src]"r"(src), [a]"x"(a), [a_]"x"(a_), [A]"x"(A), [A_]"x"(A_), [d]"x"(d), [d_]"x"(d_)
      // FIX: the "memory" clobber was missing.  The asm stores through
      // dst, so without it the compiler may cache or reorder memory
      // accesses across this statement (the encode loops already have it).
      :"memory"
    );
  }
  // 128-bit tail: 8 chars -> 4 bytes using the low halves (%x) of the
  // broadcast constants.  Leaves len < 8 for the scalar loop below.
  for (len &= 31; len >= 8; len -= 8, src += 8, dst += 4) {
    u8v16 v0, v1, v2;
    asm volatile (R"(
      VMOVQ (%[src]), %[v0]
      VPCMPGTB %x[a], %[v0], %[v1]
      VPSUBB %x[a_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPCMPGTB %x[A], %[v0], %[v1]
      VPSUBB %x[A_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPCMPGTB %x[d], %[v0], %[v1]
      VPSUBB %x[d_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPSLLW $8, %[v0], %[v1]
      VPSRLW $4, %[v1], %[v1]
      VPSRLW $8, %[v0], %[v0]
      VPOR %[v1], %[v0], %[v0]
      VPACKUSWB %[v0], %[v0], %[v0]
      VMOVD %[v0], (%[dst])
      )"
      :[v0]"=&x"(v0), [v1]"=&x"(v1), [v2]"=&x"(v2)
      :[dst]"r"(dst), [src]"r"(src), [a]"x"(a), [a_]"x"(a_), [A]"x"(A), [A_]"x"(A_), [d]"x"(d), [d_]"x"(d_)
      // FIX: same missing "memory" clobber as the loop above (VMOVD
      // writes 4 bytes to *dst).
      :"memory"
    );
  }
#elif __aarch64__
  // Same staged mapping as the AVX2 path, but CMGE is >= so the
  // thresholds are the exact characters; digits subtract d ('0') itself.
  auto a = A64_DUP(u8v16, 16B, w, 'a' - 0);
  auto a_ = A64_DUP(u8v16, 16B, w, 'a' - 10);
  auto A = A64_DUP(u8v16, 16B, w, 'A' - 0);
  auto A_ = A64_DUP(u8v16, 16B, w, 'A' - 10);
  auto d = A64_DUP(u8v16, 16B, w, '0' - 0);
  // Main SIMD loop: 16 chars -> 8 bytes; src/dst advance via the
  // post-indexed LD1/ST1 inside the asm ("+r" constraints).  SHL #12 /
  // ORR / UQSHRN #8 fuse each char pair into (hi<<4)|lo and narrow.
  for (auto end = src + (len & ~15); src < end;) {
    u8v16 v0, v1, v2;
    asm volatile (R"(
      LD1 { %[v0].16B }, [%[src]], #16
      CMGE %[v1].16B, %[v0].16B, %[a].16B
      SUB %[v2].16B, %[v0].16B, %[a_].16B
      BSL %[v1].16B, %[v2].16B, %[v0].16B
      CMGE %[v2].16B, %[v1].16B, %[A].16B
      SUB %[v0].16B, %[v1].16B, %[A_].16B
      BSL %[v2].16B, %[v0].16B, %[v1].16B
      CMGE %[v0].16B, %[v2].16B, %[d].16B
      SUB %[v1].16B, %[v2].16B, %[d].16B
      BSL %[v0].16B, %[v1].16B, %[v2].16B
      SHL %[v1].8H, %[v0].8H, #12
      ORR %[v0].16B, %[v0].16B, %[v1].16B
      UQSHRN %[v0].8B, %[v0].8H, #8
      ST1 { %[v0].D }[0], [%[dst]], #8
      )"
      :[v0]"=&w"(v0), [v1]"=&w"(v1), [v2]"=&w"(v2), [dst]"+r"(dst), [src]"+r"(src)
      :[a]"w"(a), [a_]"w"(a_), [A]"w"(A), [A_]"w"(A_), [d]"w"(d)
      :"memory"
    );
  }
  // 4-char tail: 4 chars -> 2 bytes (only lanes 0-1 of the narrowed
  // result are stored, so the garbage upper lanes are harmless).
  // Leaves len < 4 for the scalar loop below.
  for (len &= 15; len >= 4; len -= 4) {
    u8v16 v0, v1, v2;
    asm volatile (R"(
      LD1 { %[v0].S }[0], [%[src]], #4
      CMGE %[v1].16B, %[v0].16B, %[a].16B
      SUB %[v2].16B, %[v0].16B, %[a_].16B
      BSL %[v1].16B, %[v2].16B, %[v0].16B
      CMGE %[v2].16B, %[v1].16B, %[A].16B
      SUB %[v0].16B, %[v1].16B, %[A_].16B
      BSL %[v2].16B, %[v0].16B, %[v1].16B
      CMGE %[v0].16B, %[v2].16B, %[d].16B
      SUB %[v1].16B, %[v2].16B, %[d].16B
      BSL %[v0].16B, %[v1].16B, %[v2].16B
      SHL %[v1].8H, %[v0].8H, #12
      ORR %[v0].16B, %[v0].16B, %[v1].16B
      UQSHRN %[v0].8B, %[v0].8H, #8
      ST1 { %[v0].H }[0], [%[dst]], #2
      )"
      :[v0]"=&w"(v0), [v1]"=&w"(v1), [v2]"=&w"(v2), [dst]"+r"(dst), [src]"+r"(src)
      :[a]"w"(a), [a_]"w"(a_), [A]"w"(A), [A_]"w"(A_), [d]"w"(d)
      :"memory"
    );
  }
#else
  // 256-entry char->nibble table (0xFF for non-hex characters; note the
  // function does not report those as errors).
  auto tr =
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\x0A\x0B\x0C\x0D\x0E\x0F\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\x0A\x0B\x0C\x0D\x0E\x0F\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF";
  for (auto end = src + len; src < end; ++dst) {
    // FIX: index with u8 -- if c8 is signed, bytes >= 0x80 would produce
    // a negative (out-of-bounds) table index, which is UB.
    auto h4 = (u8)tr[(u8)*src++];
    auto l4 = (u8)tr[(u8)*src++];
    *dst = (u8)((h4 << 4) | l4);
  }
  len = 0;  // everything consumed; skip the shared tail loop below
#endif
  // Scalar tail for the (even, < one SIMD chunk) remainder; 'W' == 'a'-10
  // and '7' == 'A'-10.
  for (auto end = src + len; src < end; ++dst) {
    auto ch = *src++;
    auto h4 = ch >= 'a' ? ch - 'W' : (ch >= 'A' ? ch - '7' : ch - '0');
    auto cl = *src++;
    auto l4 = cl >= 'a' ? cl - 'W' : (cl >= 'A' ? cl - '7' : cl - '0');
    *dst = (u8)((h4 << 4) | l4);
  }
  return dst - (u8*)dst_buf;  // == original len / 2
}


CLOSE_JLIB_NS
