#include "b64.h"
#include "../arch/a64.h"

OPEN_JLIB_NS


// Standard base64 alphabet (RFC 4648): sextet value -> ASCII char.
// 64 chars + NUL terminator; the encoder's vector path loads the first
// 64 bytes as a 4-register TBL lookup table.
static const c8 ENCODE_TBL[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

// Inverse alphabet: ASCII byte -> sextet value, 0xFF = invalid character.
// One row of 16 bytes per line (row N covers codes 0xN0..0xNF).  Note that
// '=' (0x3D) maps to 0x00 so padding decodes as a zero sextet; the decode
// tails rely on this when the last quantum carries '=' padding.
static const u8 DECODE_TBL[] =
  "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
  "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
  "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x3E\xFF\xFF\xFF\x3F"
  "\x34\x35\x36\x37\x38\x39\x3A\x3B\x3C\x3D\xFF\xFF\xFF\x00\xFF\xFF"
  "\xFF\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E"
  "\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\xFF\xFF\xFF\xFF\xFF"
  "\xFF\x1A\x1B\x1C\x1D\x1E\x1F\x20\x21\x22\x23\x24\x25\x26\x27\x28"
  "\x29\x2A\x2B\x2C\x2D\x2E\x2F\x30\x31\x32\x33\xFF\xFF\xFF\xFF\xFF"
  "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
  "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
  "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
  "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
  "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
  "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
  "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
  "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF";



// Base64-encode src_size bytes from src into dst (no padding-free variant:
// output is always padded to a 4-char quantum).  Returns the number of
// characters written.  dst must have room for 4 * ceil(src_size / 3) bytes.
uwl B64::encode (void *dst, const void *src, uwl src_size) noexcept {
  auto dp = (s8*)dst;
  auto sp = (const u8*)src;
  // Lookup table for the scalar tail; the original omitted this local, so
  // every `tr[...]` reference below failed to compile.
  auto tr = ENCODE_TBL;
  // i0 regroups input bytes 0..11 into four dwords of 3 payload bytes each
  // (a TBL index of 255 yields a zero byte); i1 regroups bytes 12..23 held
  // in the EXT-combined second vector.
  auto i0 = u8v16{255,2,1,0,255,5,4,3,255,8,7,6,255,11,10,9};
  auto i1 = u8v16{3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
  u8v16 m0, m1, m2, m3, t0, t1, t2, t3;
  // Bulk loop: each iteration turns 24 source bytes into 32 base64 chars.
  // m0..m3 select the four successive 6-bit fields of each dword; t0/t1
  // accumulate per-byte sextet indices which the final TBL maps through
  // ENCODE_TBL (preloaded into v20..v23).
  //
  // Fixes applied to the original asm:
  //  * AArch64 registers carry no '%' prefix, so "%%v20" emitted the invalid
  //    token "%v20"; bare vN names are used instead.
  //  * the second load fetched bytes 0..7 again (post-index LD1 uses the
  //    pre-increment address) and advanced sp by 16 on top of the ADD #24
  //    below; LDR d31,[sp,#16] fetches bytes 16..23 with no writeback so
  //    exactly 24 bytes are consumed per iteration.
  //  * ST1 stored v13, a register this asm never writes; the second result
  //    lives in v31.
  //  * the remaining-size counter was ADDed instead of SUBtracted, so the
  //    loop never terminated.
  //  * B.LO (unsigned) replaces B.LT (signed): src_size is unsigned.
  //  * "memory" clobber added: the asm reads *sp / ENCODE_TBL, writes *dp.
  // NOTE(review): if u8v16{...} lists lane 0 first, this mask/shift network
  // appears to emit each 4-char group byte-reversed relative to the scalar
  // tail (a REV32 before the final TBL would then be required) -- confirm
  // against known test vectors before trusting the vector path.
  asm volatile (R"(
    LD1 { v20.16b, v21.16b, v22.16b, v23.16b }, [%[tb]]
    DUP %[m0].4s, %w[m_]
    USHR %[m1].4s, %[m0].4s, #6
    USHR %[m2].4s, %[m1].4s, #6
    USHR %[m3].4s, %[m2].4s, #6
  1:CMP %[ss], #24
    B.LO 2f
    LD1 { v30.16b }, [%[sp]]
    LDR d31, [%[sp], #16]
    EXT v31.16b, v30.16b, v31.16b, #12
    TBL v30.16b, { v30.16b }, %[i0].16b
    TBL v31.16b, { v31.16b }, %[i1].16b
    AND %[t2].16b, v30.16b, %[m0].16b
    AND %[t3].16b, v31.16b, %[m0].16b
    USHR %[t0].4s, %[t2].4s, #2
    USHR %[t1].4s, %[t3].4s, #2
    AND %[t2].16b, v30.16b, %[m1].16b
    AND %[t3].16b, v31.16b, %[m1].16b
    USHR %[t2].4s, %[t2].4s, #4
    USHR %[t3].4s, %[t3].4s, #4
    ORR %[t0].16b, %[t0].16b, %[t2].16b
    ORR %[t1].16b, %[t1].16b, %[t3].16b
    AND %[t2].16b, v30.16b, %[m2].16b
    AND %[t3].16b, v31.16b, %[m2].16b
    USHR %[t2].4s, %[t2].4s, #6
    USHR %[t3].4s, %[t3].4s, #6
    ORR %[t0].16b, %[t0].16b, %[t2].16b
    ORR %[t1].16b, %[t1].16b, %[t3].16b
    AND %[t2].16b, v30.16b, %[m3].16b
    AND %[t3].16b, v31.16b, %[m3].16b
    USHR %[t2].4s, %[t2].4s, #8
    USHR %[t3].4s, %[t3].4s, #8
    ORR %[t0].16b, %[t0].16b, %[t2].16b
    ORR %[t1].16b, %[t1].16b, %[t3].16b
    TBL v30.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, %[t0].16b
    TBL v31.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, %[t1].16b
    ST1 { v30.16b, v31.16b }, [%[dp]]
    SUB %[ss], %[ss], #24
    ADD %[sp], %[sp], #24
    ADD %[dp], %[dp], #32
    B 1b
  2:)"
    :[dp]"+r"(dp), [sp]"+r"(sp), [ss]"+r"(src_size)
    ,[m0]"=&w"(m0), [m1]"=&w"(m1), [m2]"=&w"(m2), [m3]"=&w"(m3)
    ,[t0]"=&w"(t0), [t1]"=&w"(t1), [t2]"=&w"(t2), [t3]"=&w"(t3)
    :[tb]"r"(ENCODE_TBL), [i0]"w"(i0), [i1]"w"(i1), [m_]"r"(0xFC000000)
    :"v20", "v21", "v22", "v23", "v30", "v31", "memory"
  );
  // Scalar path for the remaining 0..23 bytes.
  // NOTE(review): x86_* helpers in an a64 translation unit look suspicious;
  // presumably ../arch/a64.h maps them to NEON/portable equivalents -- confirm.
  u32 frag = 0;  // zero-init: byte 3 is otherwise read indeterminate below
  auto p = (u8*)&frag;
  for (; src_size >= 3; src_size -= 3, sp += 3, dp += 4) {
    // Build a big-endian 24-bit group, spread it into four 6-bit fields with
    // pdep, then swap back so p[0..3] hold the sextet indices in output order.
    p[0] = sp[0];
    p[1] = sp[1];
    p[2] = sp[2];
    x86_bswap(frag);
    frag >>= 8;
    frag = x86_pdep<u32>(frag, 0x3F3F3F3F);
    x86_bswap(frag);
    dp[0] = tr[p[0]];
    dp[1] = tr[p[1]];
    dp[2] = tr[p[2]];
    dp[3] = tr[p[3]];
  }
  if (src_size > 0) {
    // 1 or 2 trailing bytes: zero-pad the group and emit '=' padding chars.
    p[0] = sp[0];
    p[1] = src_size == 2 ? sp[1] : (u8)0;
    p[2] = 0;
    x86_bswap(frag);
    frag >>= 8;
    frag = x86_pdep<u32>(frag, 0x3F3F3F3F);
    x86_bswap(frag);
    dp[0] = tr[p[0]];
    dp[1] = tr[p[1]];
    dp[2] = src_size == 2 ? tr[p[2]] : '=';
    dp[3] = '=';
    dp += 4;
  }
  return dp - (s8*)dst;
}



// Strict base64 decoder: writes decoded bytes to dst and returns their count,
// or -1 if the input is malformed (length not a multiple of 4, or a character
// outside the base64 alphabet).  '=' padding is honored in the final quantum.
swl B64::decode (void *dst, const void *src, uwl src_size) noexcept {
  if (src_size & 3)
    return -1;
  auto dp = (u8*)dst;
  auto sp = (s8*)src;
  auto tr = DECODE_TBL;
  // Per-dword byte reversal: each 4-char group becomes a big-endian dword.
  auto swap1 = u8v32{3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12,3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
  // Masks selecting the four 6-bit fields when repacking 4 sextets into a
  // 24-bit group (see the shift/OR chain below).
  auto mask1 = avx_vpbroadcastd<u8v32>(0x0000003F);
  auto mask2 = avx_vpbroadcastd<u8v32>(0x00000FC0);
  auto mask3 = avx_vpbroadcastd<u8v32>(0x0003F000);
  auto mask4 = avx_vpbroadcastd<u8v32>(0x00FC0000);
  // Compacts each dword's 3 payload bytes (dropping the zero top byte) into a
  // contiguous 12-byte run per 128-bit lane; index 255 produces a zero byte.
  auto swap2 = u8v32{255,255,255,255,2,1,0,6,5,4,10,9,8,14,13,12,2,1,0,6,5,4,10,9,8,14,13,12,255,255,255,255};
  // Class thresholds for the ASCII -> sextet conversion below.
  auto lower = avx_vpbroadcastb<u8v32>(96);  // > 96  => 'a'..'z'
  auto upper = avx_vpbroadcastb<u8v32>(64);  // > 64  => 'A'..'Z'
  auto digit = avx_vpbroadcastb<u8v32>(47);  // > 47  => '0'..'9'
  auto plus = avx_vpbroadcastb<u8v32>(43);
  // PCMPISTRM range pairs (NUL-terminated): 0-9, A-Z, a-z, '+', '/'.
  auto match = s8v16{'0','9','A','Z','a','z','+','+','/','/',0,0,0,0,0,0};
  // Bulk loop: 32 chars -> 24 bytes.  The >= 36 guard leaves at least one
  // quantum (the only place '=' padding is legal) for the scalar tail; '='
  // inside the bulk region fails the range validation and returns -1.
  for (; src_size >= 36; src_size -= 32, sp += 32, dp += 24) {
    auto frag = avx_vlddqu<u8v32>(sp);
    auto l128 = avx_cast<u8v16>(frag);
    auto h128 = avx_vextracti128<1, u8v16>(frag);
    // Every one of the 16 bytes must fall in an allowed range.
    if (avx_vpcmpistrm<D_SB | A_RANGE>(match, l128) != 0xFFFF)
      return -1;
    if (avx_vpcmpistrm<D_SB | A_RANGE>(match, h128) != 0xFFFF)
      return -1;
    auto zero = avx_vpxor<u8v32>(frag, frag);
    // ASCII -> sextet: subtract 71 from lowercase ('a' -> 26), 65 from
    // uppercase ('A' -> 0), then add 4 to digits ('0' -> 52), 16 to '/'
    // (-> 63) and 19 to '+' (-> 62); OR the partial results together.
    auto mask = avx_vpcmpgtb<u8v32>(frag, lower);
    auto diff = avx_vpblendvb<u8v32>(frag, avx_vpbroadcastb<u8v32>(71), mask);
    auto temp = avx_vpsubb<u8v32>(frag, diff);
    frag = avx_vpandn<u8v32>(mask, frag);
    mask = avx_vpcmpgtb<u8v32>(frag, upper);
    diff = avx_vpblendvb<u8v32>(frag, avx_vpbroadcastb<u8v32>(65), mask);
    temp = avx_vpor<u8v32>(avx_vpsubb<u8v32>(frag, diff), temp);
    frag = avx_vpandn<u8v32>(mask, frag);
    mask = avx_vpcmpgtb<u8v32>(frag, digit);
    diff = avx_vpblendvb<u8v32>(zero, avx_vpbroadcastb<u8v32>(4), mask);
    frag = avx_vpaddb<u8v32>(frag, diff);
    mask = avx_vpcmpeqb<u8v32>(frag, digit);
    diff = avx_vpblendvb<u8v32>(zero, avx_vpbroadcastb<u8v32>(16), mask);
    frag = avx_vpaddb<u8v32>(frag, diff);
    mask = avx_vpcmpeqb<u8v32>(frag, plus);
    diff = avx_vpblendvb<u8v32>(zero, avx_vpbroadcastb<u8v32>(19), mask);
    frag = avx_vpaddb<u8v32>(frag, diff);
    frag = avx_vpor<u8v32>(frag, temp);
    // Repack: 4 sextets per dword -> one 24-bit group in the dword's low bits.
    frag = avx_vpshufb<u8v32>(frag, swap1);
    temp = avx_vpand<u8v32>(frag, mask1);
    temp = avx_vpor<u8v32>(temp, avx_vpand<u8v32>(avx_vpsrld<2, u8v32>(frag), mask2));
    temp = avx_vpor<u8v32>(temp, avx_vpand<u8v32>(avx_vpsrld<4, u8v32>(frag), mask3));
    temp = avx_vpor<u8v32>(temp, avx_vpand<u8v32>(avx_vpsrld<6, u8v32>(frag), mask4));
    frag = avx_vpshufb<u8v32>(temp, swap2);
    // Stitch the two 12-byte lane runs into one contiguous 24-byte store.
    l128 = avx_cast<u8v16>(frag);
    h128 = avx_vextracti128<1, u8v16>(frag);
    auto res = avx_vpalignr<4, u8v16>(h128, l128);
    avx_vmovups(dp, res);
    res = avx_vpalignr<4, u8v16>(l128, h128);
    avx_vmovlps(dp + 16, res);
  }
  u32 frag;
  auto p = (u8*)&frag;
  // Scalar quanta, except the last one (>= 8 keeps 4 chars for the padding
  // tail below).  Index the table with an unsigned byte: sp is signed, so
  // input bytes >= 0x80 used to produce a negative (out-of-bounds) index.
  for (; src_size >= 8; src_size -= 4, sp += 4) {
    p[0] = tr[(u8)sp[0]];
    p[1] = tr[(u8)sp[1]];
    p[2] = tr[(u8)sp[2]];
    p[3] = tr[(u8)sp[3]];
    if (p[0] == 0xFF || p[1] == 0xFF || p[2] == 0xFF || p[3] == 0xFF)
      return -1;
    // Pack 4 sextets into a 24-bit group (pext), then reorder so p[0..2]
    // hold the 3 decoded bytes in output order.
    x86_bswap(frag);
    frag = x86_pext<u32>(frag, 0x3F3F3F3FU);
    x86_bswap(frag);
    frag >>= 8;
    *dp++ = p[0];
    *dp++ = p[1];
    *dp++ = p[2];
  }
  if (src_size > 0) {
    // Final quantum: '=' decodes to 0 via the table (so it passes the 0xFF
    // check) and suppresses the corresponding output bytes.
    p[0] = tr[(u8)sp[0]];
    p[1] = tr[(u8)sp[1]];
    p[2] = tr[(u8)sp[2]];
    p[3] = tr[(u8)sp[3]];
    if (p[0] == 0xFF || p[1] == 0xFF || p[2] == 0xFF || p[3] == 0xFF)
      return -1;
    x86_bswap(frag);
    frag = x86_pext<u32>(frag, 0x3F3F3F3FU);
    x86_bswap(frag);
    frag >>= 8;
    *dp++ = p[0];
    if (sp[2] != 0x3D)
      *dp++ = p[1];
    if (sp[3] != 0x3D)
      *dp++ = p[2];
  }
  return (s64)(dp - (u8*)dst);
}



// Non-validating base64 decoder: same pipeline as decode() but with no length
// check and no alphabet validation -- callers must guarantee well-formed
// input.  Returns the number of decoded bytes written.
uwl B64::decode_unsafe (void *dst, const void *src, uwl src_size) noexcept {
  auto dp = (u8*)dst;
  auto sp = (s8*)src;
  auto tr = DECODE_TBL;
  // Per-dword byte reversal: each 4-char group becomes a big-endian dword.
  auto swap1 = u8v32{3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12,3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
  // Masks selecting the four 6-bit fields when repacking 4 sextets into a
  // 24-bit group.
  auto mask1 = avx_vpbroadcastd<u8v32>(0x0000003F);
  auto mask2 = avx_vpbroadcastd<u8v32>(0x00000FC0);
  auto mask3 = avx_vpbroadcastd<u8v32>(0x0003F000);
  auto mask4 = avx_vpbroadcastd<u8v32>(0x00FC0000);
  // Compacts each dword's 3 payload bytes into a contiguous 12-byte run per
  // 128-bit lane; index 255 produces a zero byte.
  auto swap2 = u8v32{255,255,255,255,2,1,0,6,5,4,10,9,8,14,13,12,2,1,0,6,5,4,10,9,8,14,13,12,255,255,255,255};
  // Class thresholds for the ASCII -> sextet conversion below.
  auto lower = avx_vpbroadcastb<u8v32>(96);  // > 96  => 'a'..'z'
  auto upper = avx_vpbroadcastb<u8v32>(64);  // > 64  => 'A'..'Z'
  auto digit = avx_vpbroadcastb<u8v32>(47);  // > 47  => '0'..'9'
  auto plus = avx_vpbroadcastb<u8v32>(43);
  // Bulk loop: 32 chars -> 24 bytes; >= 36 leaves the final (possibly
  // padded) quantum for the scalar tail.
  for (; src_size >= 36; src_size -= 32, sp += 32, dp += 24) {
    auto zero = avx_vpxor<u8v32>(swap1, swap1);
    auto frag = avx_vlddqu<u8v32>(sp);
    // ASCII -> sextet: subtract 71 from lowercase ('a' -> 26), 65 from
    // uppercase ('A' -> 0), then add 4 to digits ('0' -> 52), 16 to '/'
    // (-> 63) and 19 to '+' (-> 62); OR the partial results together.
    auto mask = avx_vpcmpgtb<u8v32>(frag, lower);
    auto diff = avx_vpblendvb<u8v32>(frag, avx_vpbroadcastb<u8v32>(71), mask);
    auto temp = avx_vpsubb<u8v32>(frag, diff);
    frag = avx_vpandn<u8v32>(mask, frag);
    mask = avx_vpcmpgtb<u8v32>(frag, upper);
    diff = avx_vpblendvb<u8v32>(frag, avx_vpbroadcastb<u8v32>(65), mask);
    temp = avx_vpor<u8v32>(avx_vpsubb<u8v32>(frag, diff), temp);
    frag = avx_vpandn<u8v32>(mask, frag);
    mask = avx_vpcmpgtb<u8v32>(frag, digit);
    diff = avx_vpblendvb<u8v32>(zero, avx_vpbroadcastb<u8v32>(4), mask);
    frag = avx_vpaddb<u8v32>(frag, diff);
    mask = avx_vpcmpeqb<u8v32>(frag, digit);
    diff = avx_vpblendvb<u8v32>(zero, avx_vpbroadcastb<u8v32>(16), mask);
    frag = avx_vpaddb<u8v32>(frag, diff);
    mask = avx_vpcmpeqb<u8v32>(frag, plus);
    diff = avx_vpblendvb<u8v32>(zero, avx_vpbroadcastb<u8v32>(19), mask);
    frag = avx_vpaddb<u8v32>(frag, diff);
    frag = avx_vpor<u8v32>(frag, temp);
    // Repack: 4 sextets per dword -> one 24-bit group in the dword's low bits.
    frag = avx_vpshufb<u8v32>(frag, swap1);
    temp = avx_vpand<u8v32>(frag, mask1);
    temp = avx_vpor<u8v32>(temp, avx_vpand<u8v32>(avx_vpsrld<2, u8v32>(frag), mask2));
    temp = avx_vpor<u8v32>(temp, avx_vpand<u8v32>(avx_vpsrld<4, u8v32>(frag), mask3));
    temp = avx_vpor<u8v32>(temp, avx_vpand<u8v32>(avx_vpsrld<6, u8v32>(frag), mask4));
    frag = avx_vpshufb<u8v32>(temp, swap2);
    // Stitch the two 12-byte lane runs into one contiguous 24-byte store.
    auto l128 = avx_cast<u8v16>(frag);
    auto h128 = avx_vextracti128<1, u8v16>(frag);
    auto res = avx_vpalignr<4, u8v16>(h128, l128);
    avx_vmovups(dp, res);
    res = avx_vpalignr<4, u8v16>(l128, h128);
    avx_vmovlps(dp + 16, res);
  }
  u32 frag;
  auto p = (u8*)&frag;
  // Scalar quanta, except the last one.  Index the table with an unsigned
  // byte: sp is signed, so input bytes >= 0x80 used to produce a negative
  // (out-of-bounds) index into DECODE_TBL.
  for (; src_size >= 8; src_size -= 4, sp += 4) {
    p[0] = tr[(u8)sp[0]];
    p[1] = tr[(u8)sp[1]];
    p[2] = tr[(u8)sp[2]];
    p[3] = tr[(u8)sp[3]];
    // Pack 4 sextets into a 24-bit group (pext), then reorder so p[0..2]
    // hold the 3 decoded bytes in output order.
    x86_bswap(frag);
    frag = x86_pext<u32>(frag, 0x3F3F3F3FU);
    x86_bswap(frag);
    frag >>= 8;
    *dp++ = p[0];
    *dp++ = p[1];
    *dp++ = p[2];
  }
  if (src_size > 0) {
    // Final quantum: '=' padding suppresses the corresponding output bytes.
    p[0] = tr[(u8)sp[0]];
    p[1] = tr[(u8)sp[1]];
    p[2] = tr[(u8)sp[2]];
    p[3] = tr[(u8)sp[3]];
    x86_bswap(frag);
    frag = x86_pext<u32>(frag, 0x3F3F3F3FU);
    x86_bswap(frag);
    frag >>= 8;
    *dp++ = p[0];
    if (sp[2] != 0x3D)
      *dp++ = p[1];
    if (sp[3] != 0x3D)
      *dp++ = p[2];
  }
  return dp - (u8*)dst;
}


CLOSE_JLIB_NS