#include "b64.h"
#include "../arch/asm.h"

OPEN_JLIB_NS


// Base64-encodes `len` bytes from `src_buf` into `dst_buf` using the standard
// alphabet (A-Z, a-z, 0-9, '+', '/'); when `padding` is true the output is
// padded with '=' to a multiple of 4 characters. Returns the number of
// characters written (no NUL terminator is appended). The caller must size
// `dst_buf` for the worst case of 4 * ((len + 2) / 3) characters.
//
// Three implementations are selected at compile time: an AVX2 path, an
// AArch64 NEON path, and a portable scalar fallback. The SIMD paths avoid a
// memory lookup table by translating each 6-bit index i in-register with a
// cascade of compare/blend steps implementing the same piecewise mapping as
// the scalar tail code below:
//   i < 26        -> i + 65   ('A'..'Z')
//   26 <= i < 52  -> i + 71   ('a'..'z')
//   52 <= i < 62  -> i - 4    ('0'..'9')
//   i == 62       -> i - 19   ('+')
//   i == 63       -> i - 16   ('/')
uwl B64::encode (void* dst_buf, const void* src_buf, uwl len, bool padding) noexcept {
  auto src = (const u8*)src_buf;
  auto dst = (c8*)dst_buf;
  // `tail` (0..2) is the final partial 3-byte group, handled separately at
  // the end; the loops below only ever see whole 3-byte groups.
  auto tail = len % 3;
  len -= tail;
#if __AVX2__
  // Broadcast thresholds/offsets for the piecewise translation above.
  auto A = AVX2_VPBROADCAST(B, u8v32, 26);
  auto A_ = AVX2_VPBROADCAST(B, u8v32, 65);
  auto a = AVX2_VPBROADCAST(B, u8v32, 52);
  auto a_ = AVX2_VPBROADCAST(B, u8v32, 71);
  auto d_ = AVX2_VPBROADCAST(B, u8v32, 4);
  auto p = AVX2_VPBROADCAST(B, u8v32, 62);
  auto p_ = AVX2_VPBROADCAST(B, u8v32, 19);
  auto b = AVX2_VPBROADCAST(B, u8v32, 63);
  auto b_ = AVX2_VPBROADCAST(B, u8v32, 16);
  // Per-128-bit-lane byte shuffle: each 3-byte source group is placed into
  // one 32-bit lane as {2,1,1,0} (middle bytes duplicated) so the shift/mask
  // sequence in the loop can slice out the four 6-bit indices.
  auto swap = AVX2_VBROADCAST(I128, u8v32, 0, "\2\1\1\0\5\4\4\3\10\7\7\6\13\12\12\11");
  // msk0 selects 6-bit fields in the even output bytes of each dword;
  // msk1 is the same mask shifted left 8 for the odd bytes.
  auto msk0 = AVX2_VPBROADCAST(D, u8v32, 0x003F003F);
  auto msk1 = AVX2_VPS(LLD, u8v32, t, 8, msk0);
  // Main loop: 24 source bytes -> 32 output chars per iteration. Exactly 24
  // bytes are read (16 via VLDDQU + 8 via VMOVQ), so there is no over-read;
  // VPALIGNR/VINSERTI128 distribute 12 bytes into each 128-bit lane, then
  // shifts+masks extract the 6-bit indices and the compare/blend cascade
  // translates them to ASCII.
  for (auto end = src + (len - len % 24); src < end; src += 24, dst += 32) {
    u8v32 v0, v1, v2, v3;
    asm volatile (R"(
      VLDDQU (%[src]), %x[v0]
      VMOVQ 16(%[src]), %x[v1]
      VPALIGNR $12, %x[v0], %x[v1], %x[v1]
      VINSERTI128 $1, %x[v1], %[v0], %[v0]
      VPSHUFB %[swap], %[v0], %[v0]
      VPSRLD $26, %[v0], %[v1]
      VPSLLD $10, %[v0], %[v2]
      VPOR %[v2], %[v1], %[v1]
      VPAND %[msk0], %[v1], %[v1]
      VPSLLD $24, %[v0], %[v2]
      VPSRLD $12, %[v0], %[v3]
      VPOR %[v3], %[v2], %[v2]
      VPAND %[msk1], %[v2], %[v2]
      VPOR %[v2], %[v1], %[v0]
      VPCMPGTB %[v0], %[A], %[v1]
      VPADDB %[A_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPCMPGTB %[v0], %[a], %[v1]
      VPADDB %[a_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPCMPGTB %[v0], %[p], %[v1]
      VPSUBB %[d_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPCMPEQB %[v0], %[p], %[v1]
      VPSUBB %[p_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPCMPEQB %[v0], %[b], %[v1]
      VPSUBB %[b_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VMOVUPS %[v0], (%[dst])
      )"
      :[v0]"=&x"(v0), [v1]"=&x"(v1), [v2]"=&x"(v2), [v3]"=&x"(v3)
      :[dst]"r"(dst), [src]"r"(src), [A]"x"(A), [A_]"x"(A_), [a]"x"(a), [a_]"x"(a_), [d_]"x"(d_), [p]"x"(p), [p_]"x"(p_), [b]"x"(b), [b_]"x"(b_), [swap]"x"(swap), [msk0]"x"(msk0), [msk1]"x"(msk1)
      :"memory"
    );
  }
  // Remainder loop: one 3-byte group -> 4 chars per iteration. The 24 input
  // bits are assembled in a GPR, PDEP spreads them into four 6-bit bytes
  // (mask 0x3F3F3F3F), BSWAP restores memory order, and the same
  // compare/blend cascade translates them.
  for (len %= 24; len >= 3; len -= 3, src += 3, dst += 4) {
    u8v16 v0, v1, v2;
    u32 r0;
    asm volatile (R"(
      MOVBEW (%[src]), %w[r0]
      SHLL $8, %[r0]
      MOVB 2(%[src]), %b[r0]
      PDEPL %[msk], %[r0], %[r0]
      BSWAP %[r0]
      VMOVD %[r0], %[v0]
      VPCMPGTB %[v0], %x[A], %[v1]
      VPADDB %x[A_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPCMPGTB %[v0], %x[a], %[v1]
      VPADDB %x[a_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPCMPGTB %[v0], %x[p], %[v1]
      VPSUBB %x[d_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPCMPEQB %[v0], %x[p], %[v1]
      VPSUBB %x[p_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPCMPEQB %[v0], %x[b], %[v1]
      VPSUBB %x[b_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VMOVD %[v0], (%[dst])
      )"
      :[r0]"=&r"(r0), [v0]"=&x"(v0), [v1]"=&x"(v1), [v2]"=&x"(v2)
      :[dst]"r"(dst), [src]"r"(src), [msk]"r"(0x3F3F3F3F), [A]"x"(A), [A_]"x"(A_), [a]"x"(a), [a_]"x"(a_), [d_]"x"(d_), [p]"x"(p), [p_]"x"(p_), [b]"x"(b), [b_]"x"(b_)
      :"memory"
    );
  }
  // Final 1- or 2-byte remainder, encoded with the scalar form of the same
  // mapping (the AVX2 build has no `tr` table to index).
  if (tail == 2) {
    auto i0 = src[0] >> 2;
    auto i1 = ((src[0] << 4) & 0x30) | (src[1] >> 4);
    auto i2 = ((src[1] << 2) & 0x3C);
    *dst++ = (c8)(i0 < 26 ? i0 + 65 : (i0 < 52 ? i0 + 71 : (i0 < 62 ? i0 - 4 : (i0 == 62 ? '+' : '/'))));
    *dst++ = (c8)(i1 < 26 ? i1 + 65 : (i1 < 52 ? i1 + 71 : (i1 < 62 ? i1 - 4 : (i1 == 62 ? '+' : '/'))));
    *dst++ = (c8)(i2 < 26 ? i2 + 65 : (i2 < 52 ? i2 + 71 : (i2 < 62 ? i2 - 4 : (i2 == 62 ? '+' : '/'))));
    if (padding)
      *dst++ = '=';
  }
  else if (tail == 1) {
    auto i0 = src[0] >> 2;
    auto i1 = (src[0] << 4) & 0x30;
    *dst++ = (c8)(i0 < 26 ? i0 + 65 : (i0 < 52 ? i0 + 71 : (i0 < 62 ? i0 - 4 : (i0 == 62 ? '+' : '/'))));
    *dst++ = (c8)(i1 < 26 ? i1 + 65 : (i1 < 52 ? i1 + 71 : (i1 < 62 ? i1 - 4 : (i1 == 62 ? '+' : '/'))));
    if (padding) {
      *dst++ = '=';
      *dst++ = '=';
    }
  }
#else
  // Non-AVX2 builds translate 6-bit indices through this alphabet table.
  auto tr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
#if __aarch64__
  // Per-dword byte shuffle {2,1,1,0} — same layout trick as the AVX2 path.
  auto swap = A64_LD1(u8v16, 0, "\2\1\1\0\5\4\4\3\10\7\7\6\13\12\12\11");
  auto msk0 = A64_DUP(u8v16, 4S, w, 0x003F003F);
  auto msk1 = A64_VVI(SHL, u8v16, 4S, msk0, 8);
  // The 64-entry alphabet is loaded into four vector registers and indexed
  // with TBL. They are pinned to v20..v23 because the 4-register TBL list
  // operand requires consecutively numbered registers.
  register u8v16 tbl0 asm("v20");
  register u8v16 tbl1 asm("v21");
  register u8v16 tbl2 asm("v22");
  register u8v16 tbl3 asm("v23");
  asm volatile ("LD1 { %[v0].16B, %[v1].16B, %[v2].16B, %[v3].16B }, [%[src]]": [v0]"=&w"(tbl0), [v1]"=&w"(tbl1), [v2]"=&w"(tbl2), [v3]"=&w"(tbl3): [src]"r"(tr));
  // Main loop: 12 source bytes -> 16 output chars per iteration (8 + 4 bytes
  // loaded with post-increment, 16 stored). Shift/mask extraction of the
  // 6-bit indices, then a single 4-register TBL lookup translates all 16.
  for (auto end = src + (len - len % 12); src < end;) {
    u8v16 v0, v1, v2, v3;
    asm volatile (R"(
      LD1 { %[v0].D }[0], [%[src]], #8
      LD1 { %[v0].S }[2], [%[src]], #4
      TBL %[v0].16B, { %[v0].16B }, %[swap].16B
      USHR %[v1].4S, %[v0].4S, #26
      SHL %[v2].4S, %[v0].4S, #10
      ORR %[v1].16B, %[v1].16B, %[v2].16B
      AND %[v1].16B, %[v1].16B, %[msk0].16B
      SHL %[v2].4S, %[v0].4S, #24
      USHR %[v3].4S, %[v0].4S, #12
      ORR %[v2].16B, %[v2].16B, %[v3].16B
      AND %[v2].16B, %[v2].16B, %[msk1].16B
      ORR %[v0].16B, %[v1].16B, %[v2].16B
      TBL %[v0].16B, { %[tbl0].16B, %[tbl1].16B, %[tbl2].16B, %[tbl3].16B }, %[v0].16B
      ST1 { %[v0].16B }, [%[dst]], #16
      )"
      :[v0]"=&w"(v0), [v1]"=&w"(v1), [v2]"=&w"(v2), [v3]"=&w"(v3), [dst]"+r"(dst), [src]"+r"(src)
      :[tbl0]"w"(tbl0), [tbl1]"w"(tbl1), [tbl2]"w"(tbl2), [tbl3]"w"(tbl3), [swap]"w"(swap), [msk0]"w"(msk0), [msk1]"w"(msk1)
      :"memory"
    );
  }
  // Remainder loop: one 3-byte group -> 4 chars, using the 64-bit halves of
  // the same shuffle/shift/TBL sequence. Only the low 4 output bytes are
  // stored, so the uninitialized upper lanes of v0 never reach memory.
  for (len %= 12; len >= 3; len -= 3) {
    u8v16 v0, v1, v2, v3;
    u32 r0;
    asm volatile (R"(
      LD1 { %[v0].H }[0], [%[src]], #2
      LD1 { %[v0].B }[2], [%[src]], #1
      TBL %[v0].8B, { %[v0].16B }, %[swap].8B
      USHR %[v1].2S, %[v0].2S, #26
      SHL %[v2].2S, %[v0].2S, #10
      ORR %[v1].8B, %[v1].8B, %[v2].8B
      AND %[v1].8B, %[v1].8B, %[msk0].8B
      SHL %[v2].2S, %[v0].2S, #24
      USHR %[v3].2S, %[v0].2S, #12
      ORR %[v2].8B, %[v2].8B, %[v3].8B
      AND %[v2].8B, %[v2].8B, %[msk1].8B
      ORR %[v0].8B, %[v1].8B, %[v2].8B
      TBL %[v0].8B, { %[tbl0].16B, %[tbl1].16B, %[tbl2].16B, %[tbl3].16B }, %[v0].8B
      ST1 { %[v0].S }[0], [%[dst]], #4
      )"
    :[v0]"=&w"(v0), [v1]"=&w"(v1), [v2]"=&w"(v2), [v3]"=&w"(v3), [dst]"+r"(dst), [src]"+r"(src)
    :[tbl0]"w"(tbl0), [tbl1]"w"(tbl1), [tbl2]"w"(tbl2), [tbl3]"w"(tbl3), [swap]"w"(swap), [msk0]"w"(msk0), [msk1]"w"(msk1)
    :"memory"
    );
  }
#else
  // Portable fallback: classic 3-bytes-to-4-chars table lookup.
  for (auto end = src + len; src < end; src += 3, dst += 4) {
    auto i0 = src[0] >> 2;
    auto i1 = ((src[0] << 4) & 0x30) | (src[1] >> 4);
    auto i2 = ((src[1] << 2) & 0x3C) | (src[2] >> 6);
    auto i3 = src[2] & 0x3F;
    dst[0] = tr[i0];
    dst[1] = tr[i1];
    dst[2] = tr[i2];
    dst[3] = tr[i3];
  }
#endif
  // Final 1- or 2-byte remainder for the non-AVX2 builds.
  if (tail == 2) {
    auto i0 = src[0] >> 2;
    auto i1 = ((src[0] << 4) & 0x30) | (src[1] >> 4);
    auto i2 = ((src[1] << 2) & 0x3C);
    *dst++ = tr[i0];
    *dst++ = tr[i1];
    *dst++ = tr[i2];
    if (padding)
      *dst++ = '=';
  }
  else if (tail == 1) {
    auto i0 = src[0] >> 2;
    auto i1 = (src[0] << 4) & 0x30;
    *dst++ = tr[i0];
    *dst++ = tr[i1];
    if (padding) {
      *dst++ = '=';
      *dst++ = '=';
    }
  }
#endif
  return dst - (c8*)dst_buf;
}


// Decodes `len` base64 characters from `src_buf` into `dst_buf` and returns
// the number of bytes written. Both padded and unpadded input are accepted:
// trailing "=" / "==" (when len is a multiple of 4) or a length of 4n+2 /
// 4n+3 marks a final partial group; a length of 4n+1 is invalid and yields 0.
// Input characters are NOT validated — bytes outside the base64 alphabet
// produce garbage output rather than an error.
//
// As in encode(), the SIMD paths translate characters in-register with a
// compare/blend cascade implementing the inverse piecewise mapping:
//   '+' (43) -> +19 = 62, '/' (47) -> +16 = 63,
//   '0'..'9' (< '9'+1) -> +4 = 52..61,
//   'a'..'z' (> 'a'-1) -> -71 = 26..51,
//   'A'..'Z' (> 'A'-1) -> -65 = 0..25.
uwl B64::decode (void* dst_buf, const void* src_buf, uwl len) noexcept {
  if (!len)
    return 0;
  auto src = (const c8*)src_buf;
  auto dst = (u8*)dst_buf;
  // `tail` is the number of characters (2 or 3) in the final partial group;
  // 0 means the input is whole groups only.
  auto tail = len & 3;
  if (tail) {
    if (tail == 1)
      return 0;
    len -= tail;
  }
  else if (src[len - 1] == '=') {
    // Padded input: "==" leaves 2 significant chars, "=" leaves 3.
    tail = src[len - 2] == '=' ? 2 : 3;
    len -= 4;
  }
#if __AVX2__
  // Broadcast thresholds/offsets for the inverse translation above.
  auto p = AVX2_VPBROADCAST(B, u8v32, '+' + 0);
  auto p_ = AVX2_VPBROADCAST(B, u8v32, 19);
  auto b = AVX2_VPBROADCAST(B, u8v32, '/' + 0);
  auto b_ = AVX2_VPBROADCAST(B, u8v32, 16);
  auto d = AVX2_VPBROADCAST(B, u8v32, '9' + 1);
  auto d_ = AVX2_VPBROADCAST(B, u8v32, 4);
  auto a = AVX2_VPBROADCAST(B, u8v32, 'a' - 1);
  auto a_ = AVX2_VPBROADCAST(B, u8v32, 71);
  auto A = AVX2_VPBROADCAST(B, u8v32, 'A' - 1);
  auto A_ = AVX2_VPBROADCAST(B, u8v32, 65);
  // Per-128-bit-lane shuffle compacting each dword's three payload bytes
  // (dropping the fourth) into the low 12 bytes of the lane.
  auto swap = AVX2_VBROADCAST(I128, u8v32, 0, "\3\2\1\7\6\5\13\12\11\17\16\15\0\0\0\0");
  // Main loop: 32 source chars -> 24 output bytes per iteration. Translate
  // chars to 6-bit values, merge pairs/quads with shifts, compact with
  // VPSHUFB, then stitch the two lanes' 12-byte results into one contiguous
  // 24-byte store.
  for (auto end = src + (len & ~31); src < end; src += 32, dst += 24) {
    u8v32 v0, v1, v2;
    asm volatile (R"(
      VLDDQU (%[src]), %[v0]
      VPCMPEQB %[p], %[v0], %[v1]
      VPADDB %[p_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPCMPEQB %[b], %[v0], %[v1]
      VPADDB %[b_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPCMPGTB %[v0], %[d], %[v1]
      VPADDB %[d_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPCMPGTB %[a], %[v0], %[v1]
      VPSUBB %[a_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPCMPGTB %[A], %[v0], %[v1]
      VPSUBB %[A_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPSLLW $10, %[v0], %[v1]
      VPSRLW $8, %[v0], %[v2]
      VPSLLW $4, %[v2], %[v2]
      VPOR %[v2], %[v1], %[v0]
      VPSLLD $16, %[v0], %[v1]
      VPSRLD $20, %[v0], %[v2]
      VPSLLD $8, %[v2], %[v2]
      VPOR %[v2], %[v1], %[v0]
      VPSHUFB %[swap], %[v0], %[v0]
      VEXTRACTI128 $1, %[v0], %x[v1]
      VINSERTPS $0x30, %x[v1], %x[v0], %x[v0]
      VMOVUPS %x[v0], (%[dst])
      VPERMILPS $0x99, %x[v1], %x[v1]
      VMOVQ %x[v1], 16(%[dst])
      )"
      :[v0]"=&x"(v0), [v1]"=&x"(v1), [v2]"=&x"(v2)
      :[dst]"r"(dst), [src]"r"(src), [p]"x"(p), [p_]"x"(p_), [b]"x"(b), [b_]"x"(b_), [d]"x"(d), [d_]"x"(d_), [a]"x"(a), [a_]"x"(a_), [A]"x"(A), [A_]"x"(A_), [swap]"x"(swap)
      :"memory"
    );
  }
  // Remainder loop: one 4-char group -> 3 bytes per iteration. After the
  // compare/blend translation, PEXT (mask 0x3F3F3F3F) gathers the four
  // 6-bit fields into 24 bits, which are stored as 1 + 2 bytes.
  for (len &= 31; len; len -= 4, src += 4, dst += 3) {
    u8v16 v0, v1, v2;
    u32 r0;
    asm volatile (R"(
      VMOVD (%[src]), %[v0]
      VPCMPEQB %x[p], %[v0], %[v1]
      VPADDB %x[p_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPCMPEQB %x[b], %[v0], %[v1]
      VPADDB %x[b_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPCMPGTB %[v0], %x[d], %[v1]
      VPADDB %x[d_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPCMPGTB %x[a], %[v0], %[v1]
      VPSUBB %x[a_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VPCMPGTB %x[A], %[v0], %[v1]
      VPSUBB %x[A_], %[v0], %[v2]
      VPBLENDVB %[v1], %[v2], %[v0], %[v0]
      VMOVD %[v0], %[r0]
      BSWAP %[r0]
      PEXTL %[msk], %[r0], %[r0]
      MOVBEW %w[r0], 1(%[dst])
      SHRL $16, %[r0]
      MOVB %b[r0], (%[dst])
      )"
      :[v0]"=&x"(v0), [v1]"=&x"(v1), [v2]"=&x"(v2), [r0]"=&r"(r0)
      :[dst]"r"(dst), [src]"r"(src), [msk]"r"(0x3F3F3F3F), [p]"x"(p), [p_]"x"(p_), [b]"x"(b), [b_]"x"(b_), [d]"x"(d), [d_]"x"(d_), [a]"x"(a), [a_]"x"(a_), [A]"x"(A), [A_]"x"(A_)
      :"memory"
    );
  }
#elif __aarch64__
  // Same thresholds/offsets as the AVX2 path, broadcast across NEON lanes.
  auto p = A64_DUP(u8v16, 16B, w, '+' + 0);
  auto p_ = A64_DUP(u8v16, 16B, w, 19);
  auto b = A64_DUP(u8v16, 16B, w, '/' + 0);
  auto b_ = A64_DUP(u8v16, 16B, w, 16);
  auto d = A64_DUP(u8v16, 16B, w, '9' + 1);
  auto d_ = A64_DUP(u8v16, 16B, w, 4);
  auto a = A64_DUP(u8v16, 16B, w, 'a' - 1);
  auto a_ = A64_DUP(u8v16, 16B, w, 71);
  auto A = A64_DUP(u8v16, 16B, w, 'A' - 1);
  auto A_ = A64_DUP(u8v16, 16B, w, 65);
  // Compacting shuffle: three payload bytes kept per dword, fourth dropped.
  auto swap = A64_LD1(u8v16, 0, "\3\2\1\7\6\5\13\12\11\17\16\15\0\0\0\0");
  // Main loop: 16 source chars -> 12 output bytes per iteration. Note the
  // CMEQ/CMGT + ADD/SUB + BSL result register rotates each stage (BSL
  // overwrites its mask operand with the blended result).
  for (auto end = src + (len & ~15); src < end;) {
    u8v16 v0, v1, v2;
    asm volatile (R"(
      LD1 { %[v0].16B }, [%[src]], #16
      CMEQ %[v1].16B, %[v0].16B, %[p].16B
      ADD %[v2].16B, %[v0].16B, %[p_].16B
      BSL %[v1].16B, %[v2].16B, %[v0].16B
      CMEQ %[v2].16B, %[v1].16B, %[b].16B
      ADD %[v0].16B, %[v1].16B, %[b_].16B
      BSL %[v2].16B, %[v0].16B, %[v1].16B
      CMGT %[v0].16B, %[d].16B, %[v2].16B
      ADD %[v1].16B, %[v2].16B, %[d_].16B
      BSL %[v0].16B, %[v1].16B, %[v2].16B
      CMGT %[v1].16B, %[v0].16B, %[a].16B
      SUB %[v2].16B, %[v0].16B, %[a_].16B
      BSL %[v1].16B, %[v2].16B, %[v0].16B
      CMGT %[v2].16B, %[v1].16B, %[A].16B
      SUB %[v0].16B, %[v1].16B, %[A_].16B
      BSL %[v2].16B, %[v0].16B, %[v1].16B
      SHL %[v0].8H, %[v2].8H, #10
      USHR %[v1].8H, %[v2].8H, #8
      SHL %[v1].8H, %[v1].8H, #4
      ORR %[v0].16B, %[v0].16B, %[v1].16B
      SHL %[v1].4S, %[v0].4S, #16
      USHR %[v2].4S, %[v0].4S, #20
      SHL %[v2].4S, %[v2].4S, #8
      ORR %[v0].16B, %[v1].16B, %[v2].16B
      TBL %[v0].16B, { %[v0].16B }, %[swap].16B
      ST1 { %[v0].D }[0], [%[dst]], #8
      ST1 { %[v0].S }[2], [%[dst]], #4
      )"
      :[v0]"=&w"(v0), [v1]"=&w"(v1), [v2]"=&w"(v2), [dst]"+r"(dst), [src]"+r"(src)
      :[p]"w"(p), [p_]"w"(p_), [b]"w"(b), [b_]"w"(b_), [d]"w"(d), [d_]"w"(d_), [a]"w"(a), [a_]"w"(a_), [A]"w"(A), [A_]"w"(A_), [swap]"w"(swap)
      :"memory"
    );
  }
  // Remainder loop: one 4-char group -> 3 bytes (2 + 1 stored), using the
  // 64-bit halves of the same sequence.
  for (len &= 15; len; len -= 4) {
    u8v16 v0, v1, v2;
    asm volatile (R"(
      LD1 { %[v0].S }[0], [%[src]], #4
      CMEQ %[v1].8B, %[v0].8B, %[p].8B
      ADD %[v2].8B, %[v0].8B, %[p_].8B
      BSL %[v1].8B, %[v2].8B, %[v0].8B
      CMEQ %[v2].8B, %[v1].8B, %[b].8B
      ADD %[v0].8B, %[v1].8B, %[b_].8B
      BSL %[v2].8B, %[v0].8B, %[v1].8B
      CMGT %[v0].8B, %[d].8B, %[v2].8B
      ADD %[v1].8B, %[v2].8B, %[d_].8B
      BSL %[v0].8B, %[v1].8B, %[v2].8B
      CMGT %[v1].8B, %[v0].8B, %[a].8B
      SUB %[v2].8B, %[v0].8B, %[a_].8B
      BSL %[v1].8B, %[v2].8B, %[v0].8B
      CMGT %[v2].8B, %[v1].8B, %[A].8B
      SUB %[v0].8B, %[v1].8B, %[A_].8B
      BSL %[v2].8B, %[v0].8B, %[v1].8B
      SHL %[v0].4H, %[v2].4H, #10
      USHR %[v1].4H, %[v2].4H, #8
      SHL %[v1].4H, %[v1].4H, #4
      ORR %[v0].8B, %[v0].8B, %[v1].8B
      SHL %[v1].2S, %[v0].2S, #16
      USHR %[v2].2S, %[v0].2S, #20
      SHL %[v2].2S, %[v2].2S, #8
      ORR %[v0].8B, %[v1].8B, %[v2].8B
      TBL %[v0].8B, { %[v0].16B }, %[swap].8B
      ST1 { %[v0].H }[0], [%[dst]], #2
      ST1 { %[v0].B }[2], [%[dst]], #1
      )"
      :[v0]"=&w"(v0), [v1]"=&w"(v1), [v2]"=&w"(v2), [dst]"+r"(dst), [src]"+r"(src)
      :[p]"w"(p), [p_]"w"(p_), [b]"w"(b), [b_]"w"(b_), [d]"w"(d), [d_]"w"(d_), [a]"w"(a), [a_]"w"(a_), [A]"w"(A), [A_]"w"(A_), [swap]"w"(swap)
      :"memory"
    );
  }
#else
  // Portable fallback: 256-entry char -> 6-bit-value table (0xFF marks
  // characters outside the alphabet; '=' maps to 0x00).
  auto tr =
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x3E\xFF\xFF\xFF\x3F"
    "\x34\x35\x36\x37\x38\x39\x3A\x3B\x3C\x3D\xFF\xFF\xFF\x00\xFF\xFF"
    "\xFF\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E"
    "\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\xFF\xFF\xFF\xFF\xFF"
    "\xFF\x1A\x1B\x1C\x1D\x1E\x1F\x20\x21\x22\x23\x24\x25\x26\x27\x28"
    "\x29\x2A\x2B\x2C\x2D\x2E\x2F\x30\x31\x32\x33\xFF\xFF\xFF\xFF\xFF"
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
    "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF";
  for (auto end = src + len; src < end; src += 4, dst += 3) {
    // Index with the unsigned byte value: `src` points at (possibly signed)
    // c8, and a byte >= 0x80 would otherwise produce a negative index into
    // the 256-entry table (out-of-bounds read / UB). The upper half of the
    // table exists precisely to absorb such bytes.
    auto s0 = tr[(u8)src[0]];
    auto s1 = tr[(u8)src[1]];
    auto s2 = tr[(u8)src[2]];
    auto s3 = tr[(u8)src[3]];
    dst[0] = (u8)((s0 << 2) | ((s1 >> 4) & 0x03));
    dst[1] = (u8)((s1 << 4) | ((s2 >> 2) & 0x0F));
    dst[2] = (u8)((s2 << 6) | s3);
  }
#endif
  // Decode the final partial group (2 chars -> 1 byte, 3 chars -> 2 bytes)
  // with the scalar form of the inverse mapping, shared by all builds.
  if (tail) {
    auto s0 = src[0];
    s0 += s0 >= 'a' ? -71 : (s0 >= 'A' ? -65 : (s0 >= 48 ? 4 : (s0 == '+' ? 19 : 16)));
    auto s1 = src[1];
    s1 += s1 >= 'a' ? -71 : (s1 >= 'A' ? -65 : (s1 >= 48 ? 4 : (s1 == '+' ? 19 : 16)));
    *dst++ = (u8)((s0 << 2) | ((s1 >> 4) & (c8)0x03));
    if (tail == 3) {
      auto s2 = src[2];
      s2 += s2 >= 'a' ? -71 : (s2 >= 'A' ? -65 : (s2 >= 48 ? 4 : (s2 == '+' ? 19 : 16)));
      *dst++ = (u8)((s1 << 4) | ((s2 >> 2) & 0x0F));
    }
  }
  return dst - (u8*)dst_buf;
}


CLOSE_JLIB_NS
