#pragma once

#include "vec4.h"
#include "box3.h"
#include "../arch/avx.h"

OPEN_JLIB_NS


struct Mat4 {

  f32v4 c0, c1, c2, c3;

  inline Mat4 () noexcept = default;
  inline Mat4 (f32 s00, f32 s10, f32 s20, f32 s30, f32 s01, f32 s11, f32 s21, f32 s31, f32 s02, f32 s12, f32 s22, f32 s32, f32 s03, f32 s13, f32 s23, f32 s33) noexcept
  : c0(f32v4{s00, s01, s02, s03})
  , c1(f32v4{s10, s11, s12, s13})
  , c2(f32v4{s20, s21, s22, s23})
  , c3(f32v4{s30, s31, s32, s33}) {
  }
  inline Mat4 (f32v4 c0, f32v4 c1, f32v4 c2, f32v4 c3) noexcept : c0(c0), c1(c1), c2(c2), c3(c3) {
  }
  inline Mat4 (Vec4 c0, Vec4 c1, Vec4 c2, Vec4 c3) noexcept : c0(c0.p), c1(c1.p), c2(c2.p), c3(c3.p) {
  }
  inline Mat4 (const f32* m) noexcept
  : c0(avx_vlddqu<f32v4>(m))
  , c1(avx_vlddqu<f32v4>(m+4))
  , c2(avx_vlddqu<f32v4>(m+8))
  , c3(avx_vlddqu<f32v4>(m+12)) {
  }


  template<uwl Index>
  inline f32 c () const noexcept;


  inline Vec4 operator* (Vec4 v) const noexcept {
    auto d = c0 * v[0];
    d += c1 * v[1];
    d += c2 * v[2];
    d += c3 * v[3];
    return d;
  }
  inline Mat4 operator* (const Mat4& m) const noexcept {
    auto d0 = c0 * m.c0[0];
    d0 += c1 * m.c0[1];
    d0 += c2 * m.c0[2];
    d0 += c3 * m.c0[3];
    auto d1 = c0 * m.c1[0];
    d1 += c1 * m.c1[1];
    d1 += c2 * m.c1[2];
    d1 += c3 * m.c1[3];
    auto d2 = c0 * m.c2[0];
    d2 += c1 * m.c2[1];
    d2 += c2 * m.c2[2];
    d2 += c3 * m.c2[3];
    auto d3 = c0 * m.c3[0];
    d3 += c1 * m.c3[1];
    d3 += c2 * m.c3[2];
    d3 += c3 * m.c3[3];
    return {d0, d1, d2, d3};
  }


  inline f32 operator* () const noexcept {
    f32v4 a0, ab, t0, t1, t2;
    t0 = avx_vshufps<0x40, f32v4>(c0, c0);
    t2 = avx_vshufps<0xF9, f32v4>(c1, c1);
    t0 = t0 * t2;
    t1 = avx_vshufps<0x40, f32v4>(c1, c1);
    t2 = avx_vshufps<0xF9, f32v4>(c0, c0);
    t1 = t1 * t2;
    a0 = t0 - t1;
    t0 = avx_vshufps<0x16, f32v4>(c2, c2);
    t2 = avx_vshufps<0xAF, f32v4>(c3, c3);
    t0 = t0 * t2;
    t1 = avx_vshufps<0x16, f32v4>(c3, c3);
    t2 = avx_vshufps<0xAF, f32v4>(c2, c2);
    t1 = t1 * t2;
    t0 = t0 - t1;
    a0 = a0 * t0;
    t0 = avx_vshufps<0x09, f32v4>(c0, c2);
    t2 = avx_vshufps<0x7E, f32v4>(c1, c3);
    t0 = t0 * t2;
    t1 = avx_vshufps<0x09, f32v4>(c1, c3);
    t2 = avx_vshufps<0x7E, f32v4>(c0, c2);
    t1 = t1 * t2;
    ab = t0 - t1;
    t0 = avx_vinsertps<0x6A, f32v4>(ab, ab);
    t1 = avx_vshufps<0x32, f32v4>(ab, ab);
    ab = t0 * t1;
    a0 = avx_vhsubps<f32v4>(a0, ab);
    a0 = avx_vhaddps<f32v4>(a0, a0);
    return avx_vhaddps<f32>(a0, a0);
  }


  inline Mat4 operator~ () const noexcept {
    f32v4 d0, d1, d2, d3, t0, t1;
    t0 = avx_vunpcklps<f32v4>(c0, c1);
    t1 = avx_vunpcklps<f32v4>(c2, c3);
    d0 = avx_vshufps<0x44, f32v4>(t0, t1);
    d1 = avx_vshufps<0xEE, f32v4>(t0, t1);
    t0 = avx_vunpckhps<f32v4>(c0, c1);
    t1 = avx_vunpckhps<f32v4>(c2, c3);
    d2 = avx_vshufps<0x44, f32v4>(t0, t1);
    d3 = avx_vshufps<0xEE, f32v4>(t0, t1);
    return {d0, d1, d2, d3};
  }
  inline Mat4 operator- () const noexcept {
    f32v4 a0, b0, ab, t0, t1, t2, dt, d0, d1, d2, d3, s0, s1, s2, s3;
    t0 = avx_vshufps<0x40, f32v4>(c0, c0);
    t1 = avx_vshufps<0xF9, f32v4>(c1, c1);
    t0 = t0 * t1;
    t1 = avx_vshufps<0x40, f32v4>(c1, c1);
    t2 = avx_vshufps<0xF9, f32v4>(c0, c0);
    t1 = t1 * t2;
    a0 = t0 - t1;
    t0 = avx_vshufps<0x16, f32v4>(c2, c2);
    t1 = avx_vshufps<0xAF, f32v4>(c3, c3);
    t0 = t0 * t1;
    t1 = avx_vshufps<0x16, f32v4>(c3, c3);
    t2 = avx_vshufps<0xAF, f32v4>(c2, c2);
    t1 = t1 * t2;
    b0 = t0 - t1;
    t0 = avx_vshufps<0x09, f32v4>(c0, c2);
    t1 = avx_vshufps<0x7E, f32v4>(c1, c3);
    t0 = t0 * t1;
    t1 = avx_vshufps<0x09, f32v4>(c1, c3);
    t2 = avx_vshufps<0x7E, f32v4>(c0, c2);
    t1 = t1 * t2;
    ab = t0 - t1;
    t0 = avx_vinsertps<0x6A, f32v4>(ab, ab);
    t1 = avx_vshufps<0x32, f32v4>(ab, ab);
    t0 = t0 * t1;
    dt = a0 * b0;
    dt = avx_vhsubps<f32v4>(dt, t0);
    dt = avx_vhaddps<f32v4>(dt, dt);
    dt = avx_vhaddps<f32v4>(dt, dt);
    d1 = avx_vxorps<f32v4>(dt, dt);
    d3 = d1;
    d0 = d1 - c0;
    d2 = d1 - c2;
    t0 = avx_vunpcklps<f32v4>(d0, c1);
    t1 = avx_vunpcklps<f32v4>(d2, c3);
    s0 = avx_vshufps<0x22, f32v4>(t0, t1);
    s1 = avx_vshufps<0x77, f32v4>(t0, t1);
    t0 = avx_vunpckhps<f32v4>(d0, c1);
    t1 = avx_vunpckhps<f32v4>(d2, c3);
    s2 = avx_vshufps<0x22, f32v4>(t0, t1);
    s3 = avx_vshufps<0x77, f32v4>(t0, t1);
    t0 = avx_vshufps<0x50, f32v4>(b0, ab);
    d0 = s1 * t0;
    t1 = s0 * t0;
    d1 = d1 - t1;
    t0 = avx_vshufps<0xF5, f32v4>(b0, a0);
    d2 = s0 * t0;
    t1 = s2 * t0;
    d0 = d0 - t1;
    t0 = avx_vshufps<0x0A, f32v4>(b0, ab);
    t1 = s3 * t0;
    d0 = d0 + t1;
    d0 = d0 / dt;
    t1 = s0 * t0;
    d3 = d3 - t1;
    t0 = avx_vshufps<0xAA, f32v4>(ab, a0);
    t1 = s2 * t0;
    d1 = d1 + t1;
    t1 = s1 * t0;
    d2 = d2 - t1;
    t0 = avx_vshufps<0x5F, f32v4>(b0, a0);
    t1 = s1 * t0;
    d3 = d3 + t1;
    t1 = s3 * t0;
    d1 = d1 - t1;
    d1 = d1 / dt;
    t0 = avx_vshufps<0x0F, f32v4>(ab, a0);
    t1 = s3 * t0;
    d2 = d2 + t1;
    d2 = d2 / dt;
    t1 = s2 * t0;
    d3 = d3 - t1;
    d3 = d3 / dt;
    return {d0, d1, d2, d3};
  }


  inline bool inverse (f32 threshold = 0.0000001F) noexcept {
    f32v4 a0, b0, ab, t0, t1, t2, dt, s0, s1, s2, s3;
    t0 = avx_vshufps<0x40, f32v4>(c0, c0);
    t1 = avx_vshufps<0xF9, f32v4>(c1, c1);
    t0 = t0 * t1;
    t1 = avx_vshufps<0x40, f32v4>(c1, c1);
    t2 = avx_vshufps<0xF9, f32v4>(c0, c0);
    t1 = t1 * t2;
    a0 = t0 - t1;
    t0 = avx_vshufps<0x16, f32v4>(c2, c2);
    t1 = avx_vshufps<0xAF, f32v4>(c3, c3);
    t0 = t0 * t1;
    t1 = avx_vshufps<0x16, f32v4>(c3, c3);
    t2 = avx_vshufps<0xAF, f32v4>(c2, c2);
    t1 = t1 * t2;
    b0 = t0 - t1;
    t0 = avx_vshufps<0x09, f32v4>(c0, c2);
    t1 = avx_vshufps<0x7E, f32v4>(c1, c3);
    t0 = t0 * t1;
    t1 = avx_vshufps<0x09, f32v4>(c1, c3);
    t2 = avx_vshufps<0x7E, f32v4>(c0, c2);
    t1 = t1 * t2;
    ab = t0 - t1;
    t0 = avx_vinsertps<0x6A, f32v4>(ab, ab);
    t1 = avx_vshufps<0x32, f32v4>(ab, ab);
    t0 = t0 * t1;
    dt = a0 * b0;
    dt = avx_vhsubps<f32v4>(dt, t0);
    dt = avx_vhaddps<f32v4>(dt, dt);
    dt = avx_vhaddps<f32v4>(dt, dt);
    t2 = avx_vxorps<f32v4>(dt, dt);
    t0 = avx_vsubss<f32v4>(t2, dt);
    t0 = avx_vmaxss<f32v4>(t0, dt);
    if (t0[0] < threshold)
      return false;
    c0 = t2 - c0;
    c2 = t2 - c2;
    t0 = avx_vunpcklps<f32v4>(c0, c1);
    t1 = avx_vunpcklps<f32v4>(c2, c3);
    s0 = avx_vshufps<0x22, f32v4>(t0, t1);
    s1 = avx_vshufps<0x77, f32v4>(t0, t1);
    t0 = avx_vunpckhps<f32v4>(c0, c1);
    t1 = avx_vunpckhps<f32v4>(c2, c3);
    s2 = avx_vshufps<0x22, f32v4>(t0, t1);
    s3 = avx_vshufps<0x77, f32v4>(t0, t1);
    c1 = t2;
    c3 = t2;
    t0 = avx_vshufps<0x50, f32v4>(b0, ab);
    c0 = s1 * t0;
    t1 = s0 * t0;
    c1 = c1 - t1;
    t0 = avx_vshufps<0xF5, f32v4>(b0, a0);
    c2 = s0 * t0;
    t1 = s2 * t0;
    c0 = c0 - t1;
    t0 = avx_vshufps<0x0A, f32v4>(b0, ab);
    t1 = s3 * t0;
    c0 = c0 + t1;
    c0 = c0 / dt;
    t1 = s0 * t0;
    c3 = c3 - t1;
    t0 = avx_vshufps<0xAA, f32v4>(ab, a0);
    t1 = s2 * t0;
    c1 = c1 + t1;
    t1 = s1 * t0;
    c2 = c2 - t1;
    t0 = avx_vshufps<0x5F, f32v4>(b0, a0);
    t1 = s1 * t0;
    c3 = c3 + t1;
    t1 = s3 * t0;
    c1 = c1 - t1;
    c1 = c1 / dt;
    t0 = avx_vshufps<0x0F, f32v4>(ab, a0);
    t1 = s3 * t0;
    c2 = c2 + t1;
    c2 = c2 / dt;
    t1 = s2 * t0;
    c3 = c3 - t1;
    c3 = c3 / dt;
    return true;
  }


  inline static Mat4 zero () noexcept {
    f32v4 r;
    r = avx_vxorps<f32v4>(r, r);
    return {r, r, r, r};
  }
  inline static Mat4 identity () noexcept {
    auto a0 = avx_vinsertps<0x0E, f32v4>(1.0F, 1.0F);
    auto a1 = avx_vinsertps<0x1D, f32v4>(1.0F, 1.0F);
    auto a2 = avx_vinsertps<0x2B, f32v4>(1.0F, 1.0F);
    auto a3 = avx_vinsertps<0x37, f32v4>(1.0F, 1.0F);
    return {a0, a1, a2, a3};
  }
  inline static Mat4 translate (Vec3 p) noexcept {
    auto a0 = avx_vinsertps<0x0E, f32v4>(1.0F, 1.0F);
    auto a1 = avx_vinsertps<0x1D, f32v4>(1.0F, 1.0F);
    auto a2 = avx_vinsertps<0x2B, f32v4>(1.0F, 1.0F);
    auto a3 = avx_vinsertps<0x30, f32v4>(p.p, 1.0F);
    return {a0, a1, a2, a3};
  }
  inline static Mat4 rotatex (f32 a) noexcept {
    auto s = sinf(a);
    auto c = cosf(a);
    auto a0 = avx_vinsertps<0x0E, f32v4>(1.0F, 1.0F);
    auto a1 = avx_vinsertps<0x10, f32v4>(c, c);
    a1 = avx_vinsertps<0x29, f32v4>(a1, s);
    auto a2 = avx_vinsertps<0x10, f32v4>(c, -s);
    a2 = avx_vinsertps<0x29, f32v4>(a2, c);
    auto a3 = avx_vinsertps<0x37, f32v4>(1.0F, 1.0F);
    return {a0, a1, a2, a3};
  }
  inline static Mat4 rotatey (f32 a) noexcept {
    auto s = sinf(a);
    auto c = cosf(a);
    auto a0 = avx_vinsertps<0x2A, f32v4>(c, -s);
    auto a1 = avx_vinsertps<0x1D, f32v4>(1.0F, 1.0F);
    auto a2 = avx_vinsertps<0x2A, f32v4>(s, c);
    auto a3 = avx_vinsertps<0x37, f32v4>(1.0F, 1.0F);
    return {a0, a1, a2, a3};
  }
  inline static Mat4 rotatez (f32 a) noexcept {
    auto s = sinf(a);
    auto c = cosf(a);
    auto a0 = avx_vinsertps<0x1C, f32v4>(c, s);
    auto a1 = avx_vinsertps<0x1C, f32v4>(-s, c);
    auto a2 = avx_vinsertps<0x2B, f32v4>(1.0F, 1.0F);
    auto a3 = avx_vinsertps<0x37, f32v4>(1.0F, 1.0F);
    return {a0, a1, a2, a3};
  }
  inline static Mat4 rotatexp (f32 a, Vec3 p) noexcept {
    auto s = sinf(a);
    auto c = cosf(a);
    auto a0 = avx_vinsertps<0x0E, f32v4>(1.0F, 1.0F);
    auto a1 = avx_vinsertps<0x10, f32v4>(c, c);
    a1 = avx_vinsertps<0x29, f32v4>(a1, s);
    auto a2 = avx_vinsertps<0x10, f32v4>(c, -s);
    a2 = avx_vinsertps<0x29, f32v4>(a2, c);
    auto p1 = avx_vshufps<0x55, f32v4>(p.p, p.p);
    auto p2 = avx_vshufps<0xAA, f32v4>(p.p, p.p);
    auto a3 = p.p - a1*p1 - a2*p2;
    a3 = avx_vinsertps<0x31, f32v4>(a3, 1.0F);
    return {a0, a1, a2, a3};
  }
  inline static Mat4 rotateyp (f32 a, Vec3 p) noexcept {
    auto s = sinf(a);
    auto c = cosf(a);
    auto a0 = avx_vinsertps<0x2A, f32v4>(c, -s);
    auto a1 = avx_vinsertps<0x1D, f32v4>(1.0F, 1.0F);
    auto a2 = avx_vinsertps<0x2A, f32v4>(s, c);
    auto p0 = avx_vshufps<0x00, f32v4>(p.p, p.p);
    auto p2 = avx_vshufps<0xAA, f32v4>(p.p, p.p);
    auto a3 = p.p - a0*p0 - a2*p2;
    a3 = avx_vinsertps<0x32, f32v4>(a3, 1.0F);
    return {a0, a1, a2, a3};
  }
  inline static Mat4 rotatezp (f32 a, Vec3 p) noexcept {
    auto s = sinf(a);
    auto c = cosf(a);
    auto a0 = avx_vinsertps<0x1C, f32v4>(c, s);
    auto a1 = avx_vinsertps<0x1C, f32v4>(-s, c);
    auto a2 = avx_vinsertps<0x2B, f32v4>(1.0F, 1.0F);
    auto p0 = avx_vshufps<0x00, f32v4>(p.p, p.p);
    auto p1 = avx_vshufps<0x55, f32v4>(p.p, p.p);
    auto a3 = p.p - a0*p0 - a1*p1;
    a3 = avx_vinsertps<0x34, f32v4>(a3, 1.0F);
    return {a0, a1, a2, a3};
  }
  inline static Mat4 rotatev (f32 a, Vec3 v) noexcept {
    auto s = sinf(a);
    auto c = 1.0F - cosf(a);
    auto c4 = avx_vshufps<0x00, f32v4>(c, c); // [c, c, c, c]
    auto s4 = avx_vshufps<0x00, f32v4>(s, s); // [s, s, s, s]
    auto v0 = avx_vinsertps<0x08, f32v4>(v.p, v.p); // [x, y, z, 0]
    auto cv = c4 * v0; // [cx, cy, cz, 0]
    auto sv = s4 * v0; // [sx, sy, sz, 0]
    auto sn = -sv; // [-sx, -sy, -sz, 0]
    auto x3 = avx_vshufps<0xC0, f32v4>(v0, v0); // [x, x, x, 0]
    auto a0 = cv * x3; // [cxx, cyx, czx, 0]
    auto b0 = avx_vinsertps<0x90, f32v4>(c4, sv); // [c, sz, c, c]
    b0 = avx_vinsertps<0x68, f32v4>(b0, sn); // [c, sz, -sy, 0]
    a0 = a0 + b0; // [cxx+c, cyx+sz, czx-sy, 0]
    auto y3 = avx_vshufps<0xD5, f32v4>(v0, v0); // [y, y, y, 0]
    auto a1 = cv * y3; // [cxy, cyy, czy, 0]
    auto b1 = avx_vinsertps<0x80, f32v4>(c4, sn); // [-sz, c, c, c]
    b1 = avx_vinsertps<0x28, f32v4>(b1, sv); // [-sz, c, sx, 0]
    a1 = a1 + b1; // [cxy-sz, cyy+c, czy+sx, 0]
    auto z3 = avx_vshufps<0xEA, f32v4>(v0, v0); // [z, z, z, 0]
    auto a2 = cv * z3; // [cxz, cyz, czz, 0]
    auto b2 = avx_vinsertps<0x40, f32v4>(c4, sv); // [sy, c, c, c]
    b2 = avx_vinsertps<0x18, f32v4>(b2, sn); // [sy, -sx, c, 0]
    a2 = a2 + b2; // [cxz+sy, cyz-sx, czz+c, 0]
    auto a3 = avx_vinsertps<0x37, f32v4>(1.0F, 1.0F); // [0, 0, 0, 1]
    return {a0, a1, a2, a3};
  }
  inline static Mat4 rotatevp (f32 a, Vec3 v, Vec3 p) noexcept {
    auto s = sinf(a);
    auto c = 1.0F - cosf(a);
    auto c4 = avx_vshufps<0x00, f32v4>(c, c); // [c, c, c, c]
    auto s4 = avx_vshufps<0x00, f32v4>(s, s); // [s, s, s, s]
    auto v0 = avx_vinsertps<0x08, f32v4>(v.p, v.p); // [x, y, z, 0]
    auto cv = c4 * v0; // [cx, cy, cz, 0]
    auto sv = s4 * v0; // [sx, sy, sz, 0]
    auto sn = -sv; // [-sx, -sy, -sz, 0]
    auto x3 = avx_vshufps<0xC0, f32v4>(v0, v0); // [x, x, x, 0]
    auto a0 = cv * x3; // [cxx, cyx, czx, 0]
    auto b0 = avx_vinsertps<0x90, f32v4>(c4, sv); // [c, sz, c, c]
    b0 = avx_vinsertps<0x68, f32v4>(b0, sn); // [c, sz, -sy, 0]
    a0 = a0 + b0; // [cxx+c, cyx+sz, czx-sy, 0]
    b0 = a0 * x3;
    auto a3 = v0 - b0;
    auto y3 = avx_vshufps<0xD5, f32v4>(v0, v0); // [y, y, y, 0]
    auto a1 = cv * y3; // [cxy, cyy, czy, 0]
    auto b1 = avx_vinsertps<0x80, f32v4>(c4, sn); // [-sz, c, c, c]
    b1 = avx_vinsertps<0x28, f32v4>(b1, sv); // [-sz, c, sx, 0]
    a1 = a1 + b1; // [cxy-sz, cyy+c, czy+sx, 0]
    b1 = a1 * y3;
    a3 = a3 - b1;
    auto z3 = avx_vshufps<0xEA, f32v4>(v0, v0); // [z, z, z, 0]
    auto a2 = cv * z3; // [cxz, cyz, czz, 0]
    auto b2 = avx_vinsertps<0x40, f32v4>(c4, sv); // [sy, c, c, c]
    b2 = avx_vinsertps<0x18, f32v4>(b2, sn); // [sy, -sx, c, 0]
    a2 = a2 + b2; // [cxz+sy, cyz-sx, czz+c, 0]
    b2 = a2 * z3;
    a3 = a3 - b2;
    a3 = avx_vinsertps<0x30, f32v4>(a3, 1.0F);
    return {a0, a1, a2, a3};
  }
  inline static Mat4 scale (Vec3 t) noexcept {
    auto a0 = avx_vinsertps<0x0E, f32v4>(t.p, t.p);
    auto a1 = avx_vinsertps<0x5D, f32v4>(t.p, t.p);
    auto a2 = avx_vinsertps<0xAB, f32v4>(t.p, t.p);
    auto a3 = avx_vinsertps<0x37, f32v4>(t.p, 1.0F);
    return {a0, a1, a2, a3};
  }
  inline static Mat4 scalep (Vec3 t, Vec3 p) noexcept {
    auto a0 = avx_vinsertps<0x0E, f32v4>(t.p, t.p);
    auto a1 = avx_vinsertps<0x5D, f32v4>(t.p, t.p);
    auto a2 = avx_vinsertps<0xAB, f32v4>(t.p, t.p);
    auto a3 = t.p * p.p;
    a3 = p.p - a3;
    a3 = avx_vinsertps<0x37, f32v4>(a3, 1.0F);
    return {a0, a1, a2, a3};
  }
  inline static Mat4 scalev (f32 t, Vec3 v) noexcept {
    auto f = t - 1.0F;
    auto f4 = avx_vshufps<0x00, f32v4>(t, t);
    auto v0 = avx_vinsertps<0x08, f32v4>(v.p, v.p);
    auto fv = f4 * v0;
    auto x3 = avx_vshufps<0xC0, f32v4>(v0, v0);
    auto fx = fv * x3;
    auto a0 = avx_vinsertps<0x0E, f32v4>(1.0F, 1.0F);
    a0 = a0 + fx;
    auto y3 = avx_vshufps<0xD5, f32v4>(v0, v0);
    auto fy = fv * y3;
    auto a1 = avx_vinsertps<0x1D, f32v4>(1.0F, 1.0F);
    a1 = a1 + fy;
    auto z3 = avx_vshufps<0xEA, f32v4>(v0, v0);
    auto fz = fv * z3;
    auto a2 = avx_vinsertps<0x2B, f32v4>(1.0F, 1.0F);
    a2 = a2 + fz;
    auto a3 = avx_vinsertps<0x37, f32v4>(1.0F, 1.0F);
    return {a0, a1, a2, a3};
  }
  inline static Mat4 scalevp (f32 t, Vec3 v, Vec3 p) noexcept {
    auto f = t - 1.0F;
    auto f4 = avx_vshufps<0x00, f32v4>(t, t);
    auto v0 = avx_vinsertps<0x08, f32v4>(v.p, v.p);
    auto fv = f4 * v0;
    auto x3 = avx_vshufps<0xC0, f32v4>(v0, v0);
    auto fx = fv * x3;
    auto a0 = avx_vinsertps<0x0E, f32v4>(1.0F, 1.0F);
    a0 = a0 + fx;
    auto y3 = avx_vshufps<0xD5, f32v4>(v0, v0);
    auto fy = fv * y3;
    auto a1 = avx_vinsertps<0x1D, f32v4>(1.0F, 1.0F);
    a1 = a1 + fy;
    auto z3 = avx_vshufps<0xEA, f32v4>(v0, v0);
    auto fz = fv * z3;
    auto a2 = avx_vinsertps<0x2B, f32v4>(1.0F, 1.0F);
    a2 = a2 + fz;
    auto vp = avx_vdpps<0x77, f32v4>(v0, p.p);
    auto w3 = -vp;
    auto fw = fv * w3;
    auto a3 = avx_vinsertps<0x37, f32v4>(1.0F, 1.0F);
    a3 = a3 + fw;
    return {a0, a1, a2, a3};
  }
  inline static Mat4 skewx (Vec3 v) noexcept {
    auto x4 = avx_vshufps<0x00, f32v4>(v.p, v.p);
    auto n4 = v.p / x4;
    auto a0 = avx_vinsertps<0x08, f32v4>(n4, 1.0F);
    auto a1 = avx_vinsertps<0x1D, f32v4>(n4, 1.0F);
    auto a2 = avx_vinsertps<0x2B, f32v4>(n4, 1.0F);
    auto a3 = avx_vinsertps<0x37, f32v4>(n4, 1.0F);
    return {a0, a1, a2, a3};
  }
  inline static Mat4 skewy (Vec3 v) noexcept {
    auto y4 = avx_vshufps<0x55, f32v4>(v.p, v.p);
    auto n4 = v.p / y4;
    auto a0 = avx_vinsertps<0x0E, f32v4>(n4, 1.0F);
    auto a1 = avx_vinsertps<0x18, f32v4>(n4, 1.0F);
    auto a2 = avx_vinsertps<0x2B, f32v4>(n4, 1.0F);
    auto a3 = avx_vinsertps<0x37, f32v4>(n4, 1.0F);
    return {a0, a1, a2, a3};
  }
  inline static Mat4 skewz (Vec3 v) noexcept {
    auto z4 = avx_vshufps<0xAA, f32v4>(v.p, v.p);
    auto n4 = v.p / z4;
    auto a0 = avx_vinsertps<0x0E, f32v4>(n4, 1.0F);
    auto a1 = avx_vinsertps<0x1D, f32v4>(n4, 1.0F);
    auto a2 = avx_vinsertps<0x28, f32v4>(n4, 1.0F);
    auto a3 = avx_vinsertps<0x37, f32v4>(n4, 1.0F);
    return {a0, a1, a2, a3};
  }
  inline static Mat4 skewxp (Vec3 v, f32 p) noexcept {
    auto x4 = avx_vshufps<0x00, f32v4>(v.p, v.p);
    auto n4 = v.p / x4;
    auto a0 = avx_vinsertps<0x08, f32v4>(n4, 1.0F);
    auto a1 = avx_vinsertps<0x1D, f32v4>(n4, 1.0F);
    auto a2 = avx_vinsertps<0x2B, f32v4>(n4, 1.0F);
    auto p4 = avx_vshufps<0x00, f32v4>(p, p);
    auto dv = -p4 * n4;
    auto a3 = avx_vinsertps<0x31, f32v4>(dv, 1.0F);
    return {a0, a1, a2, a3};
  }
  inline static Mat4 skewyp (Vec3 v, f32 p) noexcept {
    auto y4 = avx_vshufps<0x55, f32v4>(v.p, v.p);
    auto n4 = v.p / y4;
    auto a0 = avx_vinsertps<0x0E, f32v4>(n4, 1.0F);
    auto a1 = avx_vinsertps<0x18, f32v4>(n4, 1.0F);
    auto a2 = avx_vinsertps<0x2B, f32v4>(n4, 1.0F);
    auto p4 = avx_vshufps<0x00, f32v4>(p, p);
    auto dv = -p4 * n4;
    auto a3 = avx_vinsertps<0x32, f32v4>(dv, 1.0F);
    return {a0, a1, a2, a3};
  }
  inline static Mat4 skewzp (Vec3 v, f32 p) noexcept {
    auto z4 = avx_vshufps<0xAA, f32v4>(v.p, v.p);
    auto n4 = v.p / z4;
    auto a0 = avx_vinsertps<0x0E, f32v4>(n4, 1.0F);
    auto a1 = avx_vinsertps<0x1D, f32v4>(n4, 1.0F);
    auto a2 = avx_vinsertps<0x28, f32v4>(n4, 1.0F);
    auto p4 = avx_vshufps<0x00, f32v4>(p, p);
    auto dv = -p4 * n4;
    auto a3 = avx_vinsertps<0x34, f32v4>(dv, 1.0F);
    return {a0, a1, a2, a3};
  }
  inline static Mat4 lookat (Vec3 s, Vec3 d, Vec3 u) noexcept {
    Vec3 z = s - d;
    z.normalize();
    Vec3 x = u ^ z;
    x.normalize();
    Vec3 y = z ^ x;
    y.normalize();
    auto a0 = avx_vshufps<0x00, f32v4>(x.p, z.p);
    a0 = avx_vinsertps<0x18, f32v4>(a0, y.p);
    auto a1 = avx_vshufps<0x55, f32v4>(x.p, z.p);
    a1 = avx_vinsertps<0x58, f32v4>(a1, y.p);
    auto a2 = avx_vshufps<0xAA, f32v4>(x.p, z.p);
    a2 = avx_vinsertps<0x98, f32v4>(a2, y.p);
    auto a3 = avx_vxorps<f32v4>(a2, a2);
    auto sx = avx_vshufps<0x00, f32v4>(s.p, s.p);
    auto dx = a0 * sx;
    a3 = a3 - dx;
    auto sy = avx_vshufps<0x55, f32v4>(s.p, s.p);
    auto dy = a1 * sy;
    a3 = a3 - dy;
    auto sz = avx_vshufps<0xAA, f32v4>(s.p, s.p);
    auto dz = a2 * sz;
    a3 = a3 - dz;
    a3 = avx_vinsertps<0x30, f32v4>(a3, 1.0F);
    return {a0, a1, a2, a3};
  }
  inline static Mat4 orthogonal (const Box3& frustum) noexcept {
    auto dv = frustum.max - frustum.min;
    auto av = frustum.min + frustum.max;
    auto i2 = avx_vshufps<0x00, f32v4>(2.0F, 2.0F);
    auto v2 = i2 / dv;
    auto a0 = avx_vinsertps<0x0E, f32v4>(v2, v2);
    auto a1 = avx_vinsertps<0x5D, f32v4>(v2, v2);
    auto a2 = avx_vinsertps<0xAB, f32v4>(v2, v2);
    auto a3 = av / dv;
    a3 = avx_vinsertps<0x30, f32v4>(a3, 1.0F);
    return {a0, a1, a2, a3};
  }
  inline static Mat4 perspective (f32 fovy, f32 aspect, f32 near, f32 far) noexcept {
    auto i = 1.0F / (near-far);
    auto a = 1.0F / tanf(0.5F*fovy);
    auto b = a / aspect;
    auto c = i * (far+near);
    auto d = -2.0F * i * far * near;
    auto a0 = avx_vinsertps<0x0E, f32v4>(b, b);
    auto a1 = avx_vinsertps<0x1D, f32v4>(a, a);
    auto a2 = avx_vinsertps<0x20, f32v4>(c, c);
    a2 = avx_vinsertps<0x33, f32v4>(a2, -1.0F);
    auto a3 = avx_vinsertps<0x2B, f32v4>(d, d);
    return {a0, a1, a2, a3};
  }
};


// Element accessors: Mat4::c<N>() returns element N in column-major order,
// i.e. N = 4*column + row, so c<4*C+R>() == column C, row R.
template<>
inline f32 Mat4::c<0> () const noexcept {
  return c0[0];
}
template<>
inline f32 Mat4::c<1> () const noexcept {
  return c0[1];
}
template<>
inline f32 Mat4::c<2> () const noexcept {
  return c0[2];
}
template<>
inline f32 Mat4::c<3> () const noexcept {
  return c0[3];
}
template<>
inline f32 Mat4::c<4> () const noexcept {
  return c1[0];
}
template<>
inline f32 Mat4::c<5> () const noexcept {
  return c1[1];
}
template<>
inline f32 Mat4::c<6> () const noexcept {
  return c1[2];
}
template<>
inline f32 Mat4::c<7> () const noexcept {
  return c1[3];
}
template<>
inline f32 Mat4::c<8> () const noexcept {
  return c2[0];
}
template<>
inline f32 Mat4::c<9> () const noexcept {
  return c2[1];
}
template<>
inline f32 Mat4::c<10> () const noexcept {
  return c2[2];
}
template<>
inline f32 Mat4::c<11> () const noexcept {
  return c2[3];
}
template<>
inline f32 Mat4::c<12> () const noexcept {
  return c3[0];
}
template<>
inline f32 Mat4::c<13> () const noexcept {
  return c3[1];
}
template<>
inline f32 Mat4::c<14> () const noexcept {
  return c3[2];
}
template<>
inline f32 Mat4::c<15> () const noexcept {
  return c3[3];
}


// Row-vector form: v * m left-multiplies v by m, which equals m^T * v.
inline Vec4 operator* (Vec4 v, const Mat4& m) noexcept {
  const auto transposed = ~m;
  return transposed * v;
}


CLOSE_JLIB_NS