; nasm -f win64 vec_x64.asm
; x86_64-w64-mingw32-ld -b pe-x86-64 --subsystem windows --dll -e 0 -s -o vec_x64.dll vec_x64.obj

; x64 calling convention (Windows):
;   Native: rcx, rdx,  r8,  r9,                stack
;   Java:        rdx,  r8,  r9, rdi, rsi, rcx, stack
; x64 calling convention (Linux, macOS):
;   Native: rdi, rsi, rdx, rcx,  r8,  r9,      stack
;   Java:        rsi, rdx, rcx,  r8,  r9, rdi, stack

section .text

; | 0 1 2 3 | 0 1 2 3 |
;           | 4 5 6 7 |
;           | 8 9 A B | v(vec4) * m(mat4) => r(vec4)
;           | C D E F | r[0] = dot(v[0,1,2,3], m[0,4,8,C]); ...
; void matVecMulAvx(const float v[4]:rcx, const float m[16]:rdx, float r[4]:r8) {
;   r[0] = v[0]*m[0] + v[1]*m[4] + v[2]*m[ 8] + v[3]*m[12];
;   r[1] = v[0]*m[1] + v[1]*m[5] + v[2]*m[ 9] + v[3]*m[13];
;   r[2] = v[0]*m[2] + v[1]*m[6] + v[2]*m[10] + v[3]*m[14];
;   r[3] = v[0]*m[3] + v[1]*m[7] + v[2]*m[11] + v[3]*m[15];
; }
align 32
global matVecMulAvx
export matVecMulAvx
; r = v * m  (vec4 times row-major mat4, r[j] = sum_i v[i]*m[i*4+j])
; In:  rcx = v[4], rdx = m[16], r8 = r[4]  (floats, no alignment required)
; Clobbers: ymm0, ymm1, ymm4, ymm5 only (all volatile on Win64 and SysV)
matVecMulAvx:
	vbroadcastf128	ymm0, [rcx]				; ymm0: v0 v1 v2 v3 | v0 v1 v2 v3
	vmovups			ymm4, [rdx]				; ymm4: m0..m7  (rows 0,1)
	vmovups			ymm5, [rdx+0x20]		; ymm5: m8..mF  (rows 2,3)
	vpermilps		ymm1, ymm0, 0xfa		; ymm1: v2 v2 v3 v3 per lane
	vpermilps		ymm0, ymm0, 0x50		; ymm0: v0 v0 v1 v1 per lane
	vshufpd			ymm1, ymm1, ymm1, 0xc	; ymm1: v2 v2 v2 v2 | v3 v3 v3 v3
	vshufpd			ymm0, ymm0, ymm0, 0xc	; ymm0: v0 v0 v0 v0 | v1 v1 v1 v1
	vmulps			ymm0, ymm0, ymm4		; ymm0: v0*row0 | v1*row1
	vfmadd231ps		ymm0, ymm1, ymm5		; ymm0 += v2*row2 | v3*row3
	vextractf128	xmm1, ymm0, 1			; xmm1: high lane (v1*row1 + v3*row3)
	vaddps			xmm0, xmm0, xmm1		; xmm0: full dot products r0..r3
	vmovups			[r8], xmm0				; store r
	vzeroupper								; avoid AVX->SSE transition penalty
	ret

; | 0 1 2 3 |
; | 4 5 6 7 |
; | 8 9 A B | m(mat4) * M(mat4) => r(mat4)
; | C D E F | r[0] = dot(m[0,1,2,3], M[0,4,8,C]); ...
; void matMulAvx(const float m[16]:rcx, const float M[16]:rdx, float r[16]:r8)
align 32
global matMulAvx
export matMulAvx
; r = m * M  (row-major mat4 product, r[i*4+j] = sum_k m[i*4+k]*M[k*4+j])
; In:  rcx = m[16], rdx = M[16], r8 = r[16]  (floats, no alignment required)
; Clobbers: ymm0-ymm5 ONLY.  The Microsoft x64 ABI makes xmm6-xmm15
; callee-saved; the previous version clobbered ymm6 (and read it while
; uninitialized via vperm2f128, adding a false dependency), so the M rows
; are now loaded one at a time into ymm5 with vbroadcastf128 instead.
matMulAvx:
	vmovups			ymm0, [rcx]				; ymm0: m0..m7  (m rows 0,1)
	vmovups			ymm1, [rcx+0x20]		; ymm1: m8..mF  (m rows 2,3)
	vbroadcastf128	ymm5, [rdx]				; ymm5: M0 M1 M2 M3 x2 (M row 0)
	vshufps			ymm2, ymm0, ymm0, 0x00	; ymm2: m0 x4 | m4 x4
	vmulps			ymm2, ymm2, ymm5		; acc rows 0,1 = m0*Mrow0 | m4*Mrow0
	vshufps			ymm3, ymm1, ymm1, 0x00	; ymm3: m8 x4 | mC x4
	vmulps			ymm3, ymm3, ymm5		; acc rows 2,3 = m8*Mrow0 | mC*Mrow0
	vbroadcastf128	ymm5, [rdx+0x10]		; M row 1
	vshufps			ymm4, ymm0, ymm0, 0x55	; m1 x4 | m5 x4
	vfmadd231ps		ymm2, ymm4, ymm5		; acc01 += m1*Mrow1 | m5*Mrow1
	vshufps			ymm4, ymm1, ymm1, 0x55	; m9 x4 | mD x4
	vfmadd231ps		ymm3, ymm4, ymm5		; acc23 += m9*Mrow1 | mD*Mrow1
	vbroadcastf128	ymm5, [rdx+0x20]		; M row 2
	vshufps			ymm4, ymm0, ymm0, 0xaa	; m2 x4 | m6 x4
	vfmadd231ps		ymm2, ymm4, ymm5		; acc01 += m2*Mrow2 | m6*Mrow2
	vshufps			ymm4, ymm1, ymm1, 0xaa	; mA x4 | mE x4
	vfmadd231ps		ymm3, ymm4, ymm5		; acc23 += mA*Mrow2 | mE*Mrow2
	vbroadcastf128	ymm5, [rdx+0x30]		; M row 3
	vshufps			ymm0, ymm0, ymm0, 0xff	; m3 x4 | m7 x4 (m rows 0,1 now dead)
	vfmadd231ps		ymm2, ymm0, ymm5		; acc01 += m3*Mrow3 | m7*Mrow3
	vshufps			ymm1, ymm1, ymm1, 0xff	; mB x4 | mF x4 (m rows 2,3 now dead)
	vfmadd231ps		ymm3, ymm1, ymm5		; acc23 += mB*Mrow3 | mF*Mrow3
	vmovups			[r8], ymm2				; r rows 0,1
	vmovups			[r8+0x20], ymm3			; r rows 2,3
	vzeroupper								; avoid AVX->SSE transition penalty
	ret
