
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
/*
 * C prototypes — emulate AArch32 VLDR/VSTR single (.32) and double (.64)
 * precision loads/stores by accessing the AArch64 V registers that alias
 * the AArch32 S/D registers:
 *
 * void em_vldr_32(u64 addr, u64 inx, u64 buf);
 * void em_vstr_32(u64 addr, u64 inx, u64 buf);
 * void em_vldr_64(u64 addr, u64 inx, u64 buf);
 * void em_vstr_64(u64 addr, u64 inx, u64 buf);
 */

/**
 * void em_vldr_32(u64 addr, u64 inx, u64 buf)
 *
 * Emulated AArch32 VLDR (single precision): load the 32-bit word at
 * *addr into AArch32 register S<inx>.
 *
 * AArch32 S[n] aliases AArch64 V[n / 4].s[n % 4].  Lane moves cannot be
 * indexed by a register, so dispatch through a table of fixed-size
 * entries (mov + ret = 8 bytes each).
 *
 * x0 - address to load from
 * x1 - FP register index; masked to [0..31]
 * x2 - scratch (holds the loaded word)
 * Clobbers: x0, x1, x2, one lane of v0-v7.
 */
ENTRY(em_vldr_32)
	ldr w2, [x0]		// w2 = *addr
	and x1, x1, #0x1F	// clamp index to 0..31
	lsl x1, x1, #3		// scale by entry size (8 bytes)
	adr x0, 0f		// x0 = start of dispatch table
	add x0, x0, x1
	br x0
0:
// s0, s1, s2, s3
	mov v0.s[0], w2
	ret
	mov v0.s[1], w2
	ret
	mov v0.s[2], w2
	ret
	mov v0.s[3], w2
	ret
// s4, s5, s6, s7
	mov v1.s[0], w2
	ret
	mov v1.s[1], w2
	ret
	mov v1.s[2], w2
	ret
	mov v1.s[3], w2
	ret
// s8, s9, s10, s11
	mov v2.s[0], w2
	ret
	mov v2.s[1], w2
	ret
	mov v2.s[2], w2
	ret
	mov v2.s[3], w2
	ret
// s12, s13, s14, s15
	mov v3.s[0], w2
	ret
	mov v3.s[1], w2
	ret
	mov v3.s[2], w2
	ret
	mov v3.s[3], w2
	ret
// s16, s17, s18, s19
	mov v4.s[0], w2
	ret
	mov v4.s[1], w2
	ret
	mov v4.s[2], w2
	ret
	mov v4.s[3], w2
	ret
// s20, s21, s22, s23
	mov v5.s[0], w2
	ret
	mov v5.s[1], w2
	ret
	mov v5.s[2], w2
	ret
	mov v5.s[3], w2
	ret
// s24, s25, s26, s27
	mov v6.s[0], w2
	ret
	mov v6.s[1], w2
	ret
	mov v6.s[2], w2
	ret
	mov v6.s[3], w2
	ret
// s28, s29, s30, s31
	mov v7.s[0], w2
	ret
	mov v7.s[1], w2
	ret
	mov v7.s[2], w2
	ret
	mov v7.s[3], w2
	ret
ENDPROC(em_vldr_32)

/**
 * void em_vstr_32(u64 addr, u64 inx, u64 buf)
 *
 * Emulated AArch32 VSTR (single precision): store AArch32 register
 * S<inx> to *addr.
 *
 * AArch32 S[n] aliases AArch64 V[n / 4].s[n % 4].  Lane moves cannot be
 * indexed by a register, so dispatch through a table of fixed-size
 * entries (mov + str + ret = 12 bytes each).
 *
 * x0 - address to store to
 * x1 - FP register index; masked to [0..31]
 * x2 - scratch (holds the word to store)
 * Clobbers: x1, x2.
 */
ENTRY(em_vstr_32)
	and x1, x1, #0x1F	// clamp index to 0..31
	add x1, x1, x1, lsl #1	// x1 *= 3
	lsl x1, x1, #2		// x1 *= 4: scale by entry size (12 bytes)
	adr x2, 0f		// x2 = start of dispatch table
	add x2, x2, x1
	br x2
0:
// s0
	mov w2, v0.s[0]
	str w2, [x0]
	ret
// s1
	mov w2, v0.s[1]
	str w2, [x0]
	ret
// s2
	mov w2, v0.s[2]
	str w2, [x0]
	ret
// s3
	mov w2, v0.s[3]
	str w2, [x0]
	ret
// s4
	mov w2, v1.s[0]
	str w2, [x0]
	ret
// s5
	mov w2, v1.s[1]
	str w2, [x0]
	ret
// s6
	mov w2, v1.s[2]
	str w2, [x0]
	ret
// s7
	mov w2, v1.s[3]
	str w2, [x0]
	ret
// s8
	mov w2, v2.s[0]
	str w2, [x0]
	ret
// s9
	mov w2, v2.s[1]
	str w2, [x0]
	ret
// s10
	mov w2, v2.s[2]
	str w2, [x0]
	ret
// s11
	mov w2, v2.s[3]
	str w2, [x0]
	ret
// s12
	mov w2, v3.s[0]
	str w2, [x0]
	ret
// s13
	mov w2, v3.s[1]
	str w2, [x0]
	ret
// s14
	mov w2, v3.s[2]
	str w2, [x0]
	ret
// s15
	mov w2, v3.s[3]
	str w2, [x0]
	ret
// s16
	mov w2, v4.s[0]
	str w2, [x0]
	ret
// s17
	mov w2, v4.s[1]
	str w2, [x0]
	ret
// s18
	mov w2, v4.s[2]
	str w2, [x0]
	ret
// s19
	mov w2, v4.s[3]
	str w2, [x0]
	ret
// s20
	mov w2, v5.s[0]
	str w2, [x0]
	ret
// s21
	mov w2, v5.s[1]
	str w2, [x0]
	ret
// s22
	mov w2, v5.s[2]
	str w2, [x0]
	ret
// s23
	mov w2, v5.s[3]
	str w2, [x0]
	ret
// s24
	mov w2, v6.s[0]
	str w2, [x0]
	ret
// s25
	mov w2, v6.s[1]
	str w2, [x0]
	ret
// s26
	mov w2, v6.s[2]
	str w2, [x0]
	ret
// s27
	mov w2, v6.s[3]
	str w2, [x0]
	ret
// s28
	mov w2, v7.s[0]
	str w2, [x0]
	ret
// s29
	mov w2, v7.s[1]
	str w2, [x0]
	ret
// s30
	mov w2, v7.s[2]
	str w2, [x0]
	ret
// s31
	mov w2, v7.s[3]
	str w2, [x0]
	ret
ENDPROC(em_vstr_32)


/**
 * void em_vldr_64(u64 addr, u64 inx, u64 buf)
 *
 * Emulated AArch32 VLDR (double precision): load the 64-bit value at
 * *addr into AArch32 register D<inx>.
 *
 * AArch32 D[n] aliases AArch64 V[n / 2].d[n % 2].  Lane moves cannot be
 * indexed by a register, so dispatch through a table of fixed-size
 * entries (mov + ret = 8 bytes each).
 *
 * x0 - address to load from
 * x1 - FP register index; masked to [0..31]
 * x2 - scratch (holds the loaded doubleword)
 * Clobbers: x0, x1, x2, one lane of v0-v15.
 */
ENTRY(em_vldr_64)
	ldr x2, [x0]		// x2 = *addr
	and x1, x1, #0x1F	// clamp index to 0..31
	lsl x1, x1, #3		// scale by entry size (8 bytes)
	adr x0, 0f		// x0 = start of dispatch table
	add x0, x0, x1
	br x0
0:
// d0, d1
	mov v0.d[0], x2
	ret
	mov v0.d[1], x2
	ret
// d2, d3
	mov v1.d[0], x2
	ret
	mov v1.d[1], x2
	ret
// d4, d5
	mov v2.d[0], x2
	ret
	mov v2.d[1], x2
	ret
// d6, d7
	mov v3.d[0], x2
	ret
	mov v3.d[1], x2
	ret
// d8, d9
	mov v4.d[0], x2
	ret
	mov v4.d[1], x2
	ret
// d10, d11
	mov v5.d[0], x2
	ret
	mov v5.d[1], x2
	ret
// d12, d13
	mov v6.d[0], x2
	ret
	mov v6.d[1], x2
	ret
// d14, d15
	mov v7.d[0], x2
	ret
	mov v7.d[1], x2
	ret
// d16, d17
	mov v8.d[0], x2
	ret
	mov v8.d[1], x2
	ret
// d18, d19
	mov v9.d[0], x2
	ret
	mov v9.d[1], x2
	ret
// d20, d21
	mov v10.d[0], x2
	ret
	mov v10.d[1], x2
	ret
// d22, d23
	mov v11.d[0], x2
	ret
	mov v11.d[1], x2
	ret
// d24, d25
	mov v12.d[0], x2
	ret
	mov v12.d[1], x2
	ret
// d26, d27
	mov v13.d[0], x2
	ret
	mov v13.d[1], x2
	ret
// d28, d29
	mov v14.d[0], x2
	ret
	mov v14.d[1], x2
	ret
// d30, d31
	mov v15.d[0], x2
	ret
	mov v15.d[1], x2
	ret
ENDPROC(em_vldr_64)

/**
 * void em_vstr_64(u64 addr, u64 inx, u64 buf)
 *
 * Emulated AArch32 VSTR (double precision): store AArch32 register
 * D<inx> to *addr.
 *
 * AArch32 D[n] aliases AArch64 V[n / 2].d[n % 2].  Lane moves cannot be
 * indexed by a register, so dispatch through a table of fixed-size
 * entries (mov + str + ret = 12 bytes each).
 *
 * x0 - address to store to
 * x1 - FP register index; masked to [0..31]
 * x2 - scratch (holds the doubleword to store)
 * Clobbers: x1, x2.
 */
ENTRY(em_vstr_64)
	and x1, x1, #0x1F	// clamp index to 0..31
	add x1, x1, x1, lsl #1	// x1 *= 3
	lsl x1, x1, #2		// x1 *= 4: scale by entry size (12 bytes)
	adr x2, 0f		// x2 = start of dispatch table
	add x2, x2, x1
	br x2
0:
// d0, d1
	mov x2, v0.d[0]
	str x2, [x0]
	ret
	mov x2, v0.d[1]
	str x2, [x0]
	ret
// d2, d3
	mov x2, v1.d[0]
	str x2, [x0]
	ret
	mov x2, v1.d[1]
	str x2, [x0]
	ret
// d4, d5
	mov x2, v2.d[0]
	str x2, [x0]
	ret
	mov x2, v2.d[1]
	str x2, [x0]
	ret
// d6, d7
	mov x2, v3.d[0]
	str x2, [x0]
	ret
	mov x2, v3.d[1]
	str x2, [x0]
	ret
// d8, d9
	mov x2, v4.d[0]
	str x2, [x0]
	ret
	mov x2, v4.d[1]
	str x2, [x0]
	ret
// d10, d11
	mov x2, v5.d[0]
	str x2, [x0]
	ret
	mov x2, v5.d[1]
	str x2, [x0]
	ret
// d12, d13
	mov x2, v6.d[0]
	str x2, [x0]
	ret
	mov x2, v6.d[1]
	str x2, [x0]
	ret
// d14, d15
	mov x2, v7.d[0]
	str x2, [x0]
	ret
	mov x2, v7.d[1]
	str x2, [x0]
	ret
// d16, d17
	mov x2, v8.d[0]
	str x2, [x0]
	ret
	mov x2, v8.d[1]
	str x2, [x0]
	ret
// d18, d19
	mov x2, v9.d[0]
	str x2, [x0]
	ret
	mov x2, v9.d[1]
	str x2, [x0]
	ret
// d20, d21
	mov x2, v10.d[0]
	str x2, [x0]
	ret
	mov x2, v10.d[1]
	str x2, [x0]
	ret
// d22, d23
	mov x2, v11.d[0]
	str x2, [x0]
	ret
	mov x2, v11.d[1]
	str x2, [x0]
	ret
// d24, d25
	mov x2, v12.d[0]
	str x2, [x0]
	ret
	mov x2, v12.d[1]
	str x2, [x0]
	ret
// d26, d27
	mov x2, v13.d[0]
	str x2, [x0]
	ret
	mov x2, v13.d[1]
	str x2, [x0]
	ret
// d28, d29
	mov x2, v14.d[0]
	str x2, [x0]
	ret
	mov x2, v14.d[1]
	str x2, [x0]
	ret
// d30, d31
	mov x2, v15.d[0]
	str x2, [x0]
	ret
	mov x2, v15.d[1]
	str x2, [x0]
	ret

ENDPROC(em_vstr_64)

