  .text
  .align  2
  .global RV_sgemm_unloop

#-----------------------------------------------------------------------
# RV_sgemm_unloop -- packed-panel SGEMM micro-kernel (RISC-V Vector, e32/m4)
#
# Computes C = alpha * A * B + beta * C over pre-packed panels of A and B.
#
# In:   a0  = K   (depth of the multiply; body below assumes K >= 4 --
#                  LOOP4 runs once unconditionally; TODO confirm caller
#                  guarantees this)
#       a1  = packed A panel (advances 64 B = 16 floats per k-step;
#                  assumes VLMAX(e32, m4) == 16, i.e. VLEN == 128 --
#                  TODO confirm for the target core)
#       a2  = packed B panel (4 columns interleaved, 16 B per k-step)
#       a3  = C (written as four 64 B column strips per 4-wide tile)
#       a4  = row counter, decremented by 16 per outer iteration
#       a5  = column counter, decremented by 4 per tile
#                  (assumed a multiple of 4 -- TODO confirm)
#       fa0 = alpha
#       fa1 = beta (beta == 0 takes a store-only fast path)
#
# Clobbers: t0-t6, a1-a3, a6, a7, s8-s11, ft0-ft8, v0-v31.
# NOTE(review): s8-s11 are callee-saved in the standard RISC-V psABI and
# are clobbered here without being spilled. Confirm the caller saves them
# (or that a private calling convention is in use); adding spills here
# needs the target XLEN (sd vs sw) which this file does not reveal.
#-----------------------------------------------------------------------
RV_sgemm_unloop:

  vsetvli t0, zero, e32, m4, ta, ma    # vl = VLMAX: 32-bit elems, LMUL=4
  li      s11, 3                       # LOOP4 keeps going while s9 > 3
  li      s10, 1                       # LOOP2 runs while s9 > 1
  mv      a6, a1                       # a6 = A-panel base (rewound per tile)
  mv      a7, a2                       # a7 = B-panel base (rewound per strip)
  fcvt.s.w ft8, x0                     # ft8 = +0.0f, reference for beta test

LOOP_An_Bn:                            # outer loop: one 16-row strip of C
  mv      t6, a5                       # t6 = columns remaining in this strip

LOOP_A_Bn:                             # one 16x4 tile of C
  addi    t6, t6, -4

  # Clear the four accumulator columns.
  vxor.vv v16, v16, v16
  vxor.vv v20, v20, v20
  vxor.vv v24, v24, v24
  vxor.vv v28, v28, v28

  mv      s9, a0                       # s9 = K, counted down over the tile

LOOP4:                                 # main loop: 4 k-steps per iteration
  addi    s9, s9, -4                   # body runs at least once (K >= 4)
  addi    t0, a1, 64                   # A columns for k+1 .. k+3
  addi    t1, a1, 128
  addi    t2, a1, 192

  vle32.v v0,  (a1)                    # A strip at k
  vle32.v v4,  (t0)                    # A strip at k+1
  vle32.v v8,  (t1)                    # A strip at k+2
  vle32.v v12, (t2)                    # A strip at k+3

  addi    t3, a2, 16                   # B row for k+1 .. k+3
  addi    t4, a2, 32
  addi    t5, a2, 48

  flw     ft0, 0(a2)                   # B[k][0..3]
  flw     ft1, 4(a2)
  flw     ft2, 8(a2)
  flw     ft3, 12(a2)

  vfmacc.vf v16, ft0, v0               # acc[j] += B[k][j] * A[:,k]
  vfmacc.vf v20, ft1, v0
  vfmacc.vf v24, ft2, v0
  vfmacc.vf v28, ft3, v0

  flw     ft4, 0(t3)                   # B[k+1][0..3]
  flw     ft5, 4(t3)
  flw     ft6, 8(t3)
  flw     ft7, 12(t3)

  vfmacc.vf v16, ft4, v4
  vfmacc.vf v20, ft5, v4
  vfmacc.vf v24, ft6, v4
  vfmacc.vf v28, ft7, v4

  flw     ft0, 0(t4)                   # B[k+2][0..3]
  flw     ft1, 4(t4)
  flw     ft2, 8(t4)
  flw     ft3, 12(t4)

  vfmacc.vf v16, ft0, v8
  vfmacc.vf v20, ft1, v8
  vfmacc.vf v24, ft2, v8
  vfmacc.vf v28, ft3, v8

  flw     ft4, 0(t5)                   # B[k+3][0..3]
  flw     ft5, 4(t5)
  flw     ft6, 8(t5)
  flw     ft7, 12(t5)

  vfmacc.vf v16, ft4, v12
  vfmacc.vf v20, ft5, v12
  vfmacc.vf v24, ft6, v12
  vfmacc.vf v28, ft7, v12

  addi    a1, a1, 256                  # A advances 4 k-steps (4 * 64 B)
  addi    a2, a2, 64                   # B advances 4 k-steps (4 * 16 B)

  bgt     s9, s11, LOOP4               # loop while more than 3 k-steps left

LOOP2:                                 # optional 2-step tail (s9 in 0..3)
  ble     s9, s10, LOOP1               # if s9 <= s10(1), skip the pair
  addi    s9, s9, -2
  addi    t0, a1, 64
  vle32.v v0, (a1)
  vle32.v v4, (t0)

  flw     ft0, 0(a2)
  flw     ft1, 4(a2)
  flw     ft2, 8(a2)
  flw     ft3, 12(a2)

  addi    t3, a2, 16

  vfmacc.vf v16, ft0, v0
  vfmacc.vf v20, ft1, v0
  vfmacc.vf v24, ft2, v0
  vfmacc.vf v28, ft3, v0

  flw     ft4, 0(t3)
  flw     ft5, 4(t3)
  flw     ft6, 8(t3)
  flw     ft7, 12(t3)

  vfmacc.vf v16, ft4, v4
  vfmacc.vf v20, ft5, v4
  vfmacc.vf v24, ft6, v4
  vfmacc.vf v28, ft7, v4

  addi    a1, a1, 128
  addi    a2, a2, 32

LOOP1:                                 # optional final single k-step
  beqz    s9, MULTIPLY_alpha
  vle32.v v8, (a1)

  flw     ft0, 0(a2)
  flw     ft1, 4(a2)
  flw     ft2, 8(a2)
  flw     ft3, 12(a2)

  vfmacc.vf v16, ft0, v8
  vfmacc.vf v20, ft1, v8
  vfmacc.vf v24, ft2, v8
  vfmacc.vf v28, ft3, v8

  addi    a1, a1, 64
  addi    a2, a2, 16

MULTIPLY_alpha:                        # scale the accumulated tile by alpha
  vfmul.vf v16, v16, fa0
  vfmul.vf v20, v20, fa0
  vfmul.vf v24, v24, fa0
  vfmul.vf v28, v28, fa0

MULTIPLY_beta:
  feq.s   t5, fa1, ft8                 # t5 = 1 iff beta == 0.0
  beq     t5, zero, beta_NOTZERO

beta_ZERO:                             # beta == 0: plain store, no C read
  addi    t0, a3, 64
  addi    t1, a3, 128
  addi    t2, a3, 192
  vse32.v v16, (a3)
  vse32.v v20, (t0)
  vse32.v v24, (t1)
  vse32.v v28, (t2)
  addi    a3, a3, 256
  j       TILE_NEXT                    # BUGFIX: previously fell through into
                                       # beta_NOTZERO, which double-advanced
                                       # a3 and overwrote the NEXT tile of C

beta_NOTZERO:                          # C = alpha*A*B + beta*C
  addi    t0, a3, 64
  addi    t1, a3, 128
  addi    t2, a3, 192

  vle32.v v0,  (a3)                    # load existing C tile
  vle32.v v4,  (t0)
  vle32.v v8,  (t1)
  vle32.v v12, (t2)

  vfmacc.vf v16, fa1, v0               # acc += beta * C
  vfmacc.vf v20, fa1, v4
  vfmacc.vf v24, fa1, v8
  vfmacc.vf v28, fa1, v12

  vse32.v v16, (a3)
  vse32.v v20, (t0)
  vse32.v v24, (t1)
  vse32.v v28, (t2)

  addi    a3, a3, 256

TILE_NEXT:                             # shared per-tile pointer bookkeeping
  mv      s8, a1                       # s8 = A pointer past this panel's K sweep
  mv      a1, a6                       # rewind A to panel base for next tile
  bgtz    t6, LOOP_A_Bn

  addi    a4, a4, -16                  # one 16-row strip of C finished
  mv      a1, s8                       # advance A to the next 16-row panel
  mv      a6, s8
  mv      a2, a7                       # rewind B to its panel base
  bgtz    a4, LOOP_An_Bn

  ret