/*
   [m1]        [m2]

| a b c |   | 1 2 3 |
| m n o | x | 4 5 6 | =
| x y z |   | 7 8 9 |


  first col m1        second col m1       third col m1
       *                   *                   *
  first line m2       second line m2      third line m2
| a * [ 1 2 3 ] |   | b * [ 4 5 6 ] |   | c * [ 7 8 9 ] |   
| m * [ 1 2 3 ] | + | n * [ 4 5 6 ] | + | o * [ 7 8 9 ] | = 
| x * [ 1 2 3 ] |   | y * [ 4 5 6 ] |   | z * [ 7 8 9 ] |   


        [mA]                    [mB]                   [mC]
    first col m1            second col m1           third col m1
         *                       *                       *
    first line m2           second line m2          third line m2
| {a*1} {a*2} {a*3} |   | {b*4} {b*5} {b*6} |   | {c*7} {c*8} {c*9} |
| {m*1} {m*2} {m*3} | + | {n*4} {n*5} {n*6} | + | {o*7} {o*8} {o*9} | = 
| {x*1} {x*2} {x*3} |   | {y*4} {y*5} {y*6} |   | {z*7} {z*8} {z*9} |


| {a*1 + b*4 + c*7} {a*2 + b*5 + c*8} {a*3 + b*6 + c*9} |
| {m*1 + n*4 + o*7} {m*2 + n*5 + o*8} {m*3 + n*6 + o*9} |
| {x*1 + y*4 + z*7} {x*2 + y*5 + z*8} {x*3 + y*6 + z*9} |

*/


/*
    [m1]          [m2]

| g h i j |   | 0 1 2 3 |
| m n o p | x | 4 5 6 7 | =
| s t u v |   | 8 9 A B |
| w x y z |   | C D E F |


         [mA]                  [mB]                  [mC]                  [mD]
     1st col m1            2nd col m1            3rd col m1            4th col m1
          *                     *                     *                     *
     1st line m2           2nd line m2           3rd line m2           4th line m2
| g * [ 0 1 2 3 ] |   | h * [ 4 5 6 7 ] |   | i * [ 8 9 A B ] |   | j * [ C D E F ] |
| m * [ 0 1 2 3 ] | + | n * [ 4 5 6 7 ] | + | o * [ 8 9 A B ] | + | p * [ C D E F ] | =
| s * [ 0 1 2 3 ] |   | t * [ 4 5 6 7 ] |   | u * [ 8 9 A B ] |   | v * [ C D E F ] |
| w * [ 0 1 2 3 ] |   | x * [ 4 5 6 7 ] |   | y * [ 8 9 A B ] |   | z * [ C D E F ] |


            [mA]                          [mB]                          [mC]                          [mD]
        1st col m1                    2nd col m1                    3rd col m1                    4th col m1
             *                             *                             *                             *
        1st line m2                   2nd line m2                   3rd line m2                   4th line m2
| {g*0} {g*1} {g*2} {g*3} |   | {h*4} {h*5} {h*6} {h*7} |   | {i*8} {i*9} {i*A} {i*B} |   | {j*C} {j*D} {j*E} {j*F} |
| {m*0} {m*1} {m*2} {m*3} | + | {n*4} {n*5} {n*6} {n*7} | + | {o*8} {o*9} {o*A} {o*B} | + | {p*C} {p*D} {p*E} {p*F} |
| {s*0} {s*1} {s*2} {s*3} |   | {t*4} {t*5} {t*6} {t*7} |   | {u*8} {u*9} {u*A} {u*B} |   | {v*C} {v*D} {v*E} {v*F} |
| {w*0} {w*1} {w*2} {w*3} |   | {x*4} {x*5} {x*6} {x*7} |   | {y*8} {y*9} {y*A} {y*B} |   | {z*C} {z*D} {z*E} {z*F} |


| {g*0 + h*4 + i*8 + j*C} {g*1 + h*5 + i*9 + j*D} {g*2 + h*6 + i*A + j*E} {g*3 + h*7 + i*B + j*F} |
| {m*0 + n*4 + o*8 + p*C} {m*1 + n*5 + o*9 + p*D} {m*2 + n*6 + o*A + p*E} {m*3 + n*7 + o*B + p*F} |
| {s*0 + t*4 + u*8 + v*C} {s*1 + t*5 + u*9 + v*D} {s*2 + t*6 + u*A + v*E} {s*3 + t*7 + u*B + v*F} |
| {w*0 + x*4 + y*8 + z*C} {w*1 + x*5 + y*9 + z*D} {w*2 + x*6 + y*A + z*E} {w*3 + x*7 + y*B + z*F} |

*/

.global multMatrix44FPU
multMatrix44FPU:
    VPUSH       {d8-d11}

    VLDMIA      r1!,{s16-s19}       // Load 1st line of m2  -> [ 0 1 2 3]
    VLDR.F32    s20,[r0,#16*0+0*4]  // Load 1st col of m1   -> g
    VLDR.F32    s21,[r0,#16*1+0*4]  // -> m
    VLDR.F32    s22,[r0,#16*2+0*4]  // -> s
    VLDR.F32    s23,[r0,#16*3+0*4]  // -> w

    VMUL.F32    s0 ,s20,s16 // = {g*0}
    VMUL.F32    s1 ,s20,s17 // = {g*1}
    VMUL.F32    s2 ,s20,s18 // = {g*2}
    VMUL.F32    s3 ,s20,s19 // = {g*3}

    VMUL.F32    s4 ,s21,s16 // = {m*0}
    VMUL.F32    s5 ,s21,s17 // = {m*1}
    VMUL.F32    s6 ,s21,s18 // = {m*2}
    VMUL.F32    s7 ,s21,s19 // = {m*3}

    VMUL.F32    s8 ,s22,s16 // = {s*0}
    VMUL.F32    s9 ,s22,s17 // = {s*1}
    VMUL.F32    s10,s22,s18 // = {s*2}
    VMUL.F32    s11,s22,s19 // = {s*3}

    VMUL.F32    s12,s23,s16 // = {w*0}
    VMUL.F32    s13,s23,s17 // = {w*1}
    VMUL.F32    s14,s23,s18 // = {w*2}
    VMUL.F32    s15,s23,s19 // = {w*3}

    VLDMIA      r1!,{s16-s19}       // Load 2nd line of m2  -> [ 4 5 6 7 ]
    VLDR.F32    s20,[r0,#16*0+1*4]  // Load 2nd col of m1   -> h
    VLDR.F32    s21,[r0,#16*1+1*4]  // -> n
    VLDR.F32    s22,[r0,#16*2+1*4]  // -> t
    VLDR.F32    s23,[r0,#16*3+1*4]  // -> x

    VMLA.F32    s0 ,s20,s16 // = {g*0} + {h*4}
    VMLA.F32    s1 ,s20,s17 // = {g*1} + {h*5}
    VMLA.F32    s2 ,s20,s18 // = {g*2} + {h*6}
    VMLA.F32    s3 ,s20,s19 // = {g*3} + {h*7}

    VMLA.F32    s4 ,s21,s16 // = {m*0} + {n*4}
    VMLA.F32    s5 ,s21,s17 // = {m*1} + {n*5}
    VMLA.F32    s6 ,s21,s18 // = {m*2} + {n*6}
    VMLA.F32    s7 ,s21,s19 // = {m*3} + {n*7}

    VMLA.F32    s8 ,s22,s16 // = {s*0} + {t*4}
    VMLA.F32    s9 ,s22,s17 // = {s*1} + {t*5}
    VMLA.F32    s10,s22,s18 // = {s*2} + {t*6}
    VMLA.F32    s11,s22,s19 // = {s*3} + {t*7}

    VMLA.F32    s12,s23,s16 // = {w*0} + {x*4}
    VMLA.F32    s13,s23,s17 // = {w*1} + {x*5}
    VMLA.F32    s14,s23,s18 // = {w*2} + {x*6}
    VMLA.F32    s15,s23,s19 // = {w*3} + {x*7}

    VLDMIA      r1!,{s16-s19}       // Load 3nd line of m2  -> [ 8 9 A B ]
    VLDR.F32    s20,[r0,#16*0+2*4]  // Load 3nd col of m1   -> i
    VLDR.F32    s21,[r0,#16*1+2*4]  // -> o
    VLDR.F32    s22,[r0,#16*2+2*4]  // -> u
    VLDR.F32    s23,[r0,#16*3+2*4]  // -> y

    VMLA.F32    s0 ,s20,s16 // = {g*0} + {h*4} + {i*8}
    VMLA.F32    s1 ,s20,s17 // = {g*1} + {h*5} + {i*9}
    VMLA.F32    s2 ,s20,s18 // = {g*2} + {h*6} + {i*A}
    VMLA.F32    s3 ,s20,s19 // = {g*3} + {h*7} + {i*B}

    VMLA.F32    s4 ,s21,s16 // = {m*0} + {n*4} + {o*8}
    VMLA.F32    s5 ,s21,s17 // = {m*1} + {n*5} + {o*9}
    VMLA.F32    s6 ,s21,s18 // = {m*2} + {n*6} + {o*A}
    VMLA.F32    s7 ,s21,s19 // = {m*3} + {n*7} + {o*B}

    VMLA.F32    s8 ,s22,s16 // = {s*0} + {t*4} + {u*8}
    VMLA.F32    s9 ,s22,s17 // = {s*1} + {t*5} + {u*9}
    VMLA.F32    s10,s22,s18 // = {s*2} + {t*6} + {u*A}
    VMLA.F32    s11,s22,s19 // = {s*3} + {t*7} + {u*B}

    VMLA.F32    s12,s23,s16 // = {w*0} + {x*4} + {y*8}
    VMLA.F32    s13,s23,s17 // = {w*1} + {x*5} + {y*9}
    VMLA.F32    s14,s23,s18 // = {w*2} + {x*6} + {y*A}
    VMLA.F32    s15,s23,s19 // = {w*3} + {x*7} + {y*B}

    VLDMIA      r1,{s16-s19}       // Load 4nd line of m2  -> [ C D E F ]
    VLDR.F32    s20,[r0,#16*0+3*4]  // Load 4nd col of m1   -> j
    VLDR.F32    s21,[r0,#16*1+3*4]  // -> p
    VLDR.F32    s22,[r0,#16*2+3*4]  // -> v
    VLDR.F32    s23,[r0,#16*3+3*4]  // -> z

    VMLA.F32    s0 ,s20,s16 // = {g*0} + {h*4} + {i*8} + {j*C}
    VMLA.F32    s1 ,s20,s17 // = {g*1} + {h*5} + {i*9} + {j*D}
    VMLA.F32    s2 ,s20,s18 // = {g*2} + {h*6} + {i*A} + {j*E}
    VMLA.F32    s3 ,s20,s19 // = {g*3} + {h*7} + {i*B} + {j*F}

    VMLA.F32    s4 ,s21,s16 // = {m*0} + {n*4} + {o*8} + {p*C}
    VMLA.F32    s5 ,s21,s17 // = {m*1} + {n*5} + {o*9} + {p*D}
    VMLA.F32    s6 ,s21,s18 // = {m*2} + {n*6} + {o*A} + {p*E}
    VMLA.F32    s7 ,s21,s19 // = {m*3} + {n*7} + {o*B} + {p*F}

    VMLA.F32    s8 ,s22,s16 // = {s*0} + {t*4} + {u*8} + {v*C}
    VMLA.F32    s9 ,s22,s17 // = {s*1} + {t*5} + {u*9} + {v*D}
    VMLA.F32    s10,s22,s18 // = {s*2} + {t*6} + {u*A} + {v*E}
    VMLA.F32    s11,s22,s19 // = {s*3} + {t*7} + {u*B} + {v*F}

    VMLA.F32    s12,s23,s16 // = {w*0} + {x*4} + {y*8} + {z*C}
    VMLA.F32    s13,s23,s17 // = {w*1} + {x*5} + {y*9} + {z*D}
    VMLA.F32    s14,s23,s18 // = {w*2} + {x*6} + {y*A} + {z*E}
    VMLA.F32    s15,s23,s19 // = {w*3} + {x*7} + {y*B} + {z*F}

    VPOP        {d8-d11}
    VSTMIA      r2,{s0-s15}
    BX          lr


/*
multMatrix44FPU:
    VPUSH       {d8-d12}            // Save registers

    VLDMIA      r2!,{s16-s23}       // Matrix p2 is put into the [S16-S23] registers
    VLDR.F32    s24,[r1,#16*0+0*4]  // p1[0][0]
    VLDR.F32    s25,[r1,#16*1+0*4]  // p1[1][0]

    VMUL.F32    s0,s24,s16
    VMUL.F32    s1,s24,s17
    VMUL.F32    s2,s24,s18
    VMUL.F32    s3,s24,s19
    VLDR.F32    s24,[r1,#16*2+0*4]  // p1[2][0]

    VMUL.F32    s4,s25,s16
    VMUL.F32    s5,s25,s17
    VMUL.F32    s6,s25,s18
    VMUL.F32    s7,s25,s19
    VLDR.F32    s25,[r1,#16*3+0*4]  // p1[3][0]

    VMUL.F32    s8,s24,s16
    VMUL.F32    s9,s24,s17
    VMUL.F32    s10,s24,s18
    VMUL.F32    s11,s24,s19
    VLDR.F32    s24,[r1,#16*0+1*4]  // p1[0][1]

    VMUL.F32    s12,s25,s16
    VMUL.F32    s13,s25,s17
    VMUL.F32    s14,s25,s18
    VMUL.F32    s15,s25,s19
    VLDR.F32    s25,[r1,#16*1+1*4]  // p1[1][1]

    VLDMIA      r2!,{s16-s19}       // Matrix p2 is put into the [S16-S19] registers

    VMLA.F32    s0,s24,s20
    VMLA.F32    s1,s24,s21
    VMLA.F32    s2,s24,s22
    VMLA.F32    s3,s24,s23
    VLDR.F32    s24,[r1,#16*2+1*4]  // p1[2][1]

    VMLA.F32    s4,s25,s20
    VMLA.F32    s5,s25,s21
    VMLA.F32    s6,s25,s22
    VMLA.F32    s7,s25,s23
    VLDR.F32    s25,[r1,#16*3+1*4]  // p1[3][1]

    VMLA.F32    s8,s24,s20
    VMLA.F32    s9,s24,s21
    VMLA.F32    s10,s24,s22
    VMLA.F32    s11,s24,s23
    VLDR.F32    s24,[r1,#16*0+2*4]  // p1[0][2]

    VMLA.F32    s12,s25,s20
    VMLA.F32    s13,s25,s21
    VMLA.F32    s14,s25,s22
    VMLA.F32    s15,s25,s23
    VLDR.F32    s25,[r1,#16*1+2*4]  // p1[1][2]

    VLDMIA      r2,{s20-s23}        // Matrix p2 is put into the [S20-S23] ARM VFP registers
    VMLA.F32    s0,s24,s16
    VMLA.F32    s1,s24,s17
    VMLA.F32    s2,s24,s18
    VMLA.F32    s3,s24,s19
    VLDR.F32    s24,[r1,#16*2+2*4]  // p1[2][2]

    VMLA.F32    s4,s25,s16
    VMLA.F32    s5,s25,s17
    VMLA.F32    s6,s25,s18
    VMLA.F32    s7,s25,s19
    VLDR.F32    s25,[r1,#16*3+2*4]  // p1[3][2]

    VMLA.F32    s8,s24,s16
    VMLA.F32    s9,s24,s17
    VMLA.F32    s10,s24,s18
    VMLA.F32    s11,s24,s19
    VLDR.F32    s24,[r1,#16*0+3*4]  // p1[0][3]

    VMLA.F32    s12,s25,s16
    VMLA.F32    s13,s25,s17
    VMLA.F32    s14,s25,s18
    VMLA.F32    s15,s25,s19
    VLDR.F32    s25,[r1,#16*1+3*4]  // p1[1][3]

    VMLA.F32    s0,s24,s20
    VMLA.F32    s1,s24,s21
    VMLA.F32    s2,s24,s22
    VMLA.F32    s3,s24,s23
    VLDR.F32    s24,[r1,#16*2+3*4]  // p1[2][3]

    VMLA.F32    s4,s25,s20
    VMLA.F32    s5,s25,s21
    VMLA.F32    s6,s25,s22
    VMLA.F32    s7,s25,s23
    VLDR.F32    s25,[r1,#16*3+3*4]  // p1[3][3]

    VMLA.F32    s8,s24,s20
    VMLA.F32    s9,s24,s21
    VMLA.F32    s10,s24,s22
    VMLA.F32    s11,s24,s23

    VMLA.F32    s12,s25,s20
    VMLA.F32    s13,s25,s21
    VMLA.F32    s14,s25,s22
    VMLA.F32    s15,s25,s23

    VPOP        {d8-d12}            // Register return
    VSTMIA      r0,{s0-s15}         // Store result
    BX          lr                  // Return

*/

/*

  transf v(x,y,z) by MAT4x4 =

| 00 01 02 03 |   | vx |   | vx*00 + vy*01 + vz*02 + 03 |
| 10 11 12 13 | * | vy | = | vx*10 + vy*11 + vz*12 + 13 |
| 20 21 22 23 |   | vz |   | vx*20 + vy*21 + vz*22 + 23 |
| 30 31 32 33 |   | 1  |   | vx*30 + vy*31 + vz*32 + 33 |

*/

.global transfByMatrix44FPU
transfByMatrix44FPU:
    VPUSH       {d8-d9}

    VLDMIA      r0,{s0-s15} //Load Matrix
    VLDMIA      r1,{s16-s18} //Load Vector

transfByMatrix44FPU_MID:
    //calc 03 + vz*02 + vy*01 + vx*00
    VMLA.F32    s3,s2,s18 // 03 += vz*02
    VMLA.F32    s3,s1,s17 // 03 += vy*01
    VMLA.F32    s3,s0,s16 // 03 += vx*00

    //calc 13 + vz*12 + vy*11 + vx*10
    VMLA.F32    s7,s6,s18 // 13 += vz*12
    VMLA.F32    s7,s5,s17 // 13 += vy*11
    VMLA.F32    s7,s4,s16 // 13 += vx*10

    //calc 23 + vz*22 + vy*21 + vx*20
    VMLA.F32    s11,s10,s18 // 23 += vz*22
    VMLA.F32    s11,s9,s17  // 23 += vy*21
    VMLA.F32    s11,s8,s16  // 23 += vx*20

    //calc 33 + vz*32 + vy*31 + vx*30
    VMLA.F32    s15,s14,s18 // 33 += vz*32
    VMLA.F32    s15,s13,s17 // 33 += vy*31
    VMLA.F32    s15,s12,s16 // 33 += vx*30

    VPOP        {d8-d9}

    //Result is accumulated at s3,s7,s11,s15. Storing...
    VSTR.F32    s3,[r2,#0]
    VSTR.F32    s7,[r2,#4]
    VSTR.F32    s11,[r2,#8]

    VMOV.F32    r0, s15
    
    BX          lr
    
/*
transfByMatrix44FPU:
    VPUSH       {d8-d9}             // Save registers
    
    VLDMIA      r1,{s0-s15}         // The entire pM matrix is put in the [S0-S15] registers
    VLDMIA      r2,{s16-s18}        // All vectors are put in the [S16-S18] registers
    
    VMLA.F32    s3,s0,s16
    VMLA.F32    s7,s4,s16
    VMLA.F32    s11,s8,s16
    VMLA.F32    s15,s12,s16
    
    VMLA.F32    s3,s1,s17
    VMLA.F32    s7,s5,s17
    VMLA.F32    s11,s9,s17
    VMLA.F32    s15,s13,s17
    
    VMLA.F32    s3,s2,s18
    VMLA.F32    s7,s6,s18
    VMLA.F32    s11,s10,s18
    VMLA.F32    s15,s14,s18 

    VPOP        {d8-d9}             // Register return
    
    VSTR.F32    s3,[r0,#0]
    VSTR.F32    s7,[r0,#4]
    VSTR.F32    s11,[r0,#8]         // Store result
    VSTR.F32    s15,[r0,#12]        // Store result
    
    BX          lr                  // Return
*/


.global transfV4ByMatrix44FPU
transfV4ByMatrix44FPU:
    VPUSH       {d8-d9}

    VLDMIA      r0,{s0-s15} //Load Matrix
    VLDMIA      r1,{s16-s19} //Load Vector

    //calc 03 * vw
    VMUL.F32    s3,s3,s19 // 03 = vw*03

    //calc 13 * vw
    VMUL.F32    s7,s7,s19 // 13 = vw*13

    //calc 23 * vw
    VMUL.F32    s11,s11,s19 // 23 = vw*23

    //calc 33 * vw
    VMUL.F32    s15,s15,s19 // 33 = vw*33

    B transfByMatrix44FPU_MID
