// Auto-generated file. Do not edit!
//   Template: src/f16-gemm/6x16-aarch64-neonfp16arith-ld32.S.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <xnnpack/assembly.h>

# void xnn_f16_gemminc_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld32(
#     size_t mr,                x0
#     size_t nc,                x1
#     size_t kc,                x2 / x0
#     const void* restrict a,    x3
#     size_t a_stride,          x4
#     const void* restrict w,    x5
#     void* restrict c,          x6
#     size_t cm_stride,         x7
#     size_t cn_stride,         [sp] -> (x8)

#     const void* restrict acc,   [sp + 8] -> x15
#     const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])  [sp + 16] -> (x8)
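#
# Example invocation (illustrative sketch only; the argument values below are
# assumptions, not taken from an actual call site). Note that kc is in bytes,
# so two half-floats of A per row correspond to kc = 2 * sizeof(uint16_t):
#
#     xnn_f16_gemminc_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld32(
#         /*mr=*/6, /*nc=*/16, /*kc=*/2 * sizeof(uint16_t),
#         a, a_stride, w, c, cm_stride, cn_stride, acc, &params);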

# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.

// Register usage
// A0  x3 v0
// A1  x9 v1
// A2 x10 v2
// A3 x11 v3
// A4 x12 v4
// A5  x4 v5
// B   x5 v16 v17 v18 v19

// C0  x6  v20 v21
// C1 x16  v22 v23
// C2 x17  v24 v25
// C3 x14  v26 v27
// C4 x13  v28 v29
// C5  x7  v30 v31
// clamp  v6, (v4), (v5)
// unused     v7
// unused A   v8 v9 v10 v11
// unused B   v12 v13 v14 v15
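//
// Each of the 6 output rows Cm is accumulated in a pair of q registers
// (2 x 8 half-floats = 16 columns), i.e. one 6x16 tile of C per iteration
// of the outer (nc) loop.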

BEGIN_FUNCTION xnn_f16_gemminc_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld32

        # Load acc, params pointer
        LDP         x15, x8, [sp, 8]

        # Clamp A and C pointers
        CMP         x0, 2                   // if mr < 2
        ADD         x9, x3, x4              // a1 = a0 + a_stride
        ADD         x16, x6, x7             // c1 = c0 + cm_stride
        CSEL        x9, x3, x9, LO          //   a1 = a0
        CSEL        x16, x6, x16, LO        //   c1 = c0

        # Load params
        LDR         s6, [x8]

        ADD         x10, x9, x4             // a2 = a1 + a_stride
        ADD         x17, x16, x7            // c2 = c1 + cm_stride
                                            // if mr <= 2
        CSEL        x10, x9, x10, LS        //   a2 = a1
        CSEL        x17, x16, x17, LS       //   c2 = c1

        CMP         x0, 4                   // if mr < 4
        ADD         x11, x10, x4            // a3 = a2 + a_stride
        ADD         x14, x17, x7            // c3 = c2 + cm_stride
        CSEL        x11, x10, x11, LO       //   a3 = a2
        CSEL        x14, x17, x14, LO       //   c3 = c2

        ADD         x12, x11, x4            // a4 = a3 + a_stride
        ADD         x13, x14, x7            // c4 = c3 + cm_stride
                                            // if mr <= 4
        CSEL        x12, x11, x12, LS       //   a4 = a3
        CSEL        x13, x14, x13, LS       //   c4 = c3

        CMP         x0, 6                   // if mr < 6
        ADD         x4, x12, x4             // a5 = a4 + a_stride
        ADD         x7, x13, x7             // c5 = c4 + cm_stride
        CSEL        x4, x12, x4, LO         //   a5 = a4
        CSEL        x7, x13, x7, LO         //   c5 = c4

        LDR         x8, [sp]                // load cn_stride


0:
        # Load initial accumulators
        LDP         q20, q21, [x15], 32
        LDP         q22, q23, [x15], 32
        LDP         q24, q25, [x15], 32
        LDP         q26, q27, [x15], 32
        LDP         q28, q29, [x15], 32
        LDP         q30, q31, [x15], 32

        # Are there at least 2 halffloats (4 bytes)?
        SUBS        x0, x2, 4               // k = kc - 4
        B.LO        3f

        .p2align 3
        # Main loop - 2 halffloats of A (4 bytes)
        # 24 FMA + 6 ld32 A + 4 LDR B
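        # Each iteration performs two rank-1 updates: the two half-floats loaded
        # per A row (v0.h[0..1] ... v5.h[0..1]) are broadcast against 16 columns
        # of B (q16:q17 for the first k step, q18:q19 for the second) and
        # accumulated into v20-v31.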
1:
        LDR         s0,  [x3], 4
        LDR         q16, [x5], 16
        LDR         q17, [x5], 16
        LDR         s1,  [x9], 4
        LDR         s2, [x10], 4
        LDR         s3, [x11], 4
        LDR         s4, [x12], 4
        LDR         s5,  [x4], 4
        LDR         q18, [x5], 16
        LDR         q19, [x5], 16
        SUBS        x0, x0, 4
        FMLA        v20.8h, v16.8h,  v0.h[0]
        FMLA        v22.8h, v16.8h,  v1.h[0]
        FMLA        v24.8h, v16.8h,  v2.h[0]
        FMLA        v26.8h, v16.8h,  v3.h[0]
        FMLA        v28.8h, v16.8h,  v4.h[0]
        FMLA        v30.8h, v16.8h,  v5.h[0]
        FMLA        v21.8h, v17.8h,  v0.h[0]
        FMLA        v23.8h, v17.8h,  v1.h[0]
        FMLA        v25.8h, v17.8h,  v2.h[0]
        FMLA        v27.8h, v17.8h,  v3.h[0]
        FMLA        v29.8h, v17.8h,  v4.h[0]
        FMLA        v31.8h, v17.8h,  v5.h[0]

        FMLA        v20.8h, v18.8h,  v0.h[1]
        FMLA        v22.8h, v18.8h,  v1.h[1]
        FMLA        v24.8h, v18.8h,  v2.h[1]
        FMLA        v26.8h, v18.8h,  v3.h[1]
        FMLA        v28.8h, v18.8h,  v4.h[1]
        FMLA        v30.8h, v18.8h,  v5.h[1]
        FMLA        v21.8h, v19.8h,  v0.h[1]
        FMLA        v23.8h, v19.8h,  v1.h[1]
        FMLA        v25.8h, v19.8h,  v2.h[1]
        FMLA        v27.8h, v19.8h,  v3.h[1]
        FMLA        v29.8h, v19.8h,  v4.h[1]
        FMLA        v31.8h, v19.8h,  v5.h[1]
        B.HS        1b

        # Is there a remainder? - 1 halffloat of A (2 bytes)
        TBNZ        x0, 1, 3f
2:
        # Clamp
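        # v6 holds the params loaded in the prologue: h[0] is the lower bound
        # (applied with FMAX) and h[1] the upper bound (applied with FMIN),
        # broadcast into v4 and v5 respectively.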
        DUP         v4.8h, v6.h[0]
        DUP         v5.8h, v6.h[1]
        FMAX        v20.8h, v20.8h, v4.8h
        FMAX        v21.8h, v21.8h, v4.8h
        FMAX        v22.8h, v22.8h, v4.8h
        FMAX        v23.8h, v23.8h, v4.8h
        FMAX        v24.8h, v24.8h, v4.8h
        FMAX        v25.8h, v25.8h, v4.8h
        FMAX        v26.8h, v26.8h, v4.8h
        FMAX        v27.8h, v27.8h, v4.8h
        FMAX        v28.8h, v28.8h, v4.8h
        FMAX        v29.8h, v29.8h, v4.8h
        FMAX        v30.8h, v30.8h, v4.8h
        FMAX        v31.8h, v31.8h, v4.8h
        SUBS        x1, x1, 16
        FMIN        v20.8h, v20.8h, v5.8h
        FMIN        v21.8h, v21.8h, v5.8h
        FMIN        v22.8h, v22.8h, v5.8h
        FMIN        v23.8h, v23.8h, v5.8h
        FMIN        v24.8h, v24.8h, v5.8h
        FMIN        v25.8h, v25.8h, v5.8h
        FMIN        v26.8h, v26.8h, v5.8h
        FMIN        v27.8h, v27.8h, v5.8h
        FMIN        v28.8h, v28.8h, v5.8h
        FMIN        v29.8h, v29.8h, v5.8h
        FMIN        v30.8h, v30.8h, v5.8h
        FMIN        v31.8h, v31.8h, v5.8h

        # Store full 6 x 16
        B.LO        4f

        ST1         {v30.16b, v31.16b},  [x7], x8
        SUB         x3,  x3, x2             // a0 -= kc
        ST1         {v28.16b, v29.16b}, [x13], x8
        SUB         x9,  x9, x2             // a1 -= kc
        ST1         {v26.16b, v27.16b}, [x14], x8
        SUB         x10, x10, x2            // a2 -= kc
        ST1         {v24.16b, v25.16b}, [x17], x8
        SUB         x11, x11, x2            // a3 -= kc
        ST1         {v22.16b, v23.16b}, [x16], x8
        SUB         x12, x12, x2            // a4 -= kc
        ST1         {v20.16b, v21.16b},  [x6], x8
        SUB         x4,  x4, x2             // a5 -= kc

        B.HI        0b
        RET

3:
        # Remainder - 1 halffloat of A (2 bytes)
        LDR         h0,  [x3], 2
        LDR         q16, [x5], 16
        LDR         q17, [x5], 16
        LDR         h1,  [x9], 2
        LDR         h2, [x10], 2
        LDR         h3, [x11], 2
        LDR         h4, [x12], 2
        LDR         h5,  [x4], 2
        FMLA        v20.8h, v16.8h,  v0.h[0]
        FMLA        v22.8h, v16.8h,  v1.h[0]
        FMLA        v24.8h, v16.8h,  v2.h[0]
        FMLA        v26.8h, v16.8h,  v3.h[0]
        FMLA        v28.8h, v16.8h,  v4.h[0]
        FMLA        v30.8h, v16.8h,  v5.h[0]
        FMLA        v21.8h, v17.8h,  v0.h[0]
        FMLA        v23.8h, v17.8h,  v1.h[0]
        FMLA        v25.8h, v17.8h,  v2.h[0]
        FMLA        v27.8h, v17.8h,  v3.h[0]
        FMLA        v29.8h, v17.8h,  v4.h[0]
        FMLA        v31.8h, v17.8h,  v5.h[0]
        B           2b

        # Store odd width
4:
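        # For a partial tile the low 4 bits of x1 (nc - 16, now negative) still
        # equal the remaining column count; each bit selects a tail store:
        # bit 3 -> 8 columns, bit 2 -> 4, bit 1 -> 2, bit 0 -> 1. After each
        # partial store the surviving lanes are shifted down for the next one.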
        TBZ         x1, 3, 5f
        STR         q30,  [x7], 16
        MOV         v30.16b, v31.16b
        STR         q28, [x13], 16
        MOV         v28.16b, v29.16b
        STR         q26, [x14], 16
        MOV         v26.16b, v27.16b
        STR         q24, [x17], 16
        MOV         v24.16b, v25.16b
        STR         q22, [x16], 16
        MOV         v22.16b, v23.16b
        STR         q20,  [x6], 16
        MOV         v20.16b, v21.16b

5:
        TBZ         x1, 2, 6f
        STR         d30,  [x7], 8
        STR         d28, [x13], 8
        DUP         d30, v30.d[1]
        DUP         d28, v28.d[1]
        STR         d26, [x14], 8
        STR         d24, [x17], 8
        DUP         d26, v26.d[1]
        DUP         d24, v24.d[1]
        STR         d22, [x16], 8
        STR         d20,  [x6], 8
        DUP         d22, v22.d[1]
        DUP         d20, v20.d[1]

6:
        TBZ         x1, 1, 7f
        STR         s30,  [x7], 4
        STR         s28, [x13], 4
        DUP         s30, v30.s[1]
        DUP         s28, v28.s[1]
        STR         s26, [x14], 4
        STR         s24, [x17], 4
        DUP         s26, v26.s[1]
        DUP         s24, v24.s[1]
        STR         s22, [x16], 4
        STR         s20,  [x6], 4
        DUP         s22, v22.s[1]
        DUP         s20, v20.s[1]

7:
        TBZ         x1, 0, 8f
        STR         h30,  [x7]
        STR         h28, [x13]
        STR         h26, [x14]
        STR         h24, [x17]
        STR         h22, [x16]
        STR         h20,  [x6]
8:
        RET

END_FUNCTION xnn_f16_gemminc_minmax_ukernel_6x16__asm_aarch64_neonfp16arith_ld32

#ifdef __ELF__
.section ".note.GNU-stack","",%progbits
#endif