// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

////////////////////////////////////////
// LDSM macros: inline-PTX wrappers around the `ldmatrix` shared-memory
// matrix-load instruction (row-major and column-major/transposed forms)
////////////////////////////////////////

// PTX opcode strings for the non-transposed (row) ldmatrix forms.
// Each loads 1/2/4 8x8 matrices of 16-bit elements (.m8n8 ... .b16) from
// shared memory into one 32-bit register per matrix ({%0}, {%0,%1}, ...);
// the last operand is the shared-memory source address.
// NOTE(review): ldmatrix is a Turing-class (sm_75+) instruction — confirm
// the build's minimum target architecture.

// x1: one 8x8 b16 tile -> one 32-bit destination register.
#define LDSM_ROW_X1_OPCODE \
        "ldmatrix.sync.aligned.x1.m8n8.shared.b16 {%0}, [%1];\n"

// x2: two 8x8 b16 tiles -> two 32-bit destination registers.
#define LDSM_ROW_X2_OPCODE \
        "ldmatrix.sync.aligned.x2.m8n8.shared.b16 {%0,%1}, [%2];\n"

// x4: four 8x8 b16 tiles -> four 32-bit destination registers.
#define LDSM_ROW_X4_OPCODE \
        "ldmatrix.sync.aligned.x4.m8n8.shared.b16 {%0,%1,%2,%3}, [%4];\n"

// Statement macros emitting the row-form ldmatrix instructions.
//   _x0.._x3 : lvalues of 32-bit register width ("=r" outputs) receiving the
//              loaded fragments.
//   _addr    : source address operand ("r" input); presumably a shared-space
//              address (e.g. from __cvta_generic_to_shared) — ldmatrix
//              requires shared memory, confirm at call sites.
// Wrapped in do { ... } while (0) so each macro behaves as a single
// statement (safe in if/else without braces); callers terminate with `;`.
#define LDSM_ROW_X1_INST(_x0, _addr) \
        do { \
            asm volatile(LDSM_ROW_X1_OPCODE : "=r"(_x0) : "r"(_addr)); \
        } while (0)

#define LDSM_ROW_X2_INST(_x0, _x1, _addr) \
        do { \
            asm volatile(LDSM_ROW_X2_OPCODE : "=r"(_x0), "=r"(_x1) : "r"(_addr)); \
        } while (0)

#define LDSM_ROW_X4_INST(_x0, _x1, _x2, _x3, _addr) \
        do { \
            asm volatile(LDSM_ROW_X4_OPCODE : "=r"(_x0), "=r"(_x1), "=r"(_x2), "=r"(_x3) : "r"(_addr)); \
        } while (0)

// PTX opcode strings for the transposed (column) ldmatrix forms.
// Identical to the row forms except for the `.trans` qualifier, which makes
// ldmatrix transpose each 8x8 b16 tile as it is loaded from shared memory.
// NOTE(review): ldmatrix is a Turing-class (sm_75+) instruction — confirm
// the build's minimum target architecture.

// x1 transposed: one 8x8 b16 tile -> one 32-bit destination register.
#define LDSM_COL_X1_OPCODE \
        "ldmatrix.sync.aligned.x1.trans.m8n8.shared.b16 {%0}, [%1];\n"

// x2 transposed: two 8x8 b16 tiles -> two 32-bit destination registers.
#define LDSM_COL_X2_OPCODE \
        "ldmatrix.sync.aligned.x2.trans.m8n8.shared.b16 {%0,%1}, [%2];\n"

// x4 transposed: four 8x8 b16 tiles -> four 32-bit destination registers.
#define LDSM_COL_X4_OPCODE \
        "ldmatrix.sync.aligned.x4.trans.m8n8.shared.b16 {%0,%1,%2,%3}, [%4];\n"

// Statement macros emitting the transposed (column-form) ldmatrix
// instructions; operand contract matches the row-form macros:
//   _x0.._x3 : lvalues of 32-bit register width ("=r" outputs).
//   _addr    : source address operand ("r" input); presumably a shared-space
//              address — ldmatrix requires shared memory, confirm at call
//              sites.
// Wrapped in do { ... } while (0) so each macro behaves as a single
// statement (safe in if/else without braces); callers terminate with `;`.
#define LDSM_COL_X1_INST(_x0, _addr) \
        do { \
            asm volatile(LDSM_COL_X1_OPCODE : "=r"(_x0) : "r"(_addr)); \
        } while (0)

#define LDSM_COL_X2_INST(_x0, _x1, _addr) \
        do { \
            asm volatile(LDSM_COL_X2_OPCODE : "=r"(_x0), "=r"(_x1) : "r"(_addr)); \
        } while (0)

#define LDSM_COL_X4_INST(_x0, _x1, _x2, _x3, _addr) \
        do { \
            asm volatile(LDSM_COL_X4_OPCODE : "=r"(_x0), "=r"(_x1), "=r"(_x2), "=r"(_x3) : "r"(_addr)); \
        } while (0)
