/*
 * Copyright © 2023 Bas Nieuwenhuizen
 *
 * SPDX-License-Identifier: MIT
 */

#include "nir_builder.h"
#include "radv_nir.h"

/* This pass lowers cooperative matrix.
 *
 * On GFX11, the A&B matrices need to be replicated: lanes 0..15 are replicated
 * to 16..31 and for wave64 also into lanes 32..47 and 48..63. A&B matrices are
 * always vectors of 16 elements.
 *
 * On GFX12, there is no data replication and the matrices layout is described
 * as below:
 *
 * Wave32:
 *         0..15  | 16..31 (lanes)
 * v0 lo:  row 0  | row 8
 * v0 hi:  row 1  | row 9
 * v1 lo:  row 2  | row 10
 * v1 hi:  row 3  | row 11
 * v2 lo:  row 4  | row 12
 * v2 hi:  row 5  | row 13
 * v3 lo:  row 6  | row 14
 * v3 hi:  row 7  | row 15
 *
 * Wave64:
 *         0..15 | 16..31 | 32..47 | 48..63 (lanes)
 * v0 lo:  row 0 | row 8  | row 4  | row 12
 * v0 hi:  row 1 | row 9  | row 5  | row 13
 * v1 lo:  row 2 | row 10 | row 6  | row 14
 * v1 hi:  row 3 | row 11 | row 7  | row 15
 *
 * Note that the GFX12 ISA doc describes other layouts for A/B, but they are identical
 * to the C layout with the exception of the order of the rows (columns for A).
 * And as long as these are swapped in the same way for both A and B, the muladd
 * result will be the same. So we use the C layout for all uses.
 */

/* Per-compilation state shared by all lowering helpers. */
typedef struct {
   enum amd_gfx_level gfx_level; /* target GPU generation (GFX11 vs GFX12 layouts differ) */
   unsigned wave_size;           /* subgroup size the shader is compiled for: 32 or 64 */
} lower_cmat_params;

/* Bit size of a single matrix element, derived from its GLSL base type. */
static unsigned
radv_nir_cmat_bits(struct glsl_cmat_description desc)
{
   const unsigned bits = glsl_base_type_bit_size(desc.element_type);
   return bits;
}

/* Number of vector components used per lane to store the lowered matrix. */
static unsigned
radv_nir_cmat_length(struct glsl_cmat_description desc, const lower_cmat_params *params)
{
   if (params->gfx_level >= GFX12) {
      /* GFX12 only supports 16x16 matrices; elements are spread evenly
       * across the wave with no replication. */
      assert(desc.cols == 16 && desc.rows == 16);
      return 256 / params->wave_size;
   }

   /* GFX11 A/B matrices are always 16-element vectors (replicated layout). */
   if (desc.use != GLSL_CMAT_USE_ACCUMULATOR)
      return 16;

   /* GFX11 accumulator: one VGPR per element, so 16-bit types still take a
    * full 32-bit slot each (hence the factor of 2 in vector components). */
   const unsigned elems_per_lane = desc.cols * desc.rows / params->wave_size;
   const unsigned slot_mul = radv_nir_cmat_bits(desc) == 16 ? 2 : 1;
   return elems_per_lane * slot_mul;
}

/* Stride between meaningful elements in the lowered vector. */
static unsigned
radv_nir_cmat_length_mul(struct glsl_cmat_description desc, const lower_cmat_params *params)
{
   /* On GFX12 (all uses) and for GFX11 A/B matrices every vector component
    * holds a real matrix element. */
   if (params->gfx_level >= GFX12)
      return 1;
   if (desc.use != GLSL_CMAT_USE_ACCUMULATOR)
      return 1;

   /* For GFX11 C matrices we have 1 VGPR per element even if the element
    * type is 16 bits. So with 8 fp16 elements we implement that with a
    * f16vec16. The coefficient returned here lets callers figure out how
    * many elements the vector really contains.
    */
   return radv_nir_cmat_bits(desc) == 16 ? 2 : 1;
}

/* Load the lowered vector value backing a cooperative-matrix deref. */
static nir_def *
radv_nir_load_cmat(nir_builder *b, const lower_cmat_params *params, nir_def *src)
{
   nir_deref_instr *deref = nir_instr_as_deref(src->parent_instr);
   const struct glsl_cmat_description desc = *glsl_get_cmat_description(deref->type);
   const unsigned num_components = radv_nir_cmat_length(desc, params);
   const unsigned bit_size = radv_nir_cmat_bits(desc);

   return nir_build_load_deref(b, num_components, bit_size, src, 0);
}

/* Recursively rewrite a GLSL type, replacing every cooperative-matrix type
 * with the flat vector type it is lowered to. Arrays and structs containing
 * matrices are rebuilt around the translated element/field types; all other
 * types are returned unchanged. type_map caches translated struct types so
 * the same input struct always maps to a single output struct.
 */
static const struct glsl_type *
radv_nir_translate_matrix_type(const struct glsl_type *orig_type, struct hash_table *type_map,
                               const lower_cmat_params *params)
{
   struct hash_entry *entry = _mesa_hash_table_search(type_map, orig_type);
   if (entry) {
      return entry->data;
   } else if (glsl_type_is_cmat(orig_type)) {
      struct glsl_cmat_description desc = *glsl_get_cmat_description(orig_type);
      unsigned length = radv_nir_cmat_length(desc, params);

      return glsl_vector_type(desc.element_type, length);
   } else if (glsl_type_is_array(orig_type)) {
      const struct glsl_type *elem_type = glsl_get_array_element(orig_type);
      const struct glsl_type *new_elem_type = radv_nir_translate_matrix_type(elem_type, type_map, params);

      /* If the element type is untouched, keep the original type so pointer
       * equality with unrelated code keeps working. */
      if (elem_type == new_elem_type)
         return orig_type;

      return glsl_array_type(new_elem_type, glsl_get_length(orig_type), glsl_get_explicit_stride(orig_type));
   } else if (glsl_type_is_struct(orig_type)) {
      unsigned num_fields = glsl_get_length(orig_type);

      /* Quick scan: only rebuild the struct if at least one field changes. */
      bool change = false;
      for (unsigned i = 0; i < num_fields; ++i) {
         const struct glsl_type *field_type = glsl_get_struct_field(orig_type, i);
         const struct glsl_type *new_field_type = radv_nir_translate_matrix_type(field_type, type_map, params);

         if (field_type != new_field_type) {
            change = true;
            break;
         }
      }

      if (!change)
         return orig_type;

      struct glsl_struct_field *fields = malloc(sizeof(struct glsl_struct_field) * num_fields);

      for (unsigned i = 0; i < num_fields; ++i) {
         fields[i] = *glsl_get_struct_field_data(orig_type, i);

         fields[i].type = radv_nir_translate_matrix_type(fields[i].type, type_map, params);
      }

      const struct glsl_type *ret =
         glsl_struct_type(fields, num_fields, glsl_get_type_name(orig_type), glsl_struct_type_is_packed(orig_type));
      free(fields);

      /* Cache only structs: cmat/array results are cheap to recompute. */
      _mesa_hash_table_insert(type_map, orig_type, (void *)ret);
      return ret;
   } else
      return orig_type;
}

/* Compute the first matrix row held by the invoking lane, following the
 * per-chip layouts documented at the top of this file. */
static nir_def *
radv_get_base_row(nir_builder *b, struct glsl_cmat_description desc, const lower_cmat_params *params,
                  nir_def *local_idx)
{
   if (params->gfx_level < GFX12) {
      /* GFX11: A/B matrices are replicated, so every lane group starts at
       * row 0; the accumulator starts at its 16-lane group index. */
      if (desc.use != GLSL_CMAT_USE_ACCUMULATOR)
         return nir_imm_int(b, 0);
      return nir_udiv_imm(b, local_idx, 16);
   }

   nir_def *group = nir_udiv_imm(b, local_idx, 16);

   if (params->wave_size == 64) {
      /* Switch rows from lanes 16..31 to 32..47: reversing the two group
       * bits maps groups 0,1,2,3 to 0,2,1,3, then scaling by 4 yields base
       * rows 0,8,4,12. */
      nir_def *swapped = nir_ushr_imm(b, nir_bitfield_reverse(b, group), 30);
      return nir_imul_imm(b, swapped, 4);
   }

   /* Wave32: lane groups 0,1 start at rows 0,8. */
   return nir_imul_imm(b, group, 8);
}

/* Convert src between GLSL base types. Non-native small float formats
 * (bfloat16, e4m3fn, e5m2) are routed through 32-bit float; sat only
 * affects the float -> fp8 conversions, which have saturating variants. */
static nir_def *
convert_base_type(nir_builder *b, nir_def *src, enum glsl_base_type src_type, enum glsl_base_type dst_type, bool sat)
{
   if (src_type == dst_type)
      return src;

   /* Widen a special source format to float first, then recurse. */
   switch (src_type) {
   case GLSL_TYPE_BFLOAT16:
      return convert_base_type(b, nir_bf2f(b, src), GLSL_TYPE_FLOAT, dst_type, sat);
   case GLSL_TYPE_FLOAT_E4M3FN:
      return convert_base_type(b, nir_e4m3fn2f(b, src), GLSL_TYPE_FLOAT, dst_type, sat);
   case GLSL_TYPE_FLOAT_E5M2:
      return convert_base_type(b, nir_e5m22f(b, src), GLSL_TYPE_FLOAT, dst_type, sat);
   default:
      break;
   }

   /* For a special destination format, convert to float then narrow. */
   switch (dst_type) {
   case GLSL_TYPE_BFLOAT16:
      return nir_f2bf(b, convert_base_type(b, src, src_type, GLSL_TYPE_FLOAT, sat));
   case GLSL_TYPE_FLOAT_E4M3FN:
      src = convert_base_type(b, src, src_type, GLSL_TYPE_FLOAT, sat);
      return sat ? nir_f2e4m3fn_sat(b, src) : nir_f2e4m3fn(b, src);
   case GLSL_TYPE_FLOAT_E5M2:
      src = convert_base_type(b, src, src_type, GLSL_TYPE_FLOAT, sat);
      return sat ? nir_f2e5m2_sat(b, src) : nir_f2e5m2(b, src);
   default:
      break;
   }

   /* Both types are natively supported: use the generic NIR conversion. */
   nir_op op = nir_type_conversion_op(nir_get_nir_type_for_glsl_base_type(src_type),
                                      nir_get_nir_type_for_glsl_base_type(dst_type), nir_rounding_mode_undef);

   return nir_build_alu1(b, op, src);
}

/* Convert a lowered matrix value between the A/B/accumulator register
 * layouts of the current chip (see the layout description at the top of the
 * file). On GFX12, B and accumulator share a layout, so only transposes
 * need work; on GFX11, moving between the replicated A/B layout and the
 * accumulator layout requires lane swizzles.
 */
static nir_def *
convert_use(nir_builder *b, nir_def *src, enum glsl_cmat_use src_use, enum glsl_cmat_use dst_use,
            const lower_cmat_params *params)
{
   if (src_use == dst_use)
      return src;
   if (params->gfx_level >= GFX12) {
      /* GFX12 B and accumulator layouts are identical. */
      if (src_use == GLSL_CMAT_USE_B && dst_use == GLSL_CMAT_USE_ACCUMULATOR)
         return src;
      if (src_use == GLSL_CMAT_USE_ACCUMULATOR && dst_use == GLSL_CMAT_USE_B)
         return src;
   }

   /* A <-> accumulator is handled as two steps through the B layout. */
   if (src_use == GLSL_CMAT_USE_A && dst_use == GLSL_CMAT_USE_ACCUMULATOR) {
      src = convert_use(b, src, GLSL_CMAT_USE_A, GLSL_CMAT_USE_B, params);
      return convert_use(b, src, GLSL_CMAT_USE_B, GLSL_CMAT_USE_ACCUMULATOR, params);
   } else if (src_use == GLSL_CMAT_USE_ACCUMULATOR && dst_use == GLSL_CMAT_USE_A) {
      src = convert_use(b, src, GLSL_CMAT_USE_ACCUMULATOR, GLSL_CMAT_USE_B, params);
      return convert_use(b, src, GLSL_CMAT_USE_B, GLSL_CMAT_USE_A, params);
   }

   nir_def *components[NIR_MAX_VEC_COMPONENTS] = {NULL};

   unsigned num_comps = src->num_components;
   for (unsigned i = 0; i < num_comps; i++)
      components[i] = nir_channel(b, src, i);

   if (src_use == GLSL_CMAT_USE_ACCUMULATOR && dst_use == GLSL_CMAT_USE_B) {
      /* GFX11 only: expand the accumulator layout to the replicated
       * 16-component B layout. Each expansion step doubles the component
       * count by pairing every value with its lane-swapped counterpart. */
      assert(params->gfx_level < GFX12);
      nir_def *tmp[NIR_MAX_VEC_COMPONENTS];

      if (src->bit_size == 32) {
         if (params->wave_size == 64) {
            /* First fold the upper 32 lanes into extra components. */
            nir_def *low_lanes = nir_inverse_ballot(b, 1, nir_imm_intN_t(b, UINT32_MAX, 64));
            for (int i = 0; i < num_comps; i++) {
               nir_def *comp = components[i];
               nir_def *half_swap = nir_rotate(b, comp, nir_imm_int(b, 32), .cluster_size = 64);

               tmp[i * 2] = nir_bcsel(b, low_lanes, comp, half_swap);
               tmp[i * 2 + 1] = nir_bcsel(b, low_lanes, half_swap, comp);
            }
            num_comps *= 2;
            memcpy(components, tmp, sizeof(components));
         }

         /* Then fold lanes 16..31 (per 32-lane half) the same way. */
         nir_def *low_lanes = nir_inverse_ballot(b, 1, nir_imm_intN_t(b, 0xffff0000ffffull, params->wave_size));
         for (int i = 0; i < num_comps; i++) {
            unsigned swap16 = 0x1f | (0x10 << 10);
            nir_def *half_swap = nir_masked_swizzle_amd(b, components[i], .swizzle_mask = swap16, .fetch_inactive = 1);
            tmp[i * 2] = nir_bcsel(b, low_lanes, components[i], half_swap);
            tmp[i * 2 + 1] = nir_bcsel(b, low_lanes, half_swap, components[i]);
         }

         num_comps *= 2;
         memcpy(components, tmp, sizeof(components));
      } else {
         /* Same as above, but operate on 32 bits at once, using byte_perm as vectorized bcsel. */
         assert(src->bit_size < 32);
         nir_def *packed = nir_extract_bits(b, components, num_comps, 0, num_comps / (32 / src->bit_size), 32);
         num_comps /= 32 / src->bit_size;
         for (unsigned i = 0; i < num_comps; i++)
            components[i] = nir_channel(b, packed, i);

         /* Byte selectors choosing the "keep" vs "swap" halves of a packed
          * dword, for 8-bit and 16-bit element packing respectively. */
         nir_def *low_sel = nir_imm_int(b, src->bit_size == 8 ? 0x05010400 : 0x05040100);
         nir_def *high_sel = nir_imm_int(b, src->bit_size == 8 ? 0x01050004 : 0x01000504);

         if (params->wave_size == 64) {
            nir_def *low_lanes = nir_inverse_ballot(b, 1, nir_imm_intN_t(b, UINT32_MAX, 64));
            nir_def *first_perm = nir_bcsel(b, low_lanes, low_sel, high_sel);
            nir_def *second_perm = nir_ior_imm(b, first_perm, 0x02020202);
            for (int i = 0; i < num_comps; i++) {
               nir_def *comp = components[i];
               nir_def *half_swap = nir_rotate(b, comp, nir_imm_int(b, 32), .cluster_size = 64);

               tmp[i * 2] = nir_byte_perm_amd(b, half_swap, comp, first_perm);
               tmp[i * 2 + 1] = nir_byte_perm_amd(b, half_swap, comp, second_perm);
            }
            num_comps *= 2;
            memcpy(components, tmp, sizeof(components));
         }

         nir_def *low_lanes = nir_inverse_ballot(b, 1, nir_imm_intN_t(b, 0xffff0000ffffull, params->wave_size));
         nir_def *first_perm = nir_bcsel(b, low_lanes, low_sel, high_sel);
         nir_def *second_perm = nir_ior_imm(b, first_perm, 0x02020202);
         for (int i = 0; i < num_comps; i++) {
            nir_def *comp = components[i];
            unsigned swap16 = 0x1f | (0x10 << 10);
            nir_def *half_swap = nir_masked_swizzle_amd(b, comp, .swizzle_mask = swap16, .fetch_inactive = 1);
            tmp[i * 2] = nir_byte_perm_amd(b, half_swap, comp, first_perm);
            tmp[i * 2 + 1] = nir_byte_perm_amd(b, half_swap, comp, second_perm);
         }
         num_comps *= 2;
         memcpy(components, tmp, sizeof(components));

         /* Unpack the dwords back into individual small elements. */
         nir_def *unpacked =
            nir_extract_bits(b, components, num_comps, 0, num_comps * (32 / src->bit_size), src->bit_size);
         num_comps *= 32 / src->bit_size;
         for (unsigned i = 0; i < num_comps; i++)
            components[i] = nir_channel(b, unpacked, i);
      }

      assert(num_comps == 16);
   } else if (src_use == GLSL_CMAT_USE_B && dst_use == GLSL_CMAT_USE_ACCUMULATOR) {
      /* GFX11 only: the inverse of the above — each halving step selects,
       * per lane group, which of two replicated components to keep. */
      assert(params->gfx_level < GFX12);
      assert(num_comps == 16);
      if (src->bit_size == 32) {
         for (unsigned keep32 = 0; keep32 < ((params->wave_size == 64) ? 2 : 1); keep32++) {
            nir_def *ballot = nir_imm_intN_t(b, keep32 ? UINT32_MAX : 0xffff0000ffffull, params->wave_size);
            nir_def *keep = nir_inverse_ballot(b, 1, ballot);
            num_comps /= 2;
            for (unsigned i = 0; i < num_comps; i++) {
               components[i] = nir_bcsel(b, keep, components[i * 2], components[i * 2 + 1]);
            }
         }
      } else {
         /* Same as above, but operate on 32 bits at once, using byte_perm as vectorized bcsel. */
         assert(src->bit_size < 32);
         nir_def *packed = nir_extract_bits(b, components, num_comps, 0, num_comps / (32 / src->bit_size), 32);
         num_comps /= 32 / src->bit_size;
         for (unsigned i = 0; i < num_comps; i++)
            components[i] = nir_channel(b, packed, i);

         nir_def *low_sel = nir_imm_int(b, src->bit_size == 8 ? 0x06040200 : 0x05040100);
         nir_def *high_sel = nir_imm_int(b, src->bit_size == 8 ? 0x07050301 : 0x07060302);

         for (unsigned keep32 = 0; keep32 < ((params->wave_size == 64) ? 2 : 1); keep32++) {
            nir_def *ballot = nir_imm_intN_t(b, keep32 ? UINT32_MAX : 0xffff0000ffffull, params->wave_size);
            nir_def *keep = nir_inverse_ballot(b, 1, ballot);
            nir_def *perm = nir_bcsel(b, keep, low_sel, high_sel);
            num_comps /= 2;
            for (unsigned i = 0; i < num_comps; i++) {
               components[i] = nir_byte_perm_amd(b, components[i * 2 + 1], components[i * 2], perm);
            }
         }

         nir_def *unpacked =
            nir_extract_bits(b, components, num_comps, 0, num_comps * (32 / src->bit_size), src->bit_size);
         num_comps *= 32 / src->bit_size;
         for (unsigned i = 0; i < num_comps; i++)
            components[i] = nir_channel(b, unpacked, i);
      }
   } else if ((src_use == GLSL_CMAT_USE_A && dst_use == GLSL_CMAT_USE_B) ||
              (src_use == GLSL_CMAT_USE_B && dst_use == GLSL_CMAT_USE_A)) {
      /* Transpose is a mess... Butterfly network: at each power-of-two step,
       * exchange blocks of components with xor-distant lanes. */
      for (unsigned x_mask = 1; x_mask < num_comps; x_mask *= 2) {
         /* Use separate masks to always keep the masked_swizzle on the first source of v_cndmask. */
         uint64_t mask = 0;
         for (unsigned i = 0; i < 64; i += 2 * x_mask) {
            mask |= BITFIELD64_MASK(x_mask) << i;
         }

         nir_def *even = nir_inverse_ballot(b, 1, nir_imm_intN_t(b, mask, params->wave_size));
         nir_def *odd = nir_inverse_ballot(b, 1, nir_imm_intN_t(b, mask << x_mask, params->wave_size));

         for (unsigned i = 0; i < num_comps; i += 2 * x_mask) {
            for (unsigned j = 0; j < x_mask; j++) {
               unsigned pos0 = i + j;
               unsigned pos1 = pos0 + x_mask;
               nir_def *comp0 = components[pos0];
               nir_def *comp1 = components[pos1];

               nir_def *comp0x =
                  nir_masked_swizzle_amd(b, comp0, .swizzle_mask = 0x1f | (x_mask << 10), .fetch_inactive = 1);
               nir_def *comp1x =
                  nir_masked_swizzle_amd(b, comp1, .swizzle_mask = 0x1f | (x_mask << 10), .fetch_inactive = 1);

               components[pos0] = nir_bcsel(b, even, comp0, comp1x);
               components[pos1] = nir_bcsel(b, odd, comp1, comp0x);
            }
         }
      }

      assert(num_comps == 16 || params->gfx_level >= GFX12);

      if (params->gfx_level >= GFX12) {
         /* Fix up the row order across lane groups (see the GFX12 layout
          * tables at the top of this file). */
         if (params->wave_size == 64) {
            nir_def *cond = nir_inverse_ballot(b, 1, nir_imm_intN_t(b, 0xf0f0f0f00f0f0f0f, params->wave_size));
            for (unsigned i = 0; i < num_comps; i++) {
               nir_def *comp = components[i];
               /* NOTE(review): unlike the GFX11 paths above, this rotate has
                * no .cluster_size — presumably it defaults to a whole-wave
                * rotate; confirm against the intrinsic definition. */
               nir_def *compx = nir_rotate(b, comp, nir_imm_int(b, 32));
               compx = nir_masked_swizzle_amd(b, compx, .swizzle_mask = 0x1f | (0x4 << 10), .fetch_inactive = 1);
               components[i] = nir_bcsel(b, cond, comp, compx);
            }
         }

         nir_def *cond = nir_inverse_ballot(b, 1, nir_imm_intN_t(b, 0xff0000ffff0000ff, params->wave_size));
         for (unsigned i = 0; i < num_comps; i++) {
            nir_def *comp = components[i];
            nir_def *compx = nir_masked_swizzle_amd(b, comp, .swizzle_mask = 0x1f | (0x18 << 10), .fetch_inactive = 1);
            components[i] = nir_bcsel(b, cond, comp, compx);
         }
      }
   }

   return nir_vec(b, components, num_comps);
}

/* Lower all cooperative-matrix types and intrinsics in a compute shader to
 * plain per-lane vectors, lane swizzles and the cmat_muladd_amd intrinsic,
 * following the GFX11/GFX12 layouts documented at the top of this file.
 *
 * Returns true if anything was changed.
 */
bool
radv_nir_lower_cooperative_matrix(nir_shader *shader, enum amd_gfx_level gfx_level, unsigned wave_size)
{
   bool progress = false;

   if (!shader->info.cs.has_cooperative_matrix)
      return false;

   const lower_cmat_params params = {
      .gfx_level = gfx_level,
      .wave_size = wave_size,
   };

   /* Only the first function is processed — assumes functions have already
    * been inlined into a single entry point at this stage. */
   struct nir_function *func = (struct nir_function *)exec_list_get_head_const(&shader->functions);
   struct hash_table *type_map = _mesa_pointer_hash_table_create(NULL);

   /* Retype all variables that contain matrices (directly or nested inside
    * arrays/structs) to their lowered vector form. */
   nir_foreach_variable_with_modes (var, shader, nir_var_shader_temp) {
      const struct glsl_type *new_type = radv_nir_translate_matrix_type(var->type, type_map, &params);
      if (new_type != var->type) {
         var->type = new_type;
         progress = true;
      }
   }

   nir_foreach_function_temp_variable (var, func->impl) {
      const struct glsl_type *new_type = radv_nir_translate_matrix_type(var->type, type_map, &params);
      if (new_type != var->type) {
         var->type = new_type;
         progress = true;
      }
   }

   nir_builder b = nir_builder_create(func->impl);

   /* Iterate in reverse order so that lowering can still use the matrix types from the derefs before we change it. */
   nir_foreach_block_reverse (block, func->impl) {
      nir_foreach_instr_reverse_safe (instr, block) {
         b.cursor = nir_before_instr(instr);

         switch (instr->type) {
         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
            switch (intr->intrinsic) {
            case nir_intrinsic_cmat_length: {
               /* Number of logical elements per lane (length_mul hides the
                * GFX11 padding components of 16-bit accumulators). */
               struct glsl_cmat_description desc = nir_intrinsic_cmat_desc(intr);
               unsigned len = radv_nir_cmat_length(desc, &params) / radv_nir_cmat_length_mul(desc, &params);
               nir_def_rewrite_uses(&intr->def, nir_imm_int(&b, len));
               nir_instr_remove(instr);
               progress = true;
               break;
            }
            case nir_intrinsic_cmat_extract: {
               /* Extract one logical element: scale the index by the padding
                * stride, then read the vector component. */
               nir_deref_instr *src_deref = nir_src_as_deref(intr->src[0]);
               struct glsl_cmat_description desc = *glsl_get_cmat_description(src_deref->type);
               nir_def *src0 = radv_nir_load_cmat(&b, &params, intr->src[0].ssa);

               nir_def *index = intr->src[1].ssa;
               index = nir_imul_imm(&b, index, radv_nir_cmat_length_mul(desc, &params));

               nir_def *elem = nir_vector_extract(&b, src0, index);

               nir_def_rewrite_uses(&intr->def, elem);
               nir_instr_remove(instr);
               progress = true;
               break;
            }
            case nir_intrinsic_cmat_insert: {
               /* Read-modify-write of one logical element of the matrix. */
               nir_def *src1 = radv_nir_load_cmat(&b, &params, intr->src[2].ssa);
               nir_deref_instr *dst_deref = nir_src_as_deref(intr->src[0]);
               struct glsl_cmat_description desc = *glsl_get_cmat_description(dst_deref->type);
               nir_def *index = intr->src[3].ssa;
               index = nir_imul_imm(&b, index, radv_nir_cmat_length_mul(desc, &params));

               nir_def *elem = intr->src[1].ssa;
               nir_def *r = nir_vector_insert(&b, src1, elem, index);
               nir_store_deref(&b, dst_deref, r, nir_component_mask(r->num_components));
               nir_instr_remove(instr);
               progress = true;
               break;
            }
            case nir_intrinsic_cmat_construct: {
               /* Splat a single scalar across the whole lowered vector. */
               nir_deref_instr *dst_deref = nir_src_as_deref(intr->src[0]);
               struct glsl_cmat_description desc = *glsl_get_cmat_description(dst_deref->type);
               nir_def *elem = intr->src[1].ssa;

               nir_def *r = nir_replicate(&b, elem, radv_nir_cmat_length(desc, &params));

               nir_store_deref(&b, dst_deref, r, nir_component_mask(r->num_components));
               nir_instr_remove(instr);
               progress = true;
               break;
            }
            case nir_intrinsic_cmat_load:
            case nir_intrinsic_cmat_store: {
               const bool is_load = intr->intrinsic == nir_intrinsic_cmat_load;

               /* src[0] is the matrix deref for loads and the memory deref
                * for stores (and vice versa for src[1]). */
               nir_deref_instr *cmat_deref = nir_src_as_deref(intr->src[!is_load]);
               struct glsl_cmat_description desc = *glsl_get_cmat_description(cmat_deref->type);
               enum glsl_matrix_layout layout = nir_intrinsic_matrix_layout(intr);

               nir_deref_instr *deref = nir_src_as_deref(intr->src[is_load]);
               nir_def *stride = intr->src[2].ssa;

               nir_def *local_idx = nir_load_subgroup_invocation(&b);
               nir_def *inner_idx = nir_iand_imm(&b, local_idx, 15);

               /* A input is transposed */
               if (desc.use == GLSL_CMAT_USE_A)
                  layout = layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR ? GLSL_MATRIX_LAYOUT_ROW_MAJOR
                                                                     : GLSL_MATRIX_LAYOUT_COLUMN_MAJOR;

               unsigned length = radv_nir_cmat_length(desc, &params);
               unsigned mul = radv_nir_cmat_length_mul(desc, &params);
               unsigned lanes_per_iter = desc.use == GLSL_CMAT_USE_ACCUMULATOR ? params.wave_size : 16;
               nir_def *vars[16];
               if (is_load) {
                  /* Padding components of GFX11 16-bit accumulators are
                   * never loaded from memory — fill them with undef. */
                  if (mul > 1) {
                     for (unsigned i = 0; i < length; ++i)
                        if (i % mul != 0)
                           vars[i] = nir_undef(&b, 1, radv_nir_cmat_bits(desc));
                  }
               } else {
                  /* GFX11 A/B matrices are replicated across lane groups;
                   * only lanes 0..15 need to write their copy to memory. */
                  if (gfx_level < GFX12 && desc.use != GLSL_CMAT_USE_ACCUMULATOR)
                     nir_push_if(&b, nir_ilt_imm(&b, local_idx, 16));

                  nir_def *src = radv_nir_load_cmat(&b, &params, &cmat_deref->def);
                  for (unsigned i = 0; i < length; ++i)
                     vars[i] = nir_channel(&b, src, i);
               }

               unsigned idx_bits = deref->def.bit_size;
               nir_def *base_row = radv_get_base_row(&b, desc, &params, local_idx);

               /* VUID-RuntimeSpirv-OpCooperativeMatrixLoadKHR-08986:
                * For OpCooperativeMatrixLoadKHR and OpCooperativeMatrixStoreKHR instructions,
                * the Pointer and Stride operands must be aligned to at least the lesser of 16 bytes
                * or the natural alignment of a row or column (depending on ColumnMajor) of the matrix
                * (where the natural alignment is the number of columns/rows multiplied by the component size).
                */
               unsigned align_mul = 0;
               if (layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR)
                  align_mul = MIN2(16, radv_nir_cmat_bits(desc) * desc.rows / 8);

               if (gfx_level >= GFX12)
                  align_mul /= wave_size / 16;
               else if (desc.use == GLSL_CMAT_USE_ACCUMULATOR)
                  align_mul = 0;

               /* Emit one scalar load/store per meaningful component,
                * addressing memory by (row, column) derived from the lane
                * index and iteration count. */
               for (unsigned i = 0; i < length / mul; ++i) {
                  nir_def *col_offset = inner_idx;
                  nir_def *row_offset;
                  uint32_t row_iter;

                  if (gfx_level >= GFX12) {
                     row_iter = i;
                  } else {
                     row_iter = i * lanes_per_iter / 16;
                  }

                  row_offset = nir_iadd_imm(&b, base_row, row_iter);

                  if (layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
                     nir_def *tmp = col_offset;
                     col_offset = row_offset;
                     row_offset = tmp;
                  }

                  /* col_offset is applied via ptr_as_array on the original
                   * deref, so stride is in units of that deref's type. */
                  col_offset = nir_imul(&b, col_offset, stride);

                  col_offset = nir_u2uN(&b, col_offset, idx_bits);
                  row_offset = nir_u2uN(&b, row_offset, idx_bits);

                  nir_deref_instr *iter_deref = nir_build_deref_ptr_as_array(&b, deref, col_offset);
                  iter_deref = nir_build_deref_cast(&b, &iter_deref->def, deref->modes,
                                                    glsl_scalar_type(desc.element_type), radv_nir_cmat_bits(desc) / 8);
                  iter_deref = nir_build_deref_ptr_as_array(&b, iter_deref, row_offset);

                  if (align_mul) {
                     unsigned align_offset = row_iter * radv_nir_cmat_bits(desc) / 8 % align_mul;
                     iter_deref =
                        nir_build_deref_cast_with_alignment(&b, &iter_deref->def, deref->modes, iter_deref->type,
                                                            iter_deref->cast.ptr_stride, align_mul, align_offset);
                  }

                  if (is_load) {
                     vars[i * mul] = nir_load_deref(&b, iter_deref);
                  } else {
                     nir_store_deref(&b, iter_deref, vars[i * mul], 1);
                  }
               }

               if (is_load) {
                  nir_def *mat = nir_vec(&b, vars, length);
                  nir_store_deref(&b, cmat_deref, mat, nir_component_mask(mat->num_components));
               } else if (gfx_level < GFX12 && desc.use != GLSL_CMAT_USE_ACCUMULATOR) {
                  nir_pop_if(&b, NULL);
               }
               nir_instr_remove(instr);
               progress = true;
               break;
            }
            case nir_intrinsic_cmat_muladd: {
               /* Lower to the AMD-specific muladd; signedness of A/B is
                * carried via the base-type indices. */
               nir_def *A = radv_nir_load_cmat(&b, &params, intr->src[1].ssa);
               nir_def *B = radv_nir_load_cmat(&b, &params, intr->src[2].ssa);
               nir_def *C = radv_nir_load_cmat(&b, &params, intr->src[3].ssa);

               nir_deref_instr *a_deref = nir_src_as_deref(intr->src[1]);
               nir_deref_instr *b_deref = nir_src_as_deref(intr->src[2]);
               struct glsl_cmat_description a_desc = *glsl_get_cmat_description(a_deref->type);
               struct glsl_cmat_description b_desc = *glsl_get_cmat_description(b_deref->type);

               const nir_cmat_signed cmat_signed_mask = nir_intrinsic_cmat_signed_mask(intr);

               enum glsl_base_type a_element_type =
                  glsl_apply_signedness_to_base_type(a_desc.element_type, cmat_signed_mask & NIR_CMAT_A_SIGNED);
               enum glsl_base_type b_element_type =
                  glsl_apply_signedness_to_base_type(b_desc.element_type, cmat_signed_mask & NIR_CMAT_B_SIGNED);

               nir_def *ret = nir_cmat_muladd_amd(&b, A, B, C, .saturate = nir_intrinsic_saturate(intr),
                                                  .src_base_type = a_element_type, .src_base_type2 = b_element_type);

               nir_deref_instr *dst_deref = nir_src_as_deref(intr->src[0]);
               nir_store_deref(&b, dst_deref, ret, nir_component_mask(ret->num_components));
               nir_instr_remove(instr);
               progress = true;
               break;
            }
            case nir_intrinsic_cmat_transpose:
            case nir_intrinsic_cmat_convert: {
               nir_deref_instr *dst_deref = nir_src_as_deref(intr->src[0]);
               nir_deref_instr *src_deref = nir_src_as_deref(intr->src[1]);
               struct glsl_cmat_description dst_desc = *glsl_get_cmat_description(dst_deref->type);
               struct glsl_cmat_description src_desc = *glsl_get_cmat_description(src_deref->type);
               nir_def *src = radv_nir_load_cmat(&b, &params, intr->src[1].ssa);

               bool sat = false;
               const bool transpose = intr->intrinsic == nir_intrinsic_cmat_transpose;

               enum glsl_cmat_use dst_use = dst_desc.use;
               enum glsl_cmat_use src_use = src_desc.use;

               enum glsl_base_type dst_element_type = dst_desc.element_type;
               enum glsl_base_type src_element_type = src_desc.element_type;

               if (transpose) {
                  /* NV_cmat2 only support acc -> b transpose, but we can handle any transpose except acc -> acc. */
                  if (dst_use == GLSL_CMAT_USE_A) {
                     dst_use = GLSL_CMAT_USE_B;
                  } else if (dst_use == GLSL_CMAT_USE_B) {
                     dst_use = GLSL_CMAT_USE_A;
                  } else if (dst_use == GLSL_CMAT_USE_ACCUMULATOR) {
                     if (src_use == GLSL_CMAT_USE_A)
                        src_use = GLSL_CMAT_USE_B;
                     else if (src_use == GLSL_CMAT_USE_B)
                        src_use = GLSL_CMAT_USE_A;
                     else
                        unreachable("unsupported transpose");
                  }
               } else {
                  sat = nir_intrinsic_saturate(intr);
                  nir_cmat_signed cmat_signed_mask = nir_intrinsic_cmat_signed_mask(intr);

                  dst_element_type =
                     glsl_apply_signedness_to_base_type(dst_element_type, cmat_signed_mask & NIR_CMAT_RESULT_SIGNED);
                  src_element_type =
                     glsl_apply_signedness_to_base_type(src_element_type, cmat_signed_mask & NIR_CMAT_A_SIGNED);
               }

               unsigned dst_mul = radv_nir_cmat_length_mul(dst_desc, &params);
               unsigned src_mul = radv_nir_cmat_length_mul(src_desc, &params);

               /* Drop the GFX11 padding components before converting. */
               if (src_mul > dst_mul) {
                  nir_def *components[NIR_MAX_VEC_COMPONENTS];
                  unsigned scale = src_mul / dst_mul;
                  for (unsigned i = 0; i * scale < src->num_components; ++i) {
                     components[i] = nir_channel(&b, src, i * scale);
                  }
                  src = nir_vec(&b, components, src->num_components / scale);
               }

               /* Do the layout conversion on the narrower representation:
                * before the type conversion when widening, after when
                * narrowing — fewer/cheaper lane swizzles either way. */
               if (radv_nir_cmat_bits(src_desc) <= radv_nir_cmat_bits(dst_desc))
                  src = convert_use(&b, src, src_use, dst_use, &params);

               nir_def *ret = convert_base_type(&b, src, src_element_type, dst_element_type, sat);

               if (radv_nir_cmat_bits(src_desc) > radv_nir_cmat_bits(dst_desc))
                  ret = convert_use(&b, ret, src_use, dst_use, &params);

               /* Re-insert padding components (undef) if the destination
                * needs them. */
               if (dst_mul > src_mul) {
                  nir_def *components[NIR_MAX_VEC_COMPONENTS];
                  unsigned scale = dst_mul / src_mul;
                  for (unsigned i = 0; i < ret->num_components; ++i) {
                     components[i * scale] = nir_channel(&b, ret, i);
                     for (unsigned j = 1; j < scale; j++)
                        components[i * scale + j] = nir_undef(&b, 1, ret->bit_size);
                  }
                  ret = nir_vec(&b, components, ret->num_components * scale);
               }

               nir_store_deref(&b, dst_deref, ret, nir_component_mask(ret->num_components));
               nir_instr_remove(instr);
               progress = true;
               break;
            }
            case nir_intrinsic_cmat_unary_op: {
               /* Element-wise unary ALU op applied to the whole vector. */
               nir_def *src = radv_nir_load_cmat(&b, &params, intr->src[1].ssa);
               nir_op op = nir_intrinsic_alu_op(intr);
               nir_def *ret = nir_build_alu1(&b, op, src);
               nir_store_deref(&b, nir_src_as_deref(intr->src[0]), ret, nir_component_mask(ret->num_components));
               nir_instr_remove(instr);
               progress = true;
               break;
            }
            case nir_intrinsic_cmat_scalar_op: {
               /* Element-wise binary op between the matrix and a scalar. */
               nir_def *src1 = radv_nir_load_cmat(&b, &params, intr->src[1].ssa);
               nir_op op = nir_intrinsic_alu_op(intr);
               nir_def *ret = nir_build_alu2(&b, op, src1, intr->src[2].ssa);
               nir_store_deref(&b, nir_src_as_deref(intr->src[0]), ret, nir_component_mask(ret->num_components));
               nir_instr_remove(instr);
               progress = true;
               break;
            }
            case nir_intrinsic_cmat_binary_op: {
               /* Element-wise binary op between two matrices. */
               nir_def *src1 = radv_nir_load_cmat(&b, &params, intr->src[1].ssa);
               nir_def *src2 = radv_nir_load_cmat(&b, &params, intr->src[2].ssa);
               nir_op op = nir_intrinsic_alu_op(intr);
               nir_def *ret = nir_build_alu2(&b, op, src1, src2);
               nir_store_deref(&b, nir_src_as_deref(intr->src[0]), ret, nir_component_mask(ret->num_components));
               nir_instr_remove(instr);
               progress = true;
               break;
            }
            case nir_intrinsic_cmat_bitcast: {
               /* Bitcast: the lowered vectors are layout-compatible, so a
                * plain copy of the loaded value suffices. */
               nir_def *src1 = radv_nir_load_cmat(&b, &params, intr->src[1].ssa);
               nir_store_deref(&b, nir_src_as_deref(intr->src[0]), src1, nir_component_mask(src1->num_components));
               nir_instr_remove(instr);
               progress = true;
               break;
            }
            case nir_intrinsic_cmat_copy: {
               nir_build_copy_deref(&b, intr->src[0].ssa, intr->src[1].ssa);
               nir_instr_remove(instr);
               progress = true;
               break;
            }
            default:
               continue;
            }
            break;
         }
         case nir_instr_type_deref: {
            /* Retype derefs the same way as variables (done last because we
             * iterate in reverse instruction order). */
            nir_deref_instr *deref = nir_instr_as_deref(instr);
            const struct glsl_type *new_type = radv_nir_translate_matrix_type(deref->type, type_map, &params);
            if (new_type != deref->type) {
               deref->type = new_type;
               progress = true;
            }
            break;
         }
         default:
            continue;
         }
      }
   }

   _mesa_hash_table_destroy(type_map, NULL);

   return nir_progress(progress, func->impl, 0);
}

/* Check whether every strided entry of comp[] is produced by alu_op; if so,
 * replace each entry with that ALU instruction's first source (stripping the
 * op) and return true. Returns false without touching comp[] otherwise.
 */
static bool
apply_component_mods(nir_scalar *comp, unsigned num_comps, unsigned stride, nir_op alu_op)
{
   /* First pass: verify that the modifier is present on every component,
    * so we either fold it for all of them or for none. */
   for (unsigned idx = 0; idx < num_comps; idx++) {
      const nir_scalar elem = comp[idx * stride];
      if (!nir_scalar_is_alu(elem))
         return false;
      if (nir_scalar_alu_op(elem) != alu_op)
         return false;
   }

   /* Second pass: chase through the ALU op to its source operand. */
   for (unsigned idx = 0; idx < num_comps; idx++) {
      const unsigned pos = idx * stride;
      comp[pos] = nir_scalar_chase_alu_src(comp[pos], 0);
   }

   return true;
}

/* Apply neg_lo/neg_hi modifiers to A/B and neg/abs to C.
 *
 * Folds fneg/fabs ALU instructions feeding one source of a cmat_muladd_amd
 * intrinsic into the intrinsic's neg_lo_amd/neg_hi_amd bitmasks and rewrites
 * the source to skip the folded ops. src_idx 0/1 are the A/B matrices,
 * src_idx 2 is the C accumulator. Returns true if anything was folded.
 */
static bool
opt_cmat_modifiers(nir_builder *b, nir_intrinsic_instr *intrin, enum amd_gfx_level gfx_level, unsigned src_idx)
{
   /* Pre-GFX12, a 16-bit accumulator only uses every other vector component
    * (cf. the *2 factor in radv_nir_cmat_length), so walk with stride 2. */
   unsigned length_mul = src_idx == 2 && intrin->src[2].ssa->bit_size == 16 && gfx_level < GFX12 ? 2 : 1;
   nir_scalar comp[NIR_MAX_VEC_COMPONENTS] = {0};
   nir_def *src = intrin->src[src_idx].ssa;

   /* Gather only the components the matrix actually occupies. */
   for (unsigned i = 0; i < src->num_components; i += length_mul)
      comp[i] = nir_scalar_resolved(src, i);

   unsigned neg_lo = nir_intrinsic_neg_lo_amd(intrin);
   unsigned neg_hi = nir_intrinsic_neg_hi_amd(intrin);

   bool progress = false;
   if (src_idx == 2) {
      /* Accumulator: bit src_idx of neg_lo negates C and the same bit of
       * neg_hi takes |C|. The ~neg_hi mask means fneg only toggles the
       * negate bit when abs isn't already set — NOTE(review): presumably
       * because the hardware applies abs after negate, making the negate
       * a no-op under abs; confirm against the WMMA ISA documentation. */
      unsigned num_comp = src->num_components / length_mul;
      if (apply_component_mods(comp, num_comp, length_mul, nir_op_fneg)) {
         neg_lo ^= (~neg_hi) & BITFIELD_BIT(src_idx);
         progress = true;
      }
      if (apply_component_mods(comp, num_comp, length_mul, nir_op_fabs)) {
         neg_hi |= BITFIELD_BIT(src_idx);
         progress = true;
      }
   } else {
      /* A/B matrices: even-indexed scalar components are controlled by
       * neg_lo and odd-indexed ones by neg_hi (presumably the low/high
       * fp16 halves of each packed 32-bit register — TODO confirm), so
       * fold fneg independently for the two interleaved halves. */
      unsigned num_comp = src->num_components / 2;
      if (apply_component_mods(comp, num_comp, 2, nir_op_fneg)) {
         neg_lo ^= BITFIELD_BIT(src_idx);
         progress = true;
      }
      if (apply_component_mods(comp + 1, num_comp, 2, nir_op_fneg)) {
         neg_hi ^= BITFIELD_BIT(src_idx);
         progress = true;
      }
   }

   if (!progress)
      return false;

   nir_intrinsic_set_neg_lo_amd(intrin, neg_lo);
   nir_intrinsic_set_neg_hi_amd(intrin, neg_hi);

   /* Avoid creating a new vec if we don't have to: if every gathered scalar
    * still points at component i of one common def, reuse that def as-is. */
   nir_def *new_src = comp[0].def;
   for (unsigned i = 0; i < src->num_components; i += length_mul) {
      if (comp[i].def != new_src || comp[i].comp != i) {
         new_src = NULL;
         break;
      }
   }

   if (!new_src) {
      b->cursor = nir_before_instr(&intrin->instr);
      if (length_mul > 1) {
         /* The skipped components were never gathered above; fill the gaps
          * with undef so nir_vec_scalars gets a fully-populated array. */
         nir_scalar undef = nir_get_scalar(nir_undef(b, 1, src->bit_size), 0);
         for (unsigned i = 0; i < src->num_components; i += length_mul) {
            for (unsigned j = 1; j < length_mul; j++)
               comp[i + j] = undef;
         }
      }

      new_src = nir_vec_scalars(b, comp, src->num_components);
   }

   nir_src_rewrite(&intrin->src[src_idx], new_src);
   return true;
}

/* Per-intrinsic callback: try to fold fneg/fabs on the sources of a
 * cmat_muladd_amd into its neg_lo/neg_hi modifier bits.
 */
static bool
opt_cmat(nir_builder *b, nir_intrinsic_instr *intrin, void *data)
{
   const enum amd_gfx_level gfx_level = *(enum amd_gfx_level *)data;

   if (intrin->intrinsic != nir_intrinsic_cmat_muladd_amd)
      return false;

   const enum glsl_base_type src_type = nir_intrinsic_src_base_type(intrin);

   /* The modifiers are float negate/abs; integer muladds have none. */
   if (glsl_base_type_is_integer(src_type))
      return false;

   const bool is_f16 = src_type == GLSL_TYPE_FLOAT16;
   bool changed = false;

   /* A and B (sources 0 and 1) only take modifiers for fp16 inputs. */
   if (is_f16) {
      changed |= opt_cmat_modifiers(b, intrin, gfx_level, 0);
      changed |= opt_cmat_modifiers(b, intrin, gfx_level, 1);
   }

   /* The accumulator (source 2) also takes them for 32-bit results. */
   if (is_f16 || intrin->def.bit_size == 32)
      changed |= opt_cmat_modifiers(b, intrin, gfx_level, 2);

   return changed;
}

/* Entry point: run the cooperative-matrix modifier-folding optimization over
 * every intrinsic in the shader. Returns whether any instruction changed.
 */
bool
radv_nir_opt_cooperative_matrix(nir_shader *shader, enum amd_gfx_level gfx_level)
{
   const bool progress = nir_shader_intrinsics_pass(shader, opt_cmat, nir_metadata_control_flow, &gfx_level);

   return progress;
}
