#include "agxv_private.h"

#include "vk_util.h"
#include "glsl_types.h"
#include "spirv/nir_spirv.h"
#include "vk_shader_module.h"

#include "agx_compile.h"

/* Baseline dynamic state applied to every pipeline before any static or
 * dynamic overrides (see pipeline_init_dynamic_state).  Values follow the
 * Vulkan defaults: empty viewport/scissor arrays, fully-enabled stencil
 * masks, zeroed references/blend constants/depth bias, and a 1.0 line width.
 */
const struct agxv_dynamic_state default_dynamic_state = {
    .viewport = {
        .count = 0,
    },
    .scissor = {
        .count = 0,
    },
    .stencil_compare_mask = {
        .front = ~0u,
        .back = ~0u,
    },
    .stencil_write_mask = {
        .front = ~0u,
        .back = ~0u,
    },
    .stencil_reference = {
        .front = 0u,
        .back = 0u,
    },
    .blend_constants = {0.0f, 0.0f, 0.0f, 0.0f},
    .depth_bias = {
        .constant_factor = 0.0f,
        .depth_bias_clamp = 0.0f,
        .slope_factor = 0.0f,
    },
    .line_width = 1.0f,
};

/* Options handed to spirv_to_nir() in agxv_shader_compile_to_ir().  The
 * address formats here must stay in sync with the nir_lower_explicit_io
 * calls later in that function (UBO/SSBO use index+offset, global memory
 * uses 64-bit addresses, push constants stay logical until lowered).
 */
static const struct spirv_to_nir_options default_spirv_options = {
    .caps = {
        .device_group = true,
        .variable_pointers = true,
        .subgroup_basic = true,
    },
    .ubo_addr_format = nir_address_format_32bit_index_offset,
    .ssbo_addr_format = nir_address_format_32bit_index_offset,
    .phys_ssbo_addr_format = nir_address_format_64bit_global,
    .push_const_addr_format = nir_address_format_logical,
    .shared_addr_format = nir_address_format_32bit_offset,
};

static void
agxv_destroy_pipeline(struct agxv_pipeline *pipeline,
                      struct agxv_device *device,
                      const VkAllocationCallbacks *pAllocator)
{
   if (!pipeline)
      return;

   vk_object_free(&device->vk, pAllocator, pipeline);
}

/* vkDestroyPipeline entry point.  Destroying VK_NULL_HANDLE is a no-op,
 * as required by the Vulkan spec.
 */
VKAPI_ATTR void VKAPI_CALL
agxv_DestroyPipeline(VkDevice _device,
                     VkPipeline _pipeline,
                     const VkAllocationCallbacks *pAllocator)
{
   AGXV_FROM_HANDLE(agxv_device, device, _device);
   AGXV_FROM_HANDLE(agxv_pipeline, pipeline, _pipeline);

   if (pipeline != NULL)
      agxv_destroy_pipeline(pipeline, device, pAllocator);
}

/* Translate a single-bit VkShaderStageFlagBits value into the corresponding
 * Mesa gl_shader_stage.  Callers must pass exactly one of the supported
 * stage bits; anything else is a driver bug (hence unreachable, which also
 * makes the trailing return that used to follow it dead code).
 */
static gl_shader_stage
agxv_shader_stage(VkShaderStageFlagBits stage)
{
   switch (stage) {
   case VK_SHADER_STAGE_VERTEX_BIT:
      return MESA_SHADER_VERTEX;
   case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
      return MESA_SHADER_TESS_CTRL;
   case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
      return MESA_SHADER_TESS_EVAL;
   case VK_SHADER_STAGE_GEOMETRY_BIT:
      return MESA_SHADER_GEOMETRY;
   case VK_SHADER_STAGE_FRAGMENT_BIT:
      return MESA_SHADER_FRAGMENT;
   case VK_SHADER_STAGE_COMPUTE_BIT:
      return MESA_SHADER_COMPUTE;
   default:
      unreachable("invalid VkShaderStageFlagBits");
   }
}

/* Translate one shader-module stage into optimized NIR and stash the result
 * in pipeline->pipeline_nir[stage].
 *
 * If the module carries SPIR-V it is run through spirv_to_nir() with the
 * pipeline's specialization constants; if it already carries NIR, that NIR
 * is cloned so it can be mutated freely.  The shader is then inlined down
 * to a single entrypoint, its I/O and memory accesses are lowered to the
 * address formats in default_spirv_options, and a standard optimization
 * loop is run to a fixed point.
 *
 * On spirv_to_nir() failure the pipeline_nir slot is left NULL; the caller
 * (agxv_graphics_pipeline_init) checks for that and fails pipeline creation.
 */
static void
agxv_shader_compile_to_ir(struct agxv_pipeline *pipeline,
                          struct vk_shader_module *module,
                          const char *entrypoint_name,
                          gl_shader_stage stage,
                          const VkSpecializationInfo *spec_info)
{
    nir_shader *nir;
    bool progress;

    if (!module->nir) {
        uint32_t *spirv = (uint32_t *)module->data;

        /* SPIR-V is a stream of 32-bit words. */
        assert(module->size % 4 == 0);

        uint32_t num_spec_entries = 0;
        struct nir_spirv_specialization *spec_entries =
            vk_spec_info_to_nir_spirv(spec_info, &num_spec_entries);

        nir = spirv_to_nir(spirv, module->size / 4,
                           spec_entries, num_spec_entries,
                           stage, entrypoint_name, &default_spirv_options, &agx_nir_options);
        if (!nir) {
            free(spec_entries);
            return;
        }
        nir_validate_shader(nir, NULL);

        free(spec_entries);
    } else {
        /* Clone with a NULL ralloc parent so this pipeline owns the copy. */
        nir = nir_shader_clone(NULL, module->nir);
        nir_validate_shader(nir, "nir module");
    }

    /* Flatten the call graph: lower returns, inline everything, and clean
     * up so only trivially-dead functions remain.
     */
    NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
    NIR_PASS_V(nir, nir_lower_returns);
    NIR_PASS_V(nir, nir_inline_functions);
    NIR_PASS_V(nir, nir_copy_prop);
    NIR_PASS_V(nir, nir_opt_deref);

    /* Pick off the single entrypoint that we want */
    foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
        if (func->is_entrypoint)
            func->name = ralloc_strdup(func, "main");
        else
            exec_node_remove(&func->node);
    }
    assert(exec_list_length(&nir->functions) == 1);

    NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);
    NIR_PASS_V(nir, nir_split_var_copies);
    NIR_PASS_V(nir, nir_split_per_member_structs);

    // if (stage == MESA_SHADER_FRAGMENT)
    //     lvp_lower_input_attachments(nir, false);
    NIR_PASS_V(nir, nir_lower_system_values);
    NIR_PASS_V(nir, nir_lower_compute_system_values, NULL);

    NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
    NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_uniform, NULL);

    // lvp_lower_pipeline_layout(pipeline->device, pipeline->layout, nir);

    NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
    NIR_PASS_V(nir, nir_split_var_copies);
    NIR_PASS_V(nir, nir_lower_global_vars_to_local);

    /* Lower memory access to explicit address arithmetic.  The formats here
     * must match default_spirv_options above.
     */
    NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_push_const,
               nir_address_format_32bit_offset);

    NIR_PASS_V(nir, nir_lower_explicit_io,
               nir_var_mem_ubo | nir_var_mem_ssbo,
               nir_address_format_32bit_index_offset);

    NIR_PASS_V(nir, nir_lower_explicit_io,
               nir_var_mem_global,
               nir_address_format_64bit_global);

    // if (nir->info.stage == MESA_SHADER_COMPUTE) {
    //     NIR_PASS_V(nir, nir_lower_vars_to_explicit_types, nir_var_mem_shared, shared_var_info);
    //     NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_shared, nir_address_format_32bit_offset);
    // }

    NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_shader_temp, NULL);

    if (nir->info.stage == MESA_SHADER_VERTEX ||
        nir->info.stage == MESA_SHADER_GEOMETRY) {
        NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
    } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
        NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, true);
    }

    /* Standard optimization loop, run until a full pass makes no progress. */
    do {
        progress = false;

        NIR_PASS(progress, nir, nir_lower_flrp, 32 | 64, true);
        NIR_PASS(progress, nir, nir_split_array_vars, nir_var_function_temp);
        NIR_PASS(progress, nir, nir_shrink_vec_array_vars, nir_var_function_temp);
        NIR_PASS(progress, nir, nir_opt_deref);
        NIR_PASS(progress, nir, nir_lower_vars_to_ssa);

        NIR_PASS(progress, nir, nir_copy_prop);
        NIR_PASS(progress, nir, nir_opt_dce);
        NIR_PASS(progress, nir, nir_opt_peephole_select, 8, true, true);

        NIR_PASS(progress, nir, nir_opt_algebraic);
        NIR_PASS(progress, nir, nir_opt_constant_folding);

        NIR_PASS(progress, nir, nir_opt_remove_phis);
        bool trivial_continues = false;
        NIR_PASS(trivial_continues, nir, nir_opt_trivial_continues);
        progress |= trivial_continues;
        if (trivial_continues)
        {
            /* If nir_opt_trivial_continues makes progress, then we need to
             * clean things up if we want any hope of nir_opt_if or
             * nir_opt_loop_unroll to make progress.
             */
            NIR_PASS(progress, nir, nir_copy_prop);
            NIR_PASS(progress, nir, nir_opt_dce);
            NIR_PASS(progress, nir, nir_opt_remove_phis);
        }
        NIR_PASS(progress, nir, nir_opt_if, true);
        NIR_PASS(progress, nir, nir_opt_dead_cf);
        NIR_PASS(progress, nir, nir_opt_conditional_discard);
        NIR_PASS(progress, nir, nir_opt_remove_phis);
        NIR_PASS(progress, nir, nir_opt_cse);
        NIR_PASS(progress, nir, nir_opt_undef);

        NIR_PASS(progress, nir, nir_opt_deref);
        NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
    } while (progress);

    NIR_PASS_V(nir, nir_remove_dead_variables,
               nir_var_shader_in | nir_var_shader_out | nir_var_system_value, NULL);

    NIR_PASS_V(nir, nir_lower_var_copies);
    NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
    NIR_PASS_V(nir, nir_opt_dce);
    nir_sweep(nir);

    nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

    /* Assign driver locations to shader I/O.  Vertex inputs map directly to
     * their vertex-attribute slots; every other stage gets compacted
     * locations.
     */
    if (nir->info.stage != MESA_SHADER_VERTEX)
        nir_assign_io_var_locations(nir, nir_var_shader_in, &nir->num_inputs, nir->info.stage);
    else {
        nir->num_inputs = util_last_bit64(nir->info.inputs_read);
        nir_foreach_shader_in_variable(var, nir) {
            var->data.driver_location = var->data.location - VERT_ATTRIB_GENERIC0;
        }
    }
    nir_assign_io_var_locations(nir, nir_var_shader_out, &nir->num_outputs,
                                nir->info.stage);

    /* Vulkan uses the separate-shader linking model */
    nir->info.separate_shader = true;

    pipeline->pipeline_nir[stage] = nir;
}

/* Map a VkDynamicState enum to the driver's AGXV_DYNAMIC_* mask bit.
 * VK_DYNAMIC_STATE_DEPTH_BOUNDS maps to 0 (accepted but not tracked);
 * any state not in the table is a driver bug.
 */
static unsigned
agxv_dynamic_state_mask(VkDynamicState state)
{
    static const struct {
        VkDynamicState vk_state;
        unsigned mask;
    } table[] = {
        { VK_DYNAMIC_STATE_VIEWPORT, AGXV_DYNAMIC_VIEWPORT },
        { VK_DYNAMIC_STATE_SCISSOR, AGXV_DYNAMIC_SCISSOR },
        { VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK, AGXV_DYNAMIC_STENCIL_COMPARE_MASK },
        { VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, AGXV_DYNAMIC_STENCIL_WRITE_MASK },
        { VK_DYNAMIC_STATE_STENCIL_REFERENCE, AGXV_DYNAMIC_STENCIL_REFERENCE },
        { VK_DYNAMIC_STATE_BLEND_CONSTANTS, AGXV_DYNAMIC_BLEND_CONSTANTS },
        { VK_DYNAMIC_STATE_DEPTH_BIAS, AGXV_DYNAMIC_DEPTH_BIAS },
        { VK_DYNAMIC_STATE_LINE_WIDTH, AGXV_DYNAMIC_LINE_WIDTH },
        { VK_DYNAMIC_STATE_DEPTH_BOUNDS, 0 },
    };

    for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
        if (table[i].vk_state == state)
            return table[i].mask;
    }

    unreachable("Unhandled dynamic state");
}

/* Compute the viewport transform (per-axis scale and translate) from a
 * Vulkan viewport rectangle and depth range: x/y map NDC [-1, 1] onto the
 * framebuffer rect, z maps NDC [0, 1] onto [minDepth, maxDepth].
 *
 * FIXME: C&P from radv. tu has similar code. Perhaps common place? */
static void agxv_viewport_compute_xform(const VkViewport *viewport,
                                 float scale[3],
                                 float translate[3])
{
    const float x = viewport->x;
    const float y = viewport->y;
    const float half_width = 0.5f * viewport->width;
    const float half_height = 0.5f * viewport->height;
    const double n = viewport->minDepth;
    const double f = viewport->maxDepth;

    scale[0] = half_width;
    translate[0] = half_width + x;
    scale[1] = half_height;
    translate[1] = half_height + y;

    /* Depth delta is computed in double precision, then narrowed. */
    scale[2] = (f - n);
    translate[2] = n;

    /* It seems that if the scale is small enough the hardware won't clip
     * correctly so we work around this by choosing the smallest scale that
     * seems to work.
     *
     * This case is exercised by CTS:
     * dEQP-VK.draw.inverted_depth_ranges.nodepthclamp_deltazero
     */
    const float min_abs_scale = 0.000009f;
    if (fabsf(scale[2]) < min_abs_scale)
        scale[2] = min_abs_scale * (scale[2] < 0 ? -1.0f : 1.0f);
}

/* Initialize pipeline->dynamic_state from the pipeline CreateInfo structs.
 *
 * Starts from default_dynamic_state, builds a mask of states the
 * application declared dynamic (those are supplied later via vkCmdSet*),
 * and for every state that is NOT dynamic bakes the static CreateInfo
 * value into the pipeline.  Any of the CreateInfo pointers may be NULL
 * (e.g. when rasterization is discarded); NULL structs are skipped.
 */
static void
pipeline_init_dynamic_state(
    struct agxv_pipeline *pipeline,
    const VkPipelineDynamicStateCreateInfo *pDynamicState,
    const VkPipelineViewportStateCreateInfo *pViewportState,
    const VkPipelineDepthStencilStateCreateInfo *pDepthStencilState,
    const VkPipelineColorBlendStateCreateInfo *pColorBlendState,
    const VkPipelineRasterizationStateCreateInfo *pRasterizationState)
{
    pipeline->dynamic_state = default_dynamic_state;
    struct agxv_dynamic_state *dynamic = &pipeline->dynamic_state;

    /* Create a mask of enabled dynamic states */
    uint32_t dynamic_states = 0;
    if (pDynamicState)
    {
        uint32_t count = pDynamicState->dynamicStateCount;
        for (uint32_t s = 0; s < count; s++)
        {
            dynamic_states |=
                agxv_dynamic_state_mask(pDynamicState->pDynamicStates[s]);
        }
    }

    /* For any pipeline states that are not dynamic, set the dynamic state
     * from the static pipeline state.
     */
    if (pViewportState)
    {
        if (!(dynamic_states & AGXV_DYNAMIC_VIEWPORT))
        {
            dynamic->viewport.count = pViewportState->viewportCount;
            typed_memcpy(dynamic->viewport.viewports, pViewportState->pViewports,
                         pViewportState->viewportCount);

            /* Pre-compute the hardware viewport transforms for the static
             * viewports; the dynamic path does this at draw time.
             */
            for (uint32_t i = 0; i < dynamic->viewport.count; i++)
            {
                agxv_viewport_compute_xform(&dynamic->viewport.viewports[i],
                                            dynamic->viewport.scale[i],
                                            dynamic->viewport.translate[i]);
            }
        }

        if (!(dynamic_states & AGXV_DYNAMIC_SCISSOR))
        {
            dynamic->scissor.count = pViewportState->scissorCount;
            typed_memcpy(dynamic->scissor.scissors, pViewportState->pScissors,
                         pViewportState->scissorCount);
        }
    }

    if (pDepthStencilState)
    {
        if (!(dynamic_states & AGXV_DYNAMIC_STENCIL_COMPARE_MASK))
        {
            dynamic->stencil_compare_mask.front =
                pDepthStencilState->front.compareMask;
            dynamic->stencil_compare_mask.back =
                pDepthStencilState->back.compareMask;
        }

        if (!(dynamic_states & AGXV_DYNAMIC_STENCIL_WRITE_MASK))
        {
            dynamic->stencil_write_mask.front = pDepthStencilState->front.writeMask;
            dynamic->stencil_write_mask.back = pDepthStencilState->back.writeMask;
        }

        if (!(dynamic_states & AGXV_DYNAMIC_STENCIL_REFERENCE))
        {
            dynamic->stencil_reference.front = pDepthStencilState->front.reference;
            dynamic->stencil_reference.back = pDepthStencilState->back.reference;
        }
    }

    if (pColorBlendState && !(dynamic_states & AGXV_DYNAMIC_BLEND_CONSTANTS))
    {
        memcpy(dynamic->blend_constants, pColorBlendState->blendConstants,
               sizeof(dynamic->blend_constants));
    }

    if (pRasterizationState)
    {
        /* Depth-bias values are only meaningful when biasing is enabled. */
        if (pRasterizationState->depthBiasEnable &&
            !(dynamic_states & AGXV_DYNAMIC_DEPTH_BIAS))
        {
            dynamic->depth_bias.constant_factor =
                pRasterizationState->depthBiasConstantFactor;
            dynamic->depth_bias.depth_bias_clamp =
                pRasterizationState->depthBiasClamp;
            dynamic->depth_bias.slope_factor =
                pRasterizationState->depthBiasSlopeFactor;
        }
        if (!(dynamic_states & AGXV_DYNAMIC_LINE_WIDTH))
            dynamic->line_width = pRasterizationState->lineWidth;
    }

    pipeline->dynamic_state.mask = dynamic_states;
}

static VkResult
agxv_graphics_pipeline_init(struct agxv_pipeline *pipeline,
                            struct agxv_device *device,
                            struct agxv_pipeline_cache *cache,
                            const VkGraphicsPipelineCreateInfo *pCreateInfo,
                            const VkAllocationCallbacks *alloc)
{
    pipeline->device = device;

    /* If rasterization is not enabled, various CreateInfo structs must be
        * ignored.
        */
    const bool raster_enabled =
        !pCreateInfo->pRasterizationState->rasterizerDiscardEnable;

    const VkPipelineViewportStateCreateInfo *vp_info =
        raster_enabled ? pCreateInfo->pViewportState : NULL;

    const VkPipelineDepthStencilStateCreateInfo *ds_info =
        raster_enabled ? pCreateInfo->pDepthStencilState : NULL;

    const VkPipelineRasterizationStateCreateInfo *rs_info =
        raster_enabled ? pCreateInfo->pRasterizationState : NULL;

    const VkPipelineColorBlendStateCreateInfo *cb_info =
        raster_enabled ? pCreateInfo->pColorBlendState : NULL;

    const VkPipelineMultisampleStateCreateInfo *ms_info =
        raster_enabled ? pCreateInfo->pMultisampleState : NULL;

    pipeline_init_dynamic_state(pipeline,
                                pCreateInfo->pDynamicState,
                                vp_info, ds_info, cb_info, rs_info);

    /* create shader stage */
    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
        VK_FROM_HANDLE(vk_shader_module, module, pCreateInfo->pStages[i].module);
        gl_shader_stage stage = agxv_shader_stage(pCreateInfo->pStages[i].stage);
        agxv_shader_compile_to_ir(pipeline, module,
                                 pCreateInfo->pStages[i].pName,
                                 stage,
                                 pCreateInfo->pStages[i].pSpecializationInfo);
        if (!pipeline->pipeline_nir[stage])
            return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    return VK_SUCCESS;
}

/* Allocate and initialize a single graphics pipeline.  On failure the
 * partially-constructed object is destroyed and *pPipeline is untouched.
 */
static VkResult
graphics_pipeline_create(VkDevice _device,
                         VkPipelineCache _cache,
                         const VkGraphicsPipelineCreateInfo *pCreateInfo,
                         const VkAllocationCallbacks *pAllocator,
                         VkPipeline *pPipeline)
{
    AGXV_FROM_HANDLE(agxv_device, device, _device);
    AGXV_FROM_HANDLE(agxv_pipeline_cache, cache, _cache);

    /* TODO: fall back to a device-level default pipeline cache when the
     * application supplies none.
     */

    struct agxv_pipeline *pipeline =
        vk_object_zalloc(&device->vk, pAllocator, sizeof(*pipeline),
                         VK_OBJECT_TYPE_PIPELINE);
    if (!pipeline)
        return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

    VkResult result =
        agxv_graphics_pipeline_init(pipeline, device, cache, pCreateInfo,
                                    pAllocator);
    if (result != VK_SUCCESS) {
        agxv_destroy_pipeline(pipeline, device, pAllocator);
        return result;
    }

    *pPipeline = agxv_pipeline_to_handle(pipeline);
    return VK_SUCCESS;
}

/* vkCreateGraphicsPipelines entry point.  Every requested pipeline is
 * attempted; entries that fail are set to VK_NULL_HANDLE and the most
 * recent error code is returned, while successful entries remain valid.
 */
VKAPI_ATTR VkResult VKAPI_CALL
agxv_CreateGraphicsPipelines(VkDevice _device,
                             VkPipelineCache pipelineCache,
                             uint32_t count,
                             const VkGraphicsPipelineCreateInfo *pCreateInfos,
                             const VkAllocationCallbacks *pAllocator,
                             VkPipeline *pPipelines)
{
    VkResult overall = VK_SUCCESS;

    for (uint32_t i = 0; i < count; i++) {
        VkResult res = graphics_pipeline_create(_device,
                                                pipelineCache,
                                                &pCreateInfos[i],
                                                pAllocator,
                                                &pPipelines[i]);
        if (res != VK_SUCCESS) {
            pPipelines[i] = VK_NULL_HANDLE;
            overall = res;
        }
    }

    return overall;
}