#include "agxv_private.h"
#include "vk_alloc.h"

#include "util/u_memory.h"

#include "nir.h"
#include "nir_lower_blend.h"


VKAPI_ATTR VkResult VKAPI_CALL
agxv_CreateCommandPool(VkDevice _device,
                       const VkCommandPoolCreateInfo *pCreateInfo,
                       const VkAllocationCallbacks *pAllocator,
                       VkCommandPool *pCmdPool)
{
    AGXV_FROM_HANDLE(agxv_device, device, _device);

    /* This driver exposes a single queue family. */
    assert(pCreateInfo->queueFamilyIndex == 0);

    struct agxv_cmd_pool *pool =
        vk_object_zalloc(&device->vk, pAllocator, sizeof(*pool),
                         VK_OBJECT_TYPE_COMMAND_POOL);
    if (!pool)
        return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

    /* Remember which allocator command buffers from this pool should use. */
    pool->alloc = pAllocator ? *pAllocator : device->vk.alloc;

    list_inithead(&pool->cmd_buffers);

    *pCmdPool = agxv_cmd_pool_to_handle(pool);
    return VK_SUCCESS;
}

/* (Re)initialize the driver-owned part of a command buffer and link it into
 * its pool.  The leading vk_object_base header is deliberately left intact
 * so the object stays valid across re-initialization. */
static void
cmd_buffer_init(struct agxv_cmd_buffer *cmd_buffer,
                struct agxv_device *device,
                struct agxv_cmd_pool *pool,
                VkCommandBufferLevel level)
{
    const size_t header = sizeof(struct vk_object_base);

    /* Zero everything that follows the object header. */
    memset((uint8_t *)cmd_buffer + header, 0, sizeof(*cmd_buffer) - header);

    cmd_buffer->device = device;
    cmd_buffer->pool = pool;
    cmd_buffer->level = level;

    assert(pool != NULL);
    list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

    list_inithead(&cmd_buffer->render_job);
}

/* Allocate one command buffer from @pool and return its handle through
 * @pCommandBuffer.  Returns VK_ERROR_OUT_OF_HOST_MEMORY on allocation
 * failure. */
static VkResult
cmd_buffer_create(struct agxv_device *device,
                  struct agxv_cmd_pool *pool,
                  VkCommandBufferLevel level,
                  VkCommandBuffer *pCommandBuffer)
{
    struct agxv_cmd_buffer *cmd_buffer =
        vk_object_zalloc(&device->vk, &pool->alloc, sizeof(*cmd_buffer),
                         VK_OBJECT_TYPE_COMMAND_BUFFER);
    if (!cmd_buffer)
        return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

    cmd_buffer_init(cmd_buffer, device, pool, level);
    *pCommandBuffer = agxv_cmd_buffer_to_handle(cmd_buffer);
    return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
agxv_AllocateCommandBuffers(VkDevice _device,
                            const VkCommandBufferAllocateInfo *pAllocateInfo,
                            VkCommandBuffer *pCommandBuffers)
{
    AGXV_FROM_HANDLE(agxv_device, device, _device);
    AGXV_FROM_HANDLE(agxv_cmd_pool, pool, pAllocateInfo->commandPool);

    VkResult result = VK_SUCCESS;
    uint32_t created = 0;

    while (created < pAllocateInfo->commandBufferCount) {
        result = cmd_buffer_create(device, pool, pAllocateInfo->level,
                                   &pCommandBuffers[created]);
        if (result != VK_SUCCESS)
            break;
        created++;
    }

    if (result != VK_SUCCESS) {
        /* Undo the partial allocation, then set every output entry to
         * VK_NULL_HANDLE as the spec requires on failure. */
        agxv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
                                created, pCommandBuffers);
        for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; i++)
            pCommandBuffers[i] = VK_NULL_HANDLE;
    }

    return result;
}

static void
cmd_buffer_destroy(struct agxv_cmd_buffer *cmd_buffer)
{
    list_del(&cmd_buffer->pool_link);
    vk_object_free(&cmd_buffer->device->vk, &cmd_buffer->pool->alloc, cmd_buffer);
}

VKAPI_ATTR void VKAPI_CALL
agxv_FreeCommandBuffers(VkDevice device,
                        VkCommandPool commandPool,
                        uint32_t commandBufferCount,
                        const VkCommandBuffer *pCommandBuffers)
{
    /* The spec permits VK_NULL_HANDLE entries in the array; skip them. */
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        AGXV_FROM_HANDLE(agxv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

        if (cmd_buffer != NULL)
            cmd_buffer_destroy(cmd_buffer);
    }
}

/* Normalized draw parameters shared by agxv_CmdDraw and agxv_CmdDrawIndexed.
 * NOTE(review): field naming mixes Vulkan camelCase and snake_case; consider
 * unifying once the draw path stabilizes. */
struct agxv_draw_info
{
    /* Indexed-draw parameters; zeroed by agxv_CmdDraw. */
    uint32_t indexCount;
    uint32_t firstIndex;
    int32_t vertexOffset;

    /* Non-indexed parameters; zeroed by agxv_CmdDrawIndexed. */
    uint32_t vertex_count;
    uint32_t first_vertex;

    /* Instancing parameters, filled by both entry points. */
    uint32_t instance_count;
    uint32_t first_instance;
};

/* Blend state portion of the shader key.  Blending and logic ops are
 * mutually exclusive, so per-RT blend state and the logicop function share
 * storage. */
struct agx_blend {
   bool logicop_enable, blend_enable;

   union {
      nir_lower_blend_rt rt[8]; /* valid when blend_enable */
      unsigned logicop_func;    /* valid when logicop_enable; presumably a
                                 * PIPE_LOGICOP_* value — confirm */
   };
};

/* Shader variant key: the backend compiler key plus the render-target state
 * that affects NIR lowering (blending and fragment output formats). */
struct asahi_shader_key {
   struct agx_shader_key base;
   struct agx_blend blend;
   unsigned nr_cbufs;              /* number of color buffers, passed to
                                    * nir_lower_fragcolor */
   enum pipe_format rt_formats[8]; /* only [0] is consumed by the blend
                                    * lowering below — see agxv_update_shader */
};

/* A compiled shader variant: the uploaded binary plus the metadata needed to
 * bind it. */
struct agx_compiled_shader {
   /* Mapped executable memory holding the binary followed by the packed
    * varying descriptors (see agxv_update_shader). */
   struct agx_bo *bo;

   /* GPU address of the varying descriptor inside bo
    * (TODO: is this the right place?) */
   uint64_t varyings;

   /* Metadata returned from the compiler */
   struct agx_shader_info info;
};

/* Compile the bound pipeline's NIR for @stage against the variant @key:
 * lower blending or logic ops into the fragment shader, compile with the
 * AGX backend, then upload the binary plus the packed varying descriptors
 * into a freshly created shader BO.
 *
 * NOTE(review): `compiled` is never stored anywhere nor freed before
 * returning, so the struct (and its BO, when created) leak on every call.
 * The "need to hash table" TODO below is presumably where a variant cache
 * taking ownership belongs — until then this function has no observable
 * effect on cmd_buf.
 */
static void agxv_update_shader(struct agxv_cmd_buffer *cmd_buf,
                               struct asahi_shader_key *key,
                               gl_shader_stage stage)
{

    /* TODO: need to hash table */

    struct agx_compiled_shader *compiled = CALLOC_STRUCT(agx_compiled_shader);
    struct util_dynarray binary;
    util_dynarray_init(&binary, NULL);

    /* Clone so the lowering passes below don't mutate the pipeline's NIR. */
    nir_shader *nir = nir_shader_clone(NULL, cmd_buf->state.gfx.pipeline->pipeline_nir[stage]);

    if (key->blend.blend_enable) {
        nir_lower_blend_options opts = {
            .format = {key->rt_formats[0]},
            .scalar_blend_const = true};

        memcpy(opts.rt, key->blend.rt, sizeof(opts.rt));
        NIR_PASS_V(nir, nir_lower_blend, opts);
    } else if (key->blend.logicop_enable) {
        nir_lower_blend_options opts = {
            .format = {key->rt_formats[0]},
            .logicop_enable = true,
            .logicop_func = key->blend.logicop_func,
        };

        NIR_PASS_V(nir, nir_lower_blend, opts);
    }

    if (stage == PIPE_SHADER_FRAGMENT)
        NIR_PASS_V(nir, nir_lower_fragcolor, key->nr_cbufs);

    agx_compile_shader_nir(nir, &key->base, &binary, &compiled->info);

    /* Pack the varying header + per-varying descriptors into a stack
     * buffer; it is copied into the BO (three times) below. */
    struct agx_varyings *varyings = &compiled->info.varyings;
    unsigned packed_varying_sz = (AGX_VARYING_HEADER_LENGTH + varyings->nr_descs * AGX_VARYING_LENGTH);
    uint8_t *packed_varyings = alloca(packed_varying_sz);

    agx_pack(packed_varyings, VARYING_HEADER, cfg) {
        cfg.triangle_slots = cfg.point_slots = varyings->nr_slots;
    }

    memcpy(packed_varyings + AGX_VARYING_HEADER_LENGTH, varyings->packed,
           varyings->nr_descs * AGX_VARYING_LENGTH);

    if (binary.size) {
        /* BO layout: binary, padded to 256 bytes, then three copies of the
         * packed varying descriptor. */
        struct agx_device *dev = &cmd_buf->device->pdevice->dev;
        compiled->bo = agx_bo_create(dev,
                                     ALIGN_POT(binary.size, 256) + (3 * packed_varying_sz),
                                     AGX_MEMORY_TYPE_SHADER);
        memcpy(compiled->bo->ptr.cpu, binary.data, binary.size);

        /* TODO: Why is the varying descriptor duplicated 3x? */
        unsigned offs = ALIGN_POT(binary.size, 256);
        for (unsigned copy = 0; copy < 3; ++copy)
        {
            memcpy(((uint8_t *)compiled->bo->ptr.cpu) + offs, packed_varyings, packed_varying_sz);
            offs += packed_varying_sz;
        }

        compiled->varyings = compiled->bo->ptr.gpu + ALIGN_POT(binary.size, 256);
    }

    ralloc_free(nir);
    util_dynarray_fini(&binary);
}

/* Record a draw into the command buffer (work in progress).
 *
 * Only draws without an index bias and without instancing are handled so
 * far; anything else trips unreachable() until implemented.
 */
static void cmd_buffer_draw(struct agxv_cmd_buffer *cmd_buf, struct agxv_draw_info *info)
{
    struct agxv_batch *batch;

    if (info->indexCount && info->firstIndex)
        unreachable("todo: index bias");
    /* Fixed copy-pasted message: this check rejects instancing, not bias. */
    if (info->instance_count != 1)
        unreachable("todo: instancing");

    batch = vk_zalloc(&cmd_buf->device->vk.alloc, sizeof(*batch), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
    if (batch == NULL) {
        /* TODO: propagate VK_ERROR_OUT_OF_HOST_MEMORY once command buffers
         * track a recording error state. */
        return;
    }

    /* TODO: batch is currently leaked — it must be attached to cmd_buf (or
     * freed) when draw recording is actually implemented. */

    /* 1. update vs/fs */

}

VKAPI_ATTR void VKAPI_CALL
agxv_CmdDraw(VkCommandBuffer commandBuffer,
             uint32_t vertexCount,
             uint32_t instanceCount,
             uint32_t firstVertex,
             uint32_t firstInstance)
{
    /* A zero-sized draw is a no-op. */
    if (vertexCount == 0 || instanceCount == 0)
        return;

    AGXV_FROM_HANDLE(agxv_cmd_buffer, cmd_buffer, commandBuffer);

    /* Indexed-draw fields (indexCount, firstIndex, vertexOffset) stay zero. */
    struct agxv_draw_info info = {
        .vertex_count = vertexCount,
        .first_vertex = firstVertex,
        .instance_count = instanceCount,
        .first_instance = firstInstance,
    };

    cmd_buffer_draw(cmd_buffer, &info);
}

VKAPI_ATTR void VKAPI_CALL
agxv_CmdDrawIndexed(VkCommandBuffer commandBuffer,
                    uint32_t indexCount,
                    uint32_t instanceCount,
                    uint32_t firstIndex,
                    int32_t vertexOffset,
                    uint32_t firstInstance)
{
    AGXV_FROM_HANDLE(agxv_cmd_buffer, cmd_buffer, commandBuffer);

    /* A zero-sized draw is a no-op. */
    if (indexCount == 0 || instanceCount == 0)
        return;

    /* Non-indexed fields (vertex_count, first_vertex) stay zero. */
    struct agxv_draw_info info = {
        .indexCount = indexCount,
        .firstIndex = firstIndex,
        .vertexOffset = vertexOffset,
        .instance_count = instanceCount,
        .first_instance = firstInstance,
    };

    cmd_buffer_draw(cmd_buffer, &info);
}
