/*
 * Copyright (C) 2019-2020 Yaong <yaongtime@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

// #include "stdio.h"

// #include <sys/types.h>
// #include <sys/stat.h>
// #include <fcntl.h>
// #include <unistd.h>
// #include <stdlib.h>
// #include <assert.h>
// #include <sys/types.h>
// #include <sys/stat.h>

// #include "nir.h"
// #include "spirv/nir_spirv.h"
// #include "vulkan/vulkan_core.h"

// struct spirv_info
// {
//     uint32_t *data;
//     size_t size;
// };

// static const struct spirv_to_nir_options default_spirv_options = {
//     .environment = NIR_SPIRV_VULKAN,
//     .lower_ubo_ssbo_access_to_offsets = true,
//     .caps = {false},
//     .ubo_addr_format = nir_address_format_32bit_index_offset,
//     .ssbo_addr_format = nir_address_format_32bit_index_offset,
//     .phys_ssbo_addr_format = nir_address_format_64bit_global,
//     .push_const_addr_format = nir_address_format_logical,
//     .shared_addr_format = nir_address_format_32bit_offset,
//     .frag_coord_is_sysval = false,
// };

// static const nir_shader_compiler_options vc4_nir_options = {
//     .lower_all_io_to_temps = true,
//     .lower_extract_byte = true,
//     .lower_extract_word = true,
//     .lower_fdiv = true,
//     .lower_ffma = true,
//     .lower_flrp32 = true,
//     .lower_fmod = true,
//     .lower_fpow = true,
//     .lower_fsat = true,
//     .lower_fsqrt = true,
//     .lower_ldexp = true,
//     .lower_negate = true,
//     .lower_rotate = true,
//     .lower_to_scalar = true,
//     .max_unroll_iterations = 32,
// };

// static struct nir_spirv_specialization *
// vk_spec_info_to_nir_spirv(const VkSpecializationInfo *spec_info,
//                           uint32_t *out_num_spec_entries)
// {
//     if (spec_info == NULL || spec_info->mapEntryCount == 0)
//         return NULL;

//     uint32_t num_spec_entries = spec_info->mapEntryCount;
//     struct nir_spirv_specialization *spec_entries = calloc(num_spec_entries, sizeof(*spec_entries));

//     for (uint32_t i = 0; i < num_spec_entries; i++)
//     {
//         VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
//         const void *data = spec_info->pData + entry.offset;
//         assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

//         spec_entries[i].id = spec_info->pMapEntries[i].constantID;
//         switch (entry.size)
//         {
//         case 8:
//             spec_entries[i].value.u64 = *(const uint64_t *)data;
//             break;
//         case 4:
//             spec_entries[i].value.u32 = *(const uint32_t *)data;
//             break;
//         case 2:
//             spec_entries[i].value.u16 = *(const uint16_t *)data;
//             break;
//         case 1:
//             spec_entries[i].value.u8 = *(const uint8_t *)data;
//             break;
//         default:
//             assert(!"Invalid spec constant size");
//             break;
//         }
//     }

//     *out_num_spec_entries = num_spec_entries;
//     return spec_entries;
// }

// static uint32_t *get_spirv_data(char *name, struct spirv_info *info)
// {
//     int fp;
//     uint32_t *data;

//     size_t filesize = -1;
//     struct stat statbuff;

//     if (stat(name, &statbuff) < 0)
//     {
//         printf("%s %d\n", __func__, __LINE__);
//         assert(0);
//     }

//     filesize = statbuff.st_size;

//     printf("spir-v file name: %s, length = %ld\n", name, filesize);

//     fp = open(name, O_RDONLY);
//     if (fp < 0)
//     {
//         printf("open spir-v file fail\n");
//         assert(0);
//     }

//     data = (uint32_t *)malloc(filesize);

//     if (read(fp, data, filesize) < 0)
//     {
//         printf("%s %d\n", __func__, __LINE__);
//         assert(0);
//     }

//     close(fp);

//     info->data = data;
//     info->size = filesize;

//     // char *byte = (char *)data;

//     // for(int i = 0; i < filesize; i++) {
//     //     printf("%02x ", byte[i]);
//     // }

//     // printf("\n");

//     return data;
// }

// int main(int argc, char *argv[])
// {
//     nir_shader *nir = NULL;
//     gl_shader_stage stage = MESA_SHADER_VERTEX;
//     struct spirv_info info;
//     uint32_t num_spec_entries = 0;

//     // get_spirv_data(argv[1], &info);
//     get_spirv_data("/home/workspace/linux/Vulkan/data/shaders/glsl/triangle/triangle.vert.spv", &info);
//     struct nir_spirv_specialization *spec_entries = vk_spec_info_to_nir_spirv(NULL, &num_spec_entries);

//     nir = spirv_to_nir(info.data, info.size / 4,
//                        spec_entries, num_spec_entries,
//                        stage, "main",
//                        &default_spirv_options, &vc4_nir_options);

//     nir_validate_shader(nir, "after spirv_to_nir");

//     nir_print_shader(nir, stderr);

//     return 0;
// }

/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

/*
 * A simple executable that opens a SPIR-V shader, converts it to NIR, and
 * dumps out the result.  This should be useful for testing the
 * spirv_to_nir code.
 */

#include "spirv/nir_spirv.h"

#include <sys/mman.h>
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <getopt.h>

#include "nir_builder.h"
#include "vc4_private.h"

#define WORD_SIZE 4


/* Shader and stage currently being lowered.  Set by
 * vc4_pipeline_nir_lower_io() so the per-instruction helpers
 * (vc4_nir_lower_output()) can reach them without threading extra
 * parameters through the NIR iteration callbacks.
 * NOTE(review): file-scope identifiers beginning with an underscore are
 * reserved in C — consider renaming.
 */
static nir_shader *_nir;
static gl_shader_stage _stage;

/* Options handed to spirv_to_nir(): Vulkan execution environment, no
 * optional SPIR-V capabilities, 32-bit index+offset addressing for
 * UBO/SSBO access (64-bit global addresses only for physical SSBO
 * pointers), logical addressing for push constants. */
static const struct spirv_to_nir_options default_spirv_options = {
    .environment = NIR_SPIRV_VULKAN,
    .caps = {false}, /* every optional capability disabled */
    .ubo_addr_format = nir_address_format_32bit_index_offset,
    .ssbo_addr_format = nir_address_format_32bit_index_offset,
    .phys_ssbo_addr_format = nir_address_format_64bit_global,
    .push_const_addr_format = nir_address_format_logical,
    .shared_addr_format = nir_address_format_32bit_offset,
    .frag_coord_is_sysval = false,
};

/* NIR compiler options for the VC4-style backend this tool targets:
 * everything is lowered to scalar ops, and operations the hardware
 * presumably lacks (ffma, fdiv, fpow, fsqrt, fmod, ldexp, rotates,
 * byte/word extracts, ...) are lowered in NIR.
 * NOTE(review): lower_flrp32 is set here, so vc4_optimize_nir() will run
 * the flrp-lowering pass — confirm this matches the real backend. */
static const nir_shader_compiler_options vc4_nir_options = {
    .lower_all_io_to_temps = true,
    .lower_extract_byte = true,
    .lower_extract_word = true,
    .lower_fdiv = true,
    .lower_ffma16 = true,
    .lower_ffma32 = true,
    .lower_ffma64 = true,
    .lower_flrp32 = true,
    .lower_fmod = true,
    .lower_fpow = true,
    .lower_fsat = true,
    .lower_fsqrt = true,
    .lower_ldexp = true,
    .lower_negate = true,
    .lower_rotate = true,
    .lower_to_scalar = true,
    .max_unroll_iterations = 32,
};

/* Translate a command-line stage name ("vertex", "fragment", ...) into the
 * corresponding gl_shader_stage value.  Returns MESA_SHADER_NONE when the
 * name is not recognized. */
static gl_shader_stage
stage_to_enum(char *stage)
{
    static const struct {
        const char *name;
        gl_shader_stage stage;
    } stages[] = {
        { "vertex",    MESA_SHADER_VERTEX },
        { "tess-ctrl", MESA_SHADER_TESS_CTRL },
        { "tess-eval", MESA_SHADER_TESS_EVAL },
        { "geometry",  MESA_SHADER_GEOMETRY },
        { "fragment",  MESA_SHADER_FRAGMENT },
        { "compute",   MESA_SHADER_COMPUTE },
        { "kernel",    MESA_SHADER_KERNEL },
    };

    for (size_t i = 0; i < sizeof(stages) / sizeof(stages[0]); i++) {
        if (strcmp(stage, stages[i].name) == 0)
            return stages[i].stage;
    }

    return MESA_SHADER_NONE;
}

/* Slot-count callback for nir_lower_io(): number of attribute locations
 * occupied by 'type'.  The 'bindless' flag is accepted to match the
 * callback signature but ignored. */
static int
type_size(const struct glsl_type *type, bool bindless)
{
    return glsl_count_attribute_slots(type, false);
}

/* Print the command-line help for this tool to stream 'f'. */
static void
print_usage(char *exec_name, FILE *f)
{
    /* Only the first line embeds the executable name. */
    fprintf(f, "Usage: %s [options] file\n", exec_name);
    fputs("Options:\n"
          "  -h  --help              Print this help.\n"
          "  -s, --stage <stage>     Specify the shader stage.  Valid stages are:\n"
          "                          vertex, tess-ctrl, tess-eval, geometry, fragment,\n"
          "                          compute, and kernel (OpenCL-style compute).\n"
          "  -e, --entry <name>      Specify the entry-point name.\n",
          f);
}

/* Run the standard NIR optimization passes on 's' in a loop until no pass
 * reports progress (fixed point).  flrp lowering is driven by the shader's
 * compiler options and is performed at most once, inside the loop, so that
 * the lowered code still gets constant-folded. */
static void
vc4_optimize_nir(struct nir_shader *s)
{
    bool progress;
    /* Bitmask of flrp bit-sizes to lower, taken from the compiler options
     * (vc4_nir_options sets lower_flrp32). */
    unsigned lower_flrp =
        (s->options->lower_flrp16 ? 16 : 0) |
        (s->options->lower_flrp32 ? 32 : 0) |
        (s->options->lower_flrp64 ? 64 : 0);

    do
    {
        progress = false;

        NIR_PASS_V(s, nir_lower_vars_to_ssa);
        NIR_PASS_V(s, nir_opt_deref);
        NIR_PASS(progress, s, nir_lower_alu_to_scalar, NULL, NULL);
        NIR_PASS(progress, s, nir_lower_phis_to_scalar);
        NIR_PASS(progress, s, nir_copy_prop);
        NIR_PASS(progress, s, nir_opt_remove_phis);
        NIR_PASS(progress, s, nir_opt_dce);
        NIR_PASS(progress, s, nir_opt_dead_cf);
        NIR_PASS(progress, s, nir_opt_cse);
        NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
        NIR_PASS(progress, s, nir_opt_algebraic);
        NIR_PASS(progress, s, nir_opt_constant_folding);
        if (lower_flrp != 0)
        {
            bool lower_flrp_progress = false;

            NIR_PASS(lower_flrp_progress, s, nir_lower_flrp,
                     lower_flrp,
                     false /* always_precise */);
            if (lower_flrp_progress)
            {
                /* Fold the constants the lowering just exposed. */
                NIR_PASS(progress, s, nir_opt_constant_folding);
                progress = true;
            }

            /* Nothing should rematerialize any flrps, so we only
             * need to do this lowering once. */
            lower_flrp = 0;
        }

        NIR_PASS(progress, s, nir_opt_undef);
        NIR_PASS(progress, s, nir_opt_loop_unroll,
                 nir_var_shader_in |
                     nir_var_shader_out |
                     nir_var_function_temp);
    } while (progress);
}

/* Handle a store_output intrinsic: for vertex shaders, remove stores to
 * every output except position and point size.  Relies on the file-scope
 * _nir/_stage set by vc4_pipeline_nir_lower_io(). */
static void
vc4_nir_lower_output(nir_builder *b,
                     nir_intrinsic_instr *intr)
{
        /* Find the shader_out variable whose driver location matches this
         * intrinsic's base. */
        nir_variable *output_var =
                nir_find_variable_with_driver_location(_nir, nir_var_shader_out,
                                                       nir_intrinsic_base(intr));
        assert(output_var);

        // nir_print_instr(&intr->instr, stdout);
        // printf("\n");

        /* Drop all vertex-shader output stores other than POS/PSIZ. */
        if (_stage == MESA_SHADER_VERTEX && output_var->data.location != VARYING_SLOT_POS &&
            output_var->data.location != VARYING_SLOT_PSIZ) {
                nir_instr_remove(&intr->instr);
                return;
        }
}

/* Dispatch I/O-related intrinsic instructions to their lowering helpers.
 * Non-intrinsic instructions and unhandled intrinsics are left alone. */
static void
vc4_nir_lower_io_instr(nir_builder *b,
                       struct nir_instr *instr, struct vc4_pipeline *pipeline)
{
    if (instr->type != nir_instr_type_intrinsic)
        return;

    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

    if (intr->intrinsic == nir_intrinsic_store_output) {
        vc4_nir_lower_output(b, intr);
    } else if (intr->intrinsic == nir_intrinsic_vulkan_resource_index) {
        /* Resource-index lowering is not wired up yet. */
        //   lower_vulkan_resource_index(b, intr, pipeline, pipeline->layout);
    }
}

/* Debug helper for texture instructions: print the texture and sampler
 * deref chains (when present) together with the descriptor set/binding of
 * the variables they resolve to.  Non-tex instructions are ignored.
 * Fixes the "bindling" typo in both diagnostic messages. */
static void
vc4_nir_lower_tex_instr(nir_builder *b,
                       struct nir_instr *instr)
{
    if (instr->type != nir_instr_type_tex)
        return;
    nir_tex_instr *tex = nir_instr_as_tex(instr);

    /* Texture deref source, if this instruction has one. */
    int tex_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_deref);

    if (tex_src_idx >= 0) {
        nir_deref_instr *deref = nir_src_as_deref(tex->src[tex_src_idx].src);
        nir_print_instr(&deref->instr, stdout); printf("\n");

        nir_variable *var = nir_deref_instr_get_variable(deref);
        printf("tex set = %d, binding = %d\n", var->data.descriptor_set, var->data.binding);
    }

    /* Sampler deref source, if present. */
    int sampler_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_deref);
    if (sampler_src_idx >= 0) {
        nir_deref_instr *deref = nir_src_as_deref(tex->src[sampler_src_idx].src);
        nir_print_instr(&deref->instr, stdout); printf("\n");

        nir_variable *var = nir_deref_instr_get_variable(deref);
        printf("sampler set = %d, binding = %d\n", var->data.descriptor_set, var->data.binding);
    }
}

// static void
// lower_load_push_constant(nir_builder *b, const nir_instr *instr)
// {
//     if (instr->type != nir_instr_type_intrinsic)
//         return;

//     nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
//     if (intr->intrinsic != nir_intrinsic_load_push_constant)
//         return;

//     assert(intr->intrinsic == nir_intrinsic_load_push_constant);
//     intr->intrinsic = nir_intrinsic_load_uniform;
// }

/* Rewrite a load_push_constant intrinsic into per-component load_uniform
 * intrinsics, recombine the components with a vec, and remove the original
 * instruction.  Instructions that are not load_push_constant are ignored. */
static void
lower_load_push_constant(nir_builder *b, const nir_instr *instr)
{
    /* Guard the cast: callers may hand us any instruction, and
     * nir_instr_as_intrinsic() on a non-intrinsic is invalid.  (The earlier,
     * commented-out variant of this helper performed the same check.) */
    if (instr->type != nir_instr_type_intrinsic)
        return;

    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
    if (intr->intrinsic != nir_intrinsic_load_push_constant)
        return;

    /* NIR vectors carry at most 4 components, so 4 slots suffice here. */
    nir_ssa_def *dests[4];
    for (unsigned i = 0; i < intr->num_components; i++)
    {
        /* One scalar load_uniform per component of the original load. */
        nir_intrinsic_instr *intr_comp =
            nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
        intr_comp->num_components = 1;
        nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1,
                          intr->dest.ssa.bit_size, NULL);

        /* Offset each component by 4 bytes from the original base. */
        nir_intrinsic_set_base(intr_comp,
                               nir_intrinsic_base(intr) +
                                   i * 4);

        intr_comp->src[0] = nir_src_for_ssa(intr->src[0].ssa);

        dests[i] = &intr_comp->dest.ssa;

        nir_builder_instr_insert(b, &intr_comp->instr);
    }

    /* Reassemble the scalar loads and redirect all users of the original. */
    nir_ssa_def *vec = nir_vec(b, dests, intr->num_components);
    nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(vec));

    nir_instr_remove(&intr->instr);
}

/* Walk every instruction of 'impl', placing the builder cursor before each
 * one, and dispatch tex/intrinsic instructions to their lowering helpers.
 * Uses the _safe iterator because the helpers may remove instructions.
 * Always reports progress. */
static bool
vc4_nir_lower_io_impl(nir_function_impl *impl, struct vc4_pipeline *pipeline)
{
   nir_builder b;
   nir_builder_init(&b, impl);

   nir_foreach_block(block, impl)
   {
       nir_foreach_instr_safe(instr, block) {
           /* New instructions created by the helpers land before 'instr'. */
           b.cursor = nir_before_instr(instr);
           switch (instr->type) {
           case nir_instr_type_tex:
               vc4_nir_lower_tex_instr(&b, instr);
               break;
           case nir_instr_type_intrinsic:
               vc4_nir_lower_io_instr(&b, instr, pipeline);
            //    lower_load_push_constant(&b, instr);
               break;
           default:
               break;
           }
       }
   }

   /* Only instructions were touched, so block indices and dominance are
    * still valid. */
   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   return true;
}

/* Run the I/O lowering over every function implementation in shader 's'.
 * Stashes the shader and stage in file-scope state so the per-instruction
 * helpers can reach them. */
static void vc4_pipeline_nir_lower_io(nir_shader *s, gl_shader_stage stage,struct vc4_pipeline *pipeline)
{
   _nir = s;
   _stage = stage;

   nir_foreach_function(func, s) {
      /* Declarations without an implementation have nothing to lower. */
      if (!func->impl)
         continue;
      vc4_nir_lower_io_impl(func->impl, pipeline);
   }
}

/* Full lowering/optimization pipeline applied after spirv_to_nir():
 * inline everything into the entry point, lower variables and I/O to the
 * forms the backend expects, optimize to a fixed point, then run the
 * tool-local I/O lowering.  Pass order follows the usual Mesa driver
 * sequence and is significant — do not reorder casually. */
static void vc4_nir_opt(nir_shader *nir, gl_shader_stage stage)
{
    /* Placeholder pipeline: this tool has no real pipeline layout. */
    struct vc4_pipeline dumy_pipeline = {0};

    /* Prepare for inlining: lower initializers/returns, then inline. */
    NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
    NIR_PASS_V(nir, nir_lower_returns);
    NIR_PASS_V(nir, nir_inline_functions);
    NIR_PASS_V(nir, nir_opt_deref);
    NIR_PASS_V(nir, nir_copy_prop);

    /* Everything is inlined now; drop all functions but the entry point. */
    foreach_list_typed_safe(nir_function, func, node, &nir->functions)
    {
        if (!func->is_entrypoint)
            exec_node_remove(&func->node);
    }
    assert(exec_list_length(&nir->functions) == 1);
    NIR_PASS_V(nir, nir_lower_variable_initializers, ~nir_var_function_temp);

    NIR_PASS_V(nir, nir_split_var_copies);
    NIR_PASS_V(nir, nir_split_per_member_structs);

    NIR_PASS_V(nir, nir_remove_dead_variables,
               nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared,
               NULL);

    NIR_PASS_V(nir, nir_propagate_invariant);

    NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);

    NIR_PASS_V(nir, nir_lower_global_vars_to_local);
    NIR_PASS_V(nir, nir_split_var_copies);
    NIR_PASS_V(nir, nir_lower_var_copies);

    NIR_PASS_V(nir, nir_opt_copy_prop_vars);
    NIR_PASS_V(nir, nir_opt_combine_stores, nir_var_all);

    /* No indirect addressing on shader in/outs: lower all of it. */
    NIR_PASS_V(nir, nir_lower_indirect_derefs, nir_var_shader_in | nir_var_shader_out, UINT32_MAX);

    NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);

    /* Assign driver locations; vc4_nir_lower_output() relies on these. */
    nir_assign_io_var_locations(nir, nir_var_shader_in, &nir->num_inputs, stage);
    nir_assign_io_var_locations(nir, nir_var_shader_out, &nir->num_outputs, stage);

    NIR_PASS_V(nir, nir_lower_system_values);
    NIR_PASS_V(nir, nir_lower_compute_system_values, NULL);

    NIR_PASS_V(nir, nir_lower_array_deref_of_vec,
            nir_var_mem_ubo | nir_var_mem_ssbo,
            nir_lower_direct_array_deref_of_vec_load);

    NIR_PASS_V(nir, nir_lower_frexp);

    // NIR_PASS_V(nir, nir_lower_explicit_io,
    //            nir_var_mem_ubo | nir_var_mem_ssbo,
    //            nir_address_format_vec2_index_32bit_offset);

    /* Address formats here must match default_spirv_options above. */
    NIR_PASS_V(nir, nir_lower_explicit_io,
               nir_var_mem_push_const,
               nir_address_format_32bit_offset);

    NIR_PASS_V(nir, nir_lower_explicit_io,
               nir_var_mem_ubo | nir_var_mem_ssbo,
               nir_address_format_32bit_index_offset);

    if (nir->info.stage == MESA_SHADER_VERTEX)
        NIR_PASS_V(nir, nir_lower_point_size, 1.0f, 0.0f);

    NIR_PASS_V(nir, nir_lower_regs_to_ssa);
    NIR_PASS_V(nir, nir_normalize_cubemap_coords);

    NIR_PASS_V(nir, nir_lower_load_const_to_scalar);

    /* Vulkan uses the separate-shader linking model */
    nir->info.separate_shader = true;

    vc4_optimize_nir(nir);

    vc4_pipeline_nir_lower_io(nir, stage, &dumy_pipeline);

    // struct nir_lower_tex_options tex_options = {
    //     /* We would need to implement txs, but we don't want the
    //         * int/float conversions
    //         */
    //     .lower_rect = false,

    //     .lower_txp = ~0,

    //     /* Apply swizzles to all samplers. */
    //     .swizzle_result = ~0,
    // };

    // NIR_PASS_V(nir, nir_lower_tex, &tex_options);

    // if (stage == MESA_SHADER_FRAGMENT)
    //     NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);

    // nir_print_shader(nir, stderr);

    /* Convert variable-based I/O into load/store intrinsics, then clean up
     * once more. */
    NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
               type_size, (nir_lower_io_options)0);

    vc4_optimize_nir(nir);
}

/* Entry point: parse options, mmap the SPIR-V file given as the positional
 * argument, translate it to NIR, print the raw NIR, run the lowering
 * pipeline, and print the final shader.
 * Fixes: missing-filename check (argv[optind] could be NULL), NULL optarg
 * passed to %s on unrecognized options (UB), unchecked lseek/empty file,
 * and the file descriptor/mapping are now released explicitly. */
int main(int argc, char **argv)
{
    gl_shader_stage shader_stage = MESA_SHADER_FRAGMENT;
    char *entry_point = "main";
    int ch;

    static struct option long_options[] =
        {
            {"help", no_argument, 0, 'h'},
            {"stage", required_argument, 0, 's'},
            {"entry", required_argument, 0, 'e'},
            {0, 0, 0, 0}};

    while ((ch = getopt_long(argc, argv, "hs:e:", long_options, NULL)) != -1)
    {
        switch (ch)
        {
        case 'h':
            print_usage(argv[0], stdout);
            return 0;
        case 's':
            shader_stage = stage_to_enum(optarg);
            if (shader_stage == MESA_SHADER_NONE)
            {
                fprintf(stderr, "Unknown stage \"%s\"\n", optarg);
                print_usage(argv[0], stderr);
                return 1;
            }
            break;
        case 'e':
            entry_point = optarg;
            break;
        default:
            /* getopt_long has already printed a diagnostic; optarg is NULL
             * here, so it must not be fed to a %s conversion. */
            print_usage(argv[0], stderr);
            return 1;
        }
    }

    /* The SPIR-V file is a required positional argument. */
    if (optind >= argc)
    {
        fprintf(stderr, "Missing input file\n");
        print_usage(argv[0], stderr);
        return 1;
    }

    const char *filename = argv[optind];
    int fd = open(filename, O_RDONLY);
    if (fd < 0)
    {
        fprintf(stderr, "Failed to open %s\n", filename);
        return 1;
    }

    /* Determine the file size; SPIR-V is a stream of 32-bit words. */
    off_t len = lseek(fd, 0, SEEK_END);
    if (len <= 0 || len % WORD_SIZE != 0)
    {
        fprintf(stderr, "File length isn't a multiple of the word size\n");
        fprintf(stderr, "Are you sure this is a valid SPIR-V shader?\n");
        close(fd);
        return 1;
    }

    size_t word_count = len / WORD_SIZE;

    const void *map = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
    if (map == MAP_FAILED)
    {
        fprintf(stderr, "Failed to mmap the file: errno=%d, %s\n",
                errno, strerror(errno));
        close(fd);
        return 1;
    }
    /* The mapping stays valid after the descriptor is closed. */
    close(fd);

    glsl_type_singleton_init_or_ref();

    nir_shader *nir = spirv_to_nir(map, word_count, NULL, 0,
                                   shader_stage, entry_point,
                                   &default_spirv_options, &vc4_nir_options);

    if (nir)
        nir_print_shader(nir, stderr);
    else {
        fprintf(stderr, "SPIRV to NIR compilation failed\n");
        munmap((void *)map, len);
        return -1;
    }

    fprintf(stderr, "=========================================================================\n\n");

    vc4_nir_opt(nir, shader_stage);

    fprintf(stderr, "=================================== Shader Output ======================================\n\n");
    nir_print_shader(nir, stdout);

    glsl_type_singleton_decref();
    munmap((void *)map, len);

    return 0;
}
