﻿// 你好，世界。世界、今日は～ (Code Learning Studio® CPU Hacker™ Product)
// VulkanTest.cpp : This file contains the "main" function. Program execution begins and ends there.
//

#include <Windows.h>

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <assert.h>
#define _USE_MATH_DEFINES
#include <math.h>

#include <vulkan/vulkan.h>
#include <vulkan/vulkan_win32.h>
#include <vulkan/vk_sdk_platform.h>

#ifdef _WIN32
#define FILE_BINARY_MODE                    "b"
#else
#define FILE_BINARY_MODE
#endif // _WIN32

#define APP_SHORT_NAME              "Vulkan Test"
#define APP_SHORT_NAME_W            L"Vulkan Test"

#define MILLION 1000000LL
#define BILLION 1000000000LL

// Allow a maximum of two outstanding presentation operations.
#define FRAME_LAG 2
#define DEMO_TEXTURE_COUNT 1

#define degreesToRadians(angleDegrees) (angleDegrees * M_PI / 180.0)

// Device-level proc-address loader; resolved once the VkDevice exists.
static PFN_vkGetDeviceProcAddr g_gdpa = NULL;

// Minimal column-major linear-algebra types (linmath.h style):
// a mat4x4 is an array of four column vectors.
typedef float vec4[4];
typedef float vec3[3];
typedef vec4 mat4x4[4];

/*
 * structure to track all objects related to a texture.
 */
struct texture_object
{
    VkSampler sampler;         // sampler bound alongside the image view

    VkImage image;             // the texture image
    VkBuffer buffer;           // NOTE(review): appears to back staging uploads — confirm at use sites
    VkImageLayout imageLayout; // layout the image is expected to be in when used

    VkMemoryAllocateInfo mem_alloc;  // kept so allocation size/type can be re-read later
    VkDeviceMemory mem;              // backing device memory
    VkImageView view;                // view referenced by descriptor sets
    int32_t tex_width, tex_height;   // texel dimensions
};

// NOTE(review): incremented when validation reports a problem — the debug
// callback that writes it is outside this chunk; confirm.
static int validation_error = 0;

// Uniform block consumed by the vertex shader.
struct vktexcube_vs_uniform
{
    // Must start with MVP
    float mvp[4][4];
    float position[12 * 3][4];  // one vec4 position per vertex (12 triangles * 3 vertices)
    float attr[12 * 3][4];      // one vec4 attribute per vertex (UVs in .xy)
};

// VK_EXT_debug_utils entry points, resolved at runtime (the loader does not
// export extension functions directly).
static PFN_vkCreateDebugUtilsMessengerEXT fpCreateDebugUtilsMessengerEXT;
static PFN_vkDestroyDebugUtilsMessengerEXT fpDestroyDebugUtilsMessengerEXT;
static PFN_vkSubmitDebugUtilsMessageEXT fpSubmitDebugUtilsMessageEXT;
static PFN_vkCmdBeginDebugUtilsLabelEXT fpCmdBeginDebugUtilsLabelEXT;
static PFN_vkCmdEndDebugUtilsLabelEXT fpCmdEndDebugUtilsLabelEXT;
static PFN_vkCmdInsertDebugUtilsLabelEXT fpCmdInsertDebugUtilsLabelEXT;
static PFN_vkSetDebugUtilsObjectNameEXT fpSetDebugUtilsObjectNameEXT;

/* Dot product of two 3-component vectors. */
static inline float vec3_mul_inner(vec3 const a, vec3 const b)
{
    float sum = 0.f;
    for (int k = 0; k < 3; ++k) {
        sum += a[k] * b[k];
    }
    return sum;
}

/* Euclidean length of a 3-component vector. */
static inline float vec3_len(vec3 const v)
{
    float const squared = vec3_mul_inner(v, v);
    return sqrtf(squared);
}

/* r = v * s, component-wise. */
static inline void vec3_scale(vec3 r, vec3 const v, float const s)
{
    r[0] = v[0] * s;
    r[1] = v[1] * s;
    r[2] = v[2] * s;
}

/* Normalize v into r.  NOTE(review): divides by the length — assumes v is
 * non-zero; callers here guard with a length check where it matters. */
static inline void vec3_norm(vec3 r, vec3 const v)
{
    float const inv_len = 1.f / vec3_len(v);
    vec3_scale(r, v, inv_len);
}

/* r = v * s, component-wise, for a 4-vector. */
static inline void vec4_scale(vec4 r, vec4 v, float s)
{
    r[0] = v[0] * s;
    r[1] = v[1] * s;
    r[2] = v[2] * s;
    r[3] = v[3] * s;
}

/* r = a + b, component-wise, for 4-vectors. */
static inline void vec4_add(vec4 r, vec4 const a, vec4 const b)
{
    for (int k = 0; k < 4; ++k) {
        r[k] = a[k] + b[k];
    }
}
/* r = a - b, component-wise, for 4-vectors. */
static inline void vec4_sub(vec4 r, vec4 const a, vec4 const b)
{
    for (int k = 0; k < 4; ++k) {
        r[k] = a[k] - b[k];
    }
}

/*
 * M = a * b (column-major 4x4 matrix product).
 *
 * Accumulates into a local temporary and copies it out at the end, so the
 * call is safe even if M aliases a or b (upstream linmath.h does the same).
 * The previous version wrote into M while still reading a and b, which would
 * corrupt the result for an aliased call such as mat4x4_mul(X, X, Y).
 */
static inline void mat4x4_mul(mat4x4 M, mat4x4 a, mat4x4 b)
{
    mat4x4 temp;
    int k, r, c;
    for (c = 0; c < 4; ++c)
        for (r = 0; r < 4; ++r) {
            temp[c][r] = 0.f;
            for (k = 0; k < 4; ++k) temp[c][r] += a[k][r] * b[c][k];
        }
    for (c = 0; c < 4; ++c)
        for (r = 0; r < 4; ++r) M[c][r] = temp[c][r];
}

/* Copy matrix N into M. */
static inline void mat4x4_dup(mat4x4 M, mat4x4 N)
{
    for (int col = 0; col < 4; ++col) {
        for (int row = 0; row < 4; ++row) {
            M[col][row] = N[col][row];
        }
    }
}

/* M = outer product a * b^T in the upper-left 3x3; all other entries zero. */
static inline void mat4x4_from_vec3_mul_outer(mat4x4 M, vec3 a, vec3 b)
{
    for (int row = 0; row < 4; ++row) {
        for (int col = 0; col < 4; ++col) {
            if (row < 3 && col < 3) {
                M[row][col] = a[row] * b[col];
            } else {
                M[row][col] = 0.f;
            }
        }
    }
}

/* M = a * k: scale every element of the 4x4 matrix by k. */
static inline void mat4x4_scale(mat4x4 M, mat4x4 a, float k)
{
    for (int col = 0; col < 4; ++col) {
        for (int row = 0; row < 4; ++row) {
            M[col][row] = a[col][row] * k;
        }
    }
}

/* Set M to the 4x4 identity matrix. */
static inline void mat4x4_identity(mat4x4 M)
{
    for (int col = 0; col < 4; ++col) {
        for (int row = 0; row < 4; ++row) {
            M[col][row] = 0.f;
        }
    }
    for (int d = 0; d < 4; ++d) {
        M[d][d] = 1.f;
    }
}

/* M = a + b, element-wise. */
static inline void mat4x4_add(mat4x4 M, mat4x4 a, mat4x4 b)
{
    for (int col = 0; col < 4; ++col) {
        for (int row = 0; row < 4; ++row) {
            M[col][row] = a[col][row] + b[col][row];
        }
    }
}

/* M = a - b, element-wise. */
static inline void mat4x4_sub(mat4x4 M, mat4x4 a, mat4x4 b)
{
    for (int col = 0; col < 4; ++col) {
        for (int row = 0; row < 4; ++row) {
            M[col][row] = a[col][row] - b[col][row];
        }
    }
}

/* r = a - b, component-wise, for 3-vectors. */
static inline void vec3_sub(vec3 r, vec3 const a, vec3 const b)
{
    r[0] = a[0] - b[0];
    r[1] = a[1] - b[1];
    r[2] = a[2] - b[2];
}

/* Extract row i of column-major matrix M into r (element i of each column). */
static inline void mat4x4_row(vec4 r, mat4x4 M, int i)
{
    r[0] = M[0][i];
    r[1] = M[1][i];
    r[2] = M[2][i];
    r[3] = M[3][i];
}

/* r = a x b (3D cross product).  Computed into locals first so the output
 * is consistent even for partially overlapping arguments. */
static inline void vec3_mul_cross(vec3 r, vec3 const a, vec3 const b)
{
    float const cx = a[1] * b[2] - a[2] * b[1];
    float const cy = a[2] * b[0] - a[0] * b[2];
    float const cz = a[0] * b[1] - a[1] * b[0];
    r[0] = cx;
    r[1] = cy;
    r[2] = cz;
}

/* Dot product of two 4-component vectors. */
static inline float vec4_mul_inner(vec4 a, vec4 b)
{
    float acc = 0.f;
    for (int k = 0; k < 4; ++k) {
        acc += a[k] * b[k];
    }
    return acc;
}

/*
 * R = M * rotation(angle radians about axis (x, y, z)).
 * Builds the rotation with the Rodrigues form  rot = T + c*(I - T) + s*S,
 * where T is the outer product of the normalized axis with itself and S is
 * the axis' skew-symmetric cross-product matrix.
 */
static inline void mat4x4_rotate(mat4x4 R, mat4x4 M, float x, float y, float z, float angle)
{
    float s = sinf(angle);
    float c = cosf(angle);
    vec3 u = { x, y, z };

    // Near-zero axis would blow up normalization; just return M unchanged.
    if (vec3_len(u) > 1e-4)
    {
        vec3_norm(u, u);
        mat4x4 T;
        mat4x4_from_vec3_mul_outer(T, u, u);

        // S = skew-symmetric cross-product matrix of u, scaled by sin(angle).
        mat4x4 S = { {0, u[2], -u[1], 0}, {-u[2], 0, u[0], 0}, {u[1], -u[0], 0, 0}, {0, 0, 0, 0} };
        mat4x4_scale(S, S, s);

        // C = (I - T) * cos(angle)
        mat4x4 C;
        mat4x4_identity(C);
        mat4x4_sub(C, C, T);

        mat4x4_scale(C, C, c);

        // T becomes the full rotation matrix: T + C + S.
        mat4x4_add(T, T, C);
        mat4x4_add(T, T, S);

        // Restore the homogeneous 1 so T is a valid 4x4 transform.
        T[3][3] = 1.;
        mat4x4_mul(R, M, T);
    }
    else {
        mat4x4_dup(R, M);
    }
}

/* Post-multiply M in place by a translation of (x, y, z): each element of
 * the last column gains the dot product of its row with (x, y, z, 0). */
static inline void mat4x4_translate_in_place(mat4x4 M, float x, float y, float z)
{
    vec4 t = { x, y, z, 0 };
    for (int row = 0; row < 4; ++row)
    {
        vec4 current_row;
        mat4x4_row(current_row, M, row);
        M[3][row] += vec4_mul_inner(current_row, t);
    }
}

/*
 * Build a perspective projection matrix from vertical field of view y_fov
 * (radians), aspect ratio, and near/far plane distances n and f.
 * NOTE(review): this is the classic OpenGL-style form (m[2][3] = -1,
 * depth mapped via -(f+n)/(f-n)); confirm the caller applies any Vulkan
 * clip-space correction it needs.
 */
static inline void mat4x4_perspective(mat4x4 m, float y_fov, float aspect, float n, float f)
{
    /* NOTE: Degrees are an unhandy unit to work with.
     * linmath.h uses radians for everything! */
    float const a = (float)(1.f / tan(y_fov / 2.f));

    m[0][0] = a / aspect;
    m[0][1] = 0.f;
    m[0][2] = 0.f;
    m[0][3] = 0.f;

    m[1][0] = 0.f;
    m[1][1] = a;
    m[1][2] = 0.f;
    m[1][3] = 0.f;

    m[2][0] = 0.f;
    m[2][1] = 0.f;
    m[2][2] = -((f + n) / (f - n));
    m[2][3] = -1.f;

    m[3][0] = 0.f;
    m[3][1] = 0.f;
    m[3][2] = -((2.f * f * n) / (f - n));
    m[3][3] = 0.f;
}

/*
 * Build a view matrix looking from eye toward center with the given up
 * vector: basis vectors s (right), t (true up), -f (backward) form the
 * rotation, followed by a translation moving eye to the origin.
 */
static inline void mat4x4_look_at(mat4x4 m, vec3 eye, vec3 center, vec3 up)
{
    /* Adapted from Android's OpenGL Matrix.java.                        */
    /* See the OpenGL GLUT documentation for gluLookAt for a description */
    /* of the algorithm. We implement it in a straightforward way:       */

    /* TODO: The negation of f can be spared by swapping the order of
     *       operands in the following cross products in the right way. */
    vec3 f;
    vec3_sub(f, center, eye);
    vec3_norm(f, f);

    vec3 s;
    vec3_mul_cross(s, f, up);
    vec3_norm(s, s);

    vec3 t;
    vec3_mul_cross(t, s, f);

    m[0][0] = s[0];
    m[0][1] = t[0];
    m[0][2] = -f[0];
    m[0][3] = 0.f;

    m[1][0] = s[1];
    m[1][1] = t[1];
    m[1][2] = -f[1];
    m[1][3] = 0.f;

    m[2][0] = s[2];
    m[2][1] = t[2];
    m[2][2] = -f[2];
    m[2][3] = 0.f;

    m[3][0] = 0.f;
    m[3][1] = 0.f;
    m[3][2] = 0.f;
    m[3][3] = 1.f;

    /* Shift the eye position to the origin. */
    mat4x4_translate_in_place(m, -eye[0], -eye[1], -eye[2]);
}

/*
 * Current monotonic time in NANOSECONDS from QueryPerformanceCounter.
 *
 * Bug fix: the conversion previously used 1,000,000 (microseconds) even
 * though the function's name — and the nanosecond-based refresh-duration
 * machinery (refresh_duration, target_IPD, MILLION/BILLION constants) —
 * require nanoseconds, making all present-timing math off by 1000x.
 */
static uint64_t getTimeInNanoseconds(void)
{
    LARGE_INTEGER freq;
    LARGE_INTEGER count;
    QueryPerformanceCounter(&count);
    QueryPerformanceFrequency(&freq);
    assert(freq.LowPart != 0 || freq.HighPart != 0);

    if (count.QuadPart < MAXLONGLONG / 1000000000) {
        // Small enough to multiply first, which preserves precision.
        assert(freq.QuadPart != 0);
        return count.QuadPart * 1000000000 / freq.QuadPart;
    }
    else {
        // Counter too large to scale first; divide the frequency instead.
        assert(freq.QuadPart >= 1000000000);
        return count.QuadPart / (freq.QuadPart / 1000000000);
    }
}

/*
 * printf-style debug logging helper: formats to stdout and flushes so the
 * message is visible immediately even if the program crashes afterwards.
 * The format parameter is const-qualified (matching printf's contract);
 * callers passing string literals or char* are unaffected.
 */
static void DbgMsg(const char* fmt, ...)
{
    va_list va;
    va_start(va, fmt);
    vprintf(fmt, va);
    va_end(va);
    fflush(stdout);
}

/*
 * Map a VkObjectType enum value to the spelled-out enumerator name, for use
 * in debug-messenger output.  Unrecognized values get a fixed placeholder.
 */
static inline const char* string_VkObjectType(VkObjectType input_value)
{
    switch ((VkObjectType)input_value) {
    case VK_OBJECT_TYPE_QUERY_POOL:
        return "VK_OBJECT_TYPE_QUERY_POOL";
    case VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION:
        return "VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION";
    case VK_OBJECT_TYPE_SEMAPHORE:
        return "VK_OBJECT_TYPE_SEMAPHORE";
    case VK_OBJECT_TYPE_SHADER_MODULE:
        return "VK_OBJECT_TYPE_SHADER_MODULE";
    case VK_OBJECT_TYPE_SWAPCHAIN_KHR:
        return "VK_OBJECT_TYPE_SWAPCHAIN_KHR";
    case VK_OBJECT_TYPE_SAMPLER:
        return "VK_OBJECT_TYPE_SAMPLER";
    case VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NV:
        return "VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NV";
    case VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT:
        return "VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT";
    case VK_OBJECT_TYPE_IMAGE:
        return "VK_OBJECT_TYPE_IMAGE";
    case VK_OBJECT_TYPE_UNKNOWN:
        return "VK_OBJECT_TYPE_UNKNOWN";
    case VK_OBJECT_TYPE_DESCRIPTOR_POOL:
        return "VK_OBJECT_TYPE_DESCRIPTOR_POOL";
    case VK_OBJECT_TYPE_COMMAND_BUFFER:
        return "VK_OBJECT_TYPE_COMMAND_BUFFER";
    case VK_OBJECT_TYPE_BUFFER:
        return "VK_OBJECT_TYPE_BUFFER";
    case VK_OBJECT_TYPE_SURFACE_KHR:
        return "VK_OBJECT_TYPE_SURFACE_KHR";
    case VK_OBJECT_TYPE_INSTANCE:
        return "VK_OBJECT_TYPE_INSTANCE";
    case VK_OBJECT_TYPE_VALIDATION_CACHE_EXT:
        return "VK_OBJECT_TYPE_VALIDATION_CACHE_EXT";
    case VK_OBJECT_TYPE_IMAGE_VIEW:
        return "VK_OBJECT_TYPE_IMAGE_VIEW";
    case VK_OBJECT_TYPE_DESCRIPTOR_SET:
        return "VK_OBJECT_TYPE_DESCRIPTOR_SET";
    case VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT:
        return "VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT";
    case VK_OBJECT_TYPE_COMMAND_POOL:
        return "VK_OBJECT_TYPE_COMMAND_POOL";
    case VK_OBJECT_TYPE_PHYSICAL_DEVICE:
        return "VK_OBJECT_TYPE_PHYSICAL_DEVICE";
    case VK_OBJECT_TYPE_DISPLAY_KHR:
        return "VK_OBJECT_TYPE_DISPLAY_KHR";
    case VK_OBJECT_TYPE_BUFFER_VIEW:
        return "VK_OBJECT_TYPE_BUFFER_VIEW";
    case VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT:
        return "VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT";
    case VK_OBJECT_TYPE_FRAMEBUFFER:
        return "VK_OBJECT_TYPE_FRAMEBUFFER";
    case VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE:
        return "VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE";
    case VK_OBJECT_TYPE_PIPELINE_CACHE:
        return "VK_OBJECT_TYPE_PIPELINE_CACHE";
    case VK_OBJECT_TYPE_PIPELINE_LAYOUT:
        return "VK_OBJECT_TYPE_PIPELINE_LAYOUT";
    case VK_OBJECT_TYPE_DEVICE_MEMORY:
        return "VK_OBJECT_TYPE_DEVICE_MEMORY";
    case VK_OBJECT_TYPE_FENCE:
        return "VK_OBJECT_TYPE_FENCE";
    case VK_OBJECT_TYPE_QUEUE:
        return "VK_OBJECT_TYPE_QUEUE";
    case VK_OBJECT_TYPE_DEVICE:
        return "VK_OBJECT_TYPE_DEVICE";
    case VK_OBJECT_TYPE_RENDER_PASS:
        return "VK_OBJECT_TYPE_RENDER_PASS";
    case VK_OBJECT_TYPE_DISPLAY_MODE_KHR:
        return "VK_OBJECT_TYPE_DISPLAY_MODE_KHR";
    case VK_OBJECT_TYPE_EVENT:
        return "VK_OBJECT_TYPE_EVENT";
    case VK_OBJECT_TYPE_PIPELINE:
        return "VK_OBJECT_TYPE_PIPELINE";
    default:
        return "Unhandled VkObjectType";
    }
}

//--------------------------------------------------------------------------------------
// Mesh and VertexFormat Data
//--------------------------------------------------------------------------------------
// clang-format off
// Positions for a cube spanning [-1, 1] on each axis:
// 6 faces * 2 triangles * 3 vertices = 36 vec3 positions.
static const float g_vertex_buffer_data[] = {
    -1.0f,-1.0f,-1.0f,  // -X side
    -1.0f,-1.0f, 1.0f,
    -1.0f, 1.0f, 1.0f,
    -1.0f, 1.0f, 1.0f,
    -1.0f, 1.0f,-1.0f,
    -1.0f,-1.0f,-1.0f,

    -1.0f,-1.0f,-1.0f,  // -Z side
     1.0f, 1.0f,-1.0f,
     1.0f,-1.0f,-1.0f,
    -1.0f,-1.0f,-1.0f,
    -1.0f, 1.0f,-1.0f,
     1.0f, 1.0f,-1.0f,

    -1.0f,-1.0f,-1.0f,  // -Y side
     1.0f,-1.0f,-1.0f,
     1.0f,-1.0f, 1.0f,
    -1.0f,-1.0f,-1.0f,
     1.0f,-1.0f, 1.0f,
    -1.0f,-1.0f, 1.0f,

    -1.0f, 1.0f,-1.0f,  // +Y side
    -1.0f, 1.0f, 1.0f,
     1.0f, 1.0f, 1.0f,
    -1.0f, 1.0f,-1.0f,
     1.0f, 1.0f, 1.0f,
     1.0f, 1.0f,-1.0f,

     1.0f, 1.0f,-1.0f,  // +X side
     1.0f, 1.0f, 1.0f,
     1.0f,-1.0f, 1.0f,
     1.0f,-1.0f, 1.0f,
     1.0f,-1.0f,-1.0f,
     1.0f, 1.0f,-1.0f,

    -1.0f, 1.0f, 1.0f,  // +Z side
    -1.0f,-1.0f, 1.0f,
     1.0f, 1.0f, 1.0f,
    -1.0f,-1.0f, 1.0f,
     1.0f,-1.0f, 1.0f,
     1.0f, 1.0f, 1.0f,
};

// Texture coordinates, one (u, v) pair per vertex in g_vertex_buffer_data,
// in the same face order (36 pairs).
static const float g_uv_buffer_data[] = {
    0.0f, 1.0f,  // -X side
    1.0f, 1.0f,
    1.0f, 0.0f,
    1.0f, 0.0f,
    0.0f, 0.0f,
    0.0f, 1.0f,

    1.0f, 1.0f,  // -Z side
    0.0f, 0.0f,
    0.0f, 1.0f,
    1.0f, 1.0f,
    1.0f, 0.0f,
    0.0f, 0.0f,

    1.0f, 0.0f,  // -Y side
    1.0f, 1.0f,
    0.0f, 1.0f,
    1.0f, 0.0f,
    0.0f, 1.0f,
    0.0f, 0.0f,

    1.0f, 0.0f,  // +Y side
    0.0f, 0.0f,
    0.0f, 1.0f,
    1.0f, 0.0f,
    0.0f, 1.0f,
    1.0f, 1.0f,

    1.0f, 0.0f,  // +X side
    0.0f, 0.0f,
    0.0f, 1.0f,
    0.0f, 1.0f,
    1.0f, 1.0f,
    1.0f, 0.0f,

    0.0f, 0.0f,  // +Z side
    0.0f, 1.0f,
    1.0f, 0.0f,
    0.0f, 1.0f,
    1.0f, 1.0f,
    1.0f, 0.0f,
};

// Upper bounds for the fixed-size arrays used by this demo (avoids dynamic
// allocation for enumeration results and per-swapchain-image state).
enum
{
    MAX_QUEUE_FAMILY_COUNT = 16,
    MAX_GPU_COUNT = 64,
    MAX_DEVICE_EXTENSION_COUNT = 512,
    MAX_FORMAT_COUNT = 16,
    MAX_SWAPCHAIN_IMAGE_COUNT = 16
};

// Per-swapchain-image resources: each image carries its own command buffers,
// uniform buffer, and descriptor set so frames can be recorded independently.
typedef struct
{
    VkImage image;                             // swapchain image (owned by the swapchain)
    VkCommandBuffer cmd;                       // draw command buffer for this image
    VkCommandBuffer graphics_to_present_cmd;   // graphics→present queue-ownership transfer
    VkImageView view;
    VkBuffer uniform_buffer;
    VkDeviceMemory uniform_memory;
    void* uniform_memory_ptr;                  // mapped pointer; written directly each frame
    VkFramebuffer framebuffer;
    VkDescriptorSet descriptor_set;
} SwapchainImageResources;

// State for the single depth attachment shared by all framebuffers.
struct
{
    VkFormat format;

    VkImage image;
    VkMemoryAllocateInfo mem_alloc;  // kept for size/type bookkeeping
    VkDeviceMemory mem;
    VkImageView view;
} s_depth;

// --- Core Vulkan handles and device selection state ----------------------
static VkSurfaceKHR s_surface;
static VkInstance s_inst;
static VkPhysicalDevice s_gpu;
static bool prepared;
static bool use_staging_buffer;
// True when graphics and present operations use different queue families.
// (This was previously tentatively defined twice; the duplicate at the
// bottom of the list has been removed — duplicates are invalid in C++.)
static bool separate_present_queue;
static bool is_minimized;
static uint32_t gpu_number;
static VkPhysicalDeviceProperties gpu_props;
static VkQueueFamilyProperties queue_props[MAX_QUEUE_FAMILY_COUNT];
static VkPhysicalDeviceMemoryProperties memory_properties;

// --- Enabled instance layers/extensions -----------------------------------
static uint32_t enabled_extension_count;
static uint32_t enabled_layer_count;
static char* extension_names[64];
static char* enabled_layers[64];

// --- Device, queues, swapchain and per-frame objects ----------------------
static VkCommandBuffer cmd_buf;  // Buffer for initialization commands
static VkDevice s_device;
static VkQueue present_queue;
static VkQueue graphics_queue;
static VkCommandPool cmd_pool;
static VkCommandPool present_cmd_pool;
static VkRenderPass render_pass;
static VkSwapchainKHR swapchain;
static SwapchainImageResources swapchain_image_resources[MAX_SWAPCHAIN_IMAGE_COUNT];
static VkPresentModeKHR present_mode;
static VkFence fences[FRAME_LAG];
static VkDebugUtilsMessengerEXT dbg_messenger;
static uint32_t swapchainImageCount;
static int frame_index;
static uint32_t current_buffer;
static uint32_t queue_family_count;
static uint32_t graphics_queue_family_index;
static uint32_t present_queue_family_index;
static VkSemaphore image_acquired_semaphores[FRAME_LAG];
static VkSemaphore draw_complete_semaphores[FRAME_LAG];
static VkSemaphore image_ownership_semaphores[FRAME_LAG];
static int render_width, render_height;
static VkFormat s_format;
static VkColorSpaceKHR color_space;
static bool s_validate;
static bool pause;
// True when presentation is synchronized to measured present times.
// (Previously tentatively defined twice; the duplicate has been removed.)
static bool syncd_with_actual_presents;
static float spin_angle;
static float spin_increment;
static bool VK_KHR_incremental_present_enabled;
static uint64_t refresh_duration;
static uint64_t refresh_duration_multiplier;
static uint64_t target_IPD;  // image present duration (inverse of frame rate)
static uint64_t prev_desired_present_time;
static uint32_t next_present_id;
static uint32_t last_early_id;  // 0 if no early images
static uint32_t last_late_id;   // 0 if no late images
static VkPipeline s_pipeline;
static VkPipelineLayout pipeline_layout;
static mat4x4 projection_matrix;
static mat4x4 view_matrix;
static mat4x4 model_matrix;
static VkDescriptorPool desc_pool;
static VkPipelineCache pipeline_cache;
static VkDescriptorSetLayout desc_layout;
static struct texture_object textures[DEMO_TEXTURE_COUNT];
static struct texture_object staging_texture;
static VkShaderModule vert_shader_module;
static VkShaderModule frag_shader_module;

// --- Frame counting and debug options -------------------------------------
static int32_t cur_frame;
static int32_t frame_count;
static bool quit;
static bool validate_checks_disabled;
static bool use_break;
static bool suppress_popups;
static bool in_callback = false;

// --- Win32 windowing state -------------------------------------------------
static HINSTANCE connection;         // hInstance - Windows Instance
static HWND window;                  // hWnd - window handle
static POINT minsize;                // minimum window size

/*
 * Return true when a present was "late": the actual present time fell more
 * than one refresh duration (rdur) after the desired present time.
 */
static bool ActualTimeLate(uint64_t desired, uint64_t actual, uint64_t rdur)
{
    // A present at or before the desired time can never be late.  (This
    // should essentially never happen, but guard against it anyway.)
    if (actual <= desired) {
        return false;
    }
    // Late only if the present missed the desired time by more than rdur.
    return actual > desired + rdur;
}

/*
 * Return true when this present could have occurred earlier: the earliest
 * possible time preceded the actual time by at least 2 msec AND the timing
 * margin was at least 2 msec.  (rdur is accepted for symmetry with
 * ActualTimeLate but is not consulted.)
 */
static bool CanPresentEarlier(uint64_t earliest, uint64_t actual, uint64_t margin, uint64_t rdur)
{
    // 2 msec expressed in nanoseconds.
    const uint64_t two_msec_in_ns = 2 * 1000000ULL;

    if (earliest >= actual) {
        return false;
    }
    uint64_t lead = actual - earliest;
    return (lead >= two_msec_in_ns) && (margin >= two_msec_in_ns);
}

/*
 * Find the first memory type allowed by typeBits whose propertyFlags contain
 * every bit in requirements_mask.  On success writes the index to *typeIndex
 * and returns true; returns false when no memory type satisfies the request.
 */
static bool memory_type_from_properties(uint32_t typeBits, VkFlags requirements_mask, uint32_t* typeIndex)
{
    for (uint32_t idx = 0; idx < VK_MAX_MEMORY_TYPES; idx++) {
        const bool type_allowed = (typeBits & (1u << idx)) != 0;
        if (type_allowed &&
            (memory_properties.memoryTypes[idx].propertyFlags & requirements_mask) == requirements_mask) {
            *typeIndex = idx;
            return true;
        }
    }
    // No memory type matched the request.
    return false;
}

/*
 * Submit and retire the one-shot initialization command buffer (cmd_buf):
 * end it, submit on the graphics queue, block on a fence until the GPU
 * finishes, then free the buffer.  Clearing cmd_buf to VK_NULL_HANDLE makes
 * a second call a no-op.
 */
static void demo_flush_init_cmd(void)
{
    // This function could get called twice if the texture uses a staging buffer
    // In that case the second call should be ignored
    if (cmd_buf == VK_NULL_HANDLE) return;

    VkResult err = vkEndCommandBuffer(cmd_buf);
    assert(err == VK_SUCCESS);

    // Fence used only to wait for this one submission to complete.
    VkFence fence;
    VkFenceCreateInfo fence_ci = { .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, .pNext = NULL, .flags = 0 };
    err = vkCreateFence(s_device, &fence_ci, NULL, &fence);
    assert(err == VK_SUCCESS);

    const VkCommandBuffer cmdBufs[] = { cmd_buf };
    VkSubmitInfo submit_info = { .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
                                .pNext = NULL,
                                .waitSemaphoreCount = 0,
                                .pWaitSemaphores = NULL,
                                .pWaitDstStageMask = NULL,
                                .commandBufferCount = 1,
                                .pCommandBuffers = cmdBufs,
                                .signalSemaphoreCount = 0,
                                .pSignalSemaphores = NULL };

    err = vkQueueSubmit(graphics_queue, 1, &submit_info, fence);
    assert(err == VK_SUCCESS);

    // Block until the GPU has executed the initialization commands.
    err = vkWaitForFences(s_device, 1, &fence, VK_TRUE, UINT64_MAX);
    assert(err == VK_SUCCESS);

    vkFreeCommandBuffers(s_device, cmd_pool, 1, cmdBufs);
    vkDestroyFence(s_device, fence, NULL);
    cmd_buf = VK_NULL_HANDLE;
}

/*
 * Record an image-layout transition barrier into the init command buffer
 * (cmd_buf), moving `image` from old_image_layout to new_image_layout.
 * The destination access mask is derived from the new layout; the caller
 * supplies the source access mask and both pipeline stage masks.
 */
static void demo_set_image_layout(VkImage image, VkImageAspectFlags aspectMask, VkImageLayout old_image_layout,
    VkImageLayout new_image_layout, VkAccessFlagBits srcAccessMask, VkPipelineStageFlags src_stages,
    VkPipelineStageFlags dest_stages)
{
    // The init command buffer must be recording.
    assert(cmd_buf != NULL);

    VkImageMemoryBarrier image_memory_barrier = { .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
                                                 .pNext = NULL,
                                                 .srcAccessMask = srcAccessMask,
                                                 .dstAccessMask = 0,
                                                 .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
                                                 .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
                                                 .oldLayout = old_image_layout,
                                                 .newLayout = new_image_layout,
                                                 .image = image,
                                                 .subresourceRange = {aspectMask, 0, 1, 0, 1} };

    // Pick the destination access mask that matches how the image will be
    // used once it is in the new layout.
    switch (new_image_layout) {
    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
        /* Make sure anything that was copying from this image has completed */
        image_memory_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        break;

    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
        image_memory_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        break;

    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
        image_memory_barrier.dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
        break;

    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
        image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
        break;

    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
        image_memory_barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
        break;

    case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
        image_memory_barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
        break;

    default:
        image_memory_barrier.dstAccessMask = 0;
        break;
    }

    VkImageMemoryBarrier* pmemory_barrier = &image_memory_barrier;

    vkCmdPipelineBarrier(cmd_buf, src_stages, dest_stages, 0, 0, NULL, 0, NULL, 1, pmemory_barrier);
}

/*
 * Record the full draw command buffer for the current swapchain image:
 * begin render pass, bind pipeline/descriptor set, set dynamic viewport and
 * scissor, draw the 36-vertex cube, end the pass, and — when graphics and
 * present use different queue families — append a queue-ownership-transfer
 * barrier.  When validation is active, debug-utils names and labels are
 * attached so the capture/validation tooling can annotate the work.
 *
 * NOTE(review): the parameter shadows the file-scope `cmd_buf` used for
 * init commands; every reference below is to the parameter.
 */
static void demo_draw_build_cmd(VkCommandBuffer cmd_buf)
{
    VkDebugUtilsLabelEXT label;
    memset(&label, 0, sizeof(label));
    const VkCommandBufferBeginInfo cmd_buf_info = {
        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
        .pNext = NULL,
        .flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT,
        .pInheritanceInfo = NULL,
    };
    const VkClearValue clear_values[2] = {
        [0] = {.color.float32 = {0.2f, 0.2f, 0.2f, 0.2f}},
        [1] = {.depthStencil = {1.0f, 0}},
    };
    const VkRenderPassBeginInfo rp_begin = {
        .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
        .pNext = NULL,
        .renderPass = render_pass,
        .framebuffer = swapchain_image_resources[current_buffer].framebuffer,
        .renderArea.offset.x = 0,
        .renderArea.offset.y = 0,
        .renderArea.extent.width = render_width,
        .renderArea.extent.height = render_height,
        .clearValueCount = 2,
        .pClearValues = clear_values,
    };

    VkResult err = vkBeginCommandBuffer(cmd_buf, &cmd_buf_info);

    if (s_validate)
    {
        // Set a name for the command buffer
        VkDebugUtilsObjectNameInfoEXT cmd_buf_name = {
            .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT,
            .pNext = NULL,
            .objectType = VK_OBJECT_TYPE_COMMAND_BUFFER,
            .objectHandle = (uint64_t)cmd_buf,
            .pObjectName = "CubeDrawCommandBuf",
        };
        fpSetDebugUtilsObjectNameEXT(s_device, &cmd_buf_name);

        // Outermost label: spans the entire command buffer.
        label.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
        label.pNext = NULL;
        label.pLabelName = "DrawBegin";
        label.color[0] = 0.4f;
        label.color[1] = 0.3f;
        label.color[2] = 0.2f;
        label.color[3] = 0.1f;
        fpCmdBeginDebugUtilsLabelEXT(cmd_buf, &label);
    }

    assert(err == VK_SUCCESS);
    vkCmdBeginRenderPass(cmd_buf, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);

    if (s_validate) {
        // Second label: spans the render pass contents.
        label.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
        label.pNext = NULL;
        label.pLabelName = "InsideRenderPass";
        label.color[0] = 8.4f;
        label.color[1] = 7.3f;
        label.color[2] = 6.2f;
        label.color[3] = 7.1f;
        fpCmdBeginDebugUtilsLabelEXT(cmd_buf, &label);
    }

    vkCmdBindPipeline(cmd_buf, VK_PIPELINE_BIND_POINT_GRAPHICS, s_pipeline);
    vkCmdBindDescriptorSets(cmd_buf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout, 0, 1,
        &swapchain_image_resources[current_buffer].descriptor_set, 0, NULL);
    // Center a square viewport inside the (possibly non-square) framebuffer.
    VkViewport viewport;
    memset(&viewport, 0, sizeof(viewport));
    float viewport_dimension;
    if (render_width < render_height) {
        viewport_dimension = (float)render_width;
        viewport.y = (render_height - render_width) / 2.0f;
    }
    else {
        viewport_dimension = (float)render_height;
        viewport.x = (render_width - render_height) / 2.0f;
    }
    viewport.height = viewport_dimension;
    viewport.width = viewport_dimension;
    viewport.minDepth = (float)0.0f;
    viewport.maxDepth = (float)1.0f;
    vkCmdSetViewport(cmd_buf, 0, 1, &viewport);

    VkRect2D scissor = { 0 };
    scissor.extent.width = render_width;
    scissor.extent.height = render_height;
    scissor.offset.x = 0;
    scissor.offset.y = 0;
    vkCmdSetScissor(cmd_buf, 0, 1, &scissor);

    if (s_validate)
    {
        // Innermost label: wraps only the draw itself.
        label.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
        label.pNext = NULL;
        label.pLabelName = "ActualDraw";
        label.color[0] = -0.4f;
        label.color[1] = -0.3f;
        label.color[2] = -0.2f;
        label.color[3] = -0.1f;
        fpCmdBeginDebugUtilsLabelEXT(cmd_buf, &label);
    }

    // 12 triangles * 3 vertices, one instance.
    vkCmdDraw(cmd_buf, 12 * 3, 1, 0, 0);
    if (s_validate) {
        // Close "ActualDraw".
        fpCmdEndDebugUtilsLabelEXT(cmd_buf);
    }

    // Note that ending the renderpass changes the image's layout from
    // COLOR_ATTACHMENT_OPTIMAL to PRESENT_SRC_KHR
    vkCmdEndRenderPass(cmd_buf);
    if (s_validate) {
        // Close "InsideRenderPass".
        fpCmdEndDebugUtilsLabelEXT(cmd_buf);
    }

    if (separate_present_queue)
    {
        // We have to transfer ownership from the graphics queue family to the
        // present queue family to be able to present.  Note that we don't have
        // to transfer from present queue family back to graphics queue family at
        // the start of the next frame because we don't care about the image's
        // contents at that point.
        VkImageMemoryBarrier image_ownership_barrier = { .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
                                                        .pNext = NULL,
                                                        .srcAccessMask = 0,
                                                        .dstAccessMask = 0,
                                                        .oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
                                                        .newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
                                                        .srcQueueFamilyIndex = graphics_queue_family_index,
                                                        .dstQueueFamilyIndex = present_queue_family_index,
                                                        .image = swapchain_image_resources[current_buffer].image,
                                                        .subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} };

        vkCmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0,
            NULL, 1, &image_ownership_barrier);
    }
    if (s_validate) {
        // Close the outermost "DrawBegin" label.
        fpCmdEndDebugUtilsLabelEXT(cmd_buf);
    }
    err = vkEndCommandBuffer(cmd_buf);
    assert(err == VK_SUCCESS);
}

/*
 * Record the command buffer that releases ownership of swapchain image i
 * from the graphics queue family to the present queue family.  Only used
 * when separate_present_queue is true; the matching acquire happens on the
 * present side via the barrier's src/dst queue family indices.
 */
static void demo_build_image_ownership_cmd(int i)
{
    const VkCommandBufferBeginInfo cmd_buf_info = {
        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
        .pNext = NULL,
        .flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT,
        .pInheritanceInfo = NULL,
    };
    VkResult err = vkBeginCommandBuffer(swapchain_image_resources[i].graphics_to_present_cmd, &cmd_buf_info);
    assert(err == VK_SUCCESS);

    // Layout stays PRESENT_SRC_KHR; only the owning queue family changes.
    VkImageMemoryBarrier image_ownership_barrier = { .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
                                                    .pNext = NULL,
                                                    .srcAccessMask = 0,
                                                    .dstAccessMask = 0,
                                                    .oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
                                                    .newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
                                                    .srcQueueFamilyIndex = graphics_queue_family_index,
                                                    .dstQueueFamilyIndex = present_queue_family_index,
                                                    .image = swapchain_image_resources[i].image,
                                                    .subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} };

    vkCmdPipelineBarrier(swapchain_image_resources[i].graphics_to_present_cmd, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
        VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &image_ownership_barrier);
    err = vkEndCommandBuffer(swapchain_image_resources[i].graphics_to_present_cmd);
    assert(err == VK_SUCCESS);
}

/*
 * Recompute the model-view-projection matrix for the current frame (the
 * model spins around the Y axis by spin_angle degrees per update) and copy
 * it into the current swapchain image's mapped uniform buffer.
 *
 * Fix: pass sizeof(MVP) (a size_t) straight to memcpy instead of narrowing
 * it through an int local.
 */
static void demo_update_data_buffer(void)
{
    mat4x4 MVP, Model, VP;

    mat4x4_mul(VP, projection_matrix, view_matrix);

    // Rotate around the Y axis
    mat4x4_dup(Model, model_matrix);
    mat4x4_rotate(model_matrix, Model, 0.0f, 1.0f, 0.0f, (float)degreesToRadians(spin_angle));
    mat4x4_mul(MVP, VP, model_matrix);

    // The uniform block begins with the MVP matrix (see vktexcube_vs_uniform).
    memcpy(swapchain_image_resources[current_buffer].uniform_memory_ptr, (const void*)&MVP[0][0], sizeof(MVP));
}

static void demo_prepare_buffers(void)
{
    VkSwapchainKHR oldSwapchain = swapchain;

    // Check the surface capabilities and formats
    VkSurfaceCapabilitiesKHR surfCapabilities;
    VkResult err =vkGetPhysicalDeviceSurfaceCapabilitiesKHR(s_gpu, s_surface, &surfCapabilities);
    assert(err == VK_SUCCESS);

    uint32_t presentModeCount;
    err = vkGetPhysicalDeviceSurfacePresentModesKHR(s_gpu, s_surface, &presentModeCount, NULL);
    assert(err == VK_SUCCESS);
    VkPresentModeKHR* presentModes = (VkPresentModeKHR*)malloc(presentModeCount * sizeof(VkPresentModeKHR));
    assert(presentModes);
    err = vkGetPhysicalDeviceSurfacePresentModesKHR(s_gpu, s_surface, &presentModeCount, presentModes);
    assert(err == VK_SUCCESS);

    VkExtent2D swapchainExtent;
    // width and height are either both 0xFFFFFFFF, or both not 0xFFFFFFFF.
    if (surfCapabilities.currentExtent.width == 0xFFFFFFFF)
    {
        // If the surface size is undefined, the size is set to the size
        // of the images requested, which must fit within the minimum and
        // maximum values.
        swapchainExtent.width = render_width;
        swapchainExtent.height = render_height;

        if (swapchainExtent.width < surfCapabilities.minImageExtent.width) {
            swapchainExtent.width = surfCapabilities.minImageExtent.width;
        }
        else if (swapchainExtent.width > surfCapabilities.maxImageExtent.width) {
            swapchainExtent.width = surfCapabilities.maxImageExtent.width;
        }

        if (swapchainExtent.height < surfCapabilities.minImageExtent.height) {
            swapchainExtent.height = surfCapabilities.minImageExtent.height;
        }
        else if (swapchainExtent.height > surfCapabilities.maxImageExtent.height) {
            swapchainExtent.height = surfCapabilities.maxImageExtent.height;
        }
    }
    else
    {
        // If the surface size is defined, the swap chain size must match
        swapchainExtent = surfCapabilities.currentExtent;
        render_width = surfCapabilities.currentExtent.width;
        render_height = surfCapabilities.currentExtent.height;
    }

    if (render_width == 0 || render_height == 0) {
        is_minimized = true;
        return;
    }
    else {
        is_minimized = false;
    }

    // The FIFO present mode is guaranteed by the spec to be supported
    // and to have no tearing.  It's a great default present mode to use.
    VkPresentModeKHR swapchainPresentMode = VK_PRESENT_MODE_FIFO_KHR;

    //  There are times when you may wish to use another present mode.  The
    //  following code shows how to select them, and the comments provide some
    //  reasons you may wish to use them.
    //
    // It should be noted that Vulkan 1.0 doesn't provide a method for
    // synchronizing rendering with the presentation engine's display.  There
    // is a method provided for throttling rendering with the display, but
    // there are some presentation engines for which this method will not work.
    // If an application doesn't throttle its rendering, and if it renders much
    // faster than the refresh rate of the display, this can waste power on
    // mobile devices.  That is because power is being spent rendering images
    // that may never be seen.

    // VK_PRESENT_MODE_IMMEDIATE_KHR is for applications that don't care about
    // tearing, or have some way of synchronizing their rendering with the
    // display.
    // VK_PRESENT_MODE_MAILBOX_KHR may be useful for applications that
    // generally render a new presentable image every refresh cycle, but are
    // occasionally early.  In this case, the application wants the new image
    // to be displayed instead of the previously-queued-for-presentation image
    // that has not yet been displayed.
    // VK_PRESENT_MODE_FIFO_RELAXED_KHR is for applications that generally
    // render a new presentable image every refresh cycle, but are occasionally
    // late.  In this case (perhaps because of stuttering/latency concerns),
    // the application wants the late image to be immediately displayed, even
    // though that may mean some tearing.

    if (present_mode != swapchainPresentMode)
    {
        for (size_t i = 0; i < presentModeCount; ++i)
        {
            if (presentModes[i] == present_mode)
            {
                swapchainPresentMode = present_mode;
                break;
            }
        }
    }
    if (swapchainPresentMode != present_mode) {
        assert("Present mode specified is not supported\n" == NULL);
    }

    // Determine the number of VkImages to use in the swap chain.
    // Application desires to acquire 3 images at a time for triple
    // buffering
    uint32_t desiredNumOfSwapchainImages = 3;
    if (desiredNumOfSwapchainImages < surfCapabilities.minImageCount) {
        desiredNumOfSwapchainImages = surfCapabilities.minImageCount;
    }
    // If maxImageCount is 0, we can ask for as many images as we want;
    // otherwise we're limited to maxImageCount
    if ((surfCapabilities.maxImageCount > 0) && (desiredNumOfSwapchainImages > surfCapabilities.maxImageCount)) {
        // Application must settle for fewer images than desired:
        desiredNumOfSwapchainImages = surfCapabilities.maxImageCount;
    }

    VkSurfaceTransformFlagsKHR preTransform;
    if (surfCapabilities.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR) {
        preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
    }
    else {
        preTransform = surfCapabilities.currentTransform;
    }

    // Find a supported composite alpha mode - one of these is guaranteed to be set
    VkCompositeAlphaFlagBitsKHR compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
    VkCompositeAlphaFlagBitsKHR compositeAlphaFlags[4] = {
        VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
        VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR,
        VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR,
        VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR,
    };
    for (size_t i = 0; i < sizeof(compositeAlphaFlags) / sizeof(compositeAlphaFlags[0]); i++)
    {
        if (surfCapabilities.supportedCompositeAlpha & compositeAlphaFlags[i]) {
            compositeAlpha = compositeAlphaFlags[i];
            break;
        }
    }

    VkSwapchainCreateInfoKHR swapchain_ci = {
        .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
        .pNext = NULL,
        .surface = s_surface,
        .minImageCount = desiredNumOfSwapchainImages,
        .imageFormat = s_format,
        .imageColorSpace = color_space,
        .imageExtent =
            {
                .width = swapchainExtent.width,
                .height = swapchainExtent.height,
            },
        .imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
        .preTransform = preTransform,
        .compositeAlpha = compositeAlpha,
        .imageArrayLayers = 1,
        .imageSharingMode = VK_SHARING_MODE_EXCLUSIVE,
        .queueFamilyIndexCount = 0,
        .pQueueFamilyIndices = NULL,
        .presentMode = swapchainPresentMode,
        .oldSwapchain = oldSwapchain,
        .clipped = true,
    };
    uint32_t i;
    err = vkCreateSwapchainKHR(s_device, &swapchain_ci, NULL, &swapchain);
    assert(err == VK_SUCCESS);

    // If we just re-created an existing swapchain, we should destroy the old
    // swapchain at this point.
    // Note: destroying the swapchain also cleans up all its associated
    // presentable images once the platform is done with them.
    if (oldSwapchain != VK_NULL_HANDLE) {
        vkDestroySwapchainKHR(s_device, oldSwapchain, NULL);
    }

    err = vkGetSwapchainImagesKHR(s_device, swapchain, &swapchainImageCount, NULL);
    assert(err == VK_SUCCESS);
    swapchainImageCount = min(swapchainImageCount, MAX_SWAPCHAIN_IMAGE_COUNT);

    VkImage swapchainImages[MAX_SWAPCHAIN_IMAGE_COUNT];
    err = vkGetSwapchainImagesKHR(s_device, swapchain, &swapchainImageCount, swapchainImages);
    assert(err == VK_SUCCESS);

    for (i = 0; i < swapchainImageCount; i++) {
        VkImageViewCreateInfo color_image_view = {
            .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
            .pNext = NULL,
            .format = s_format,
            .components =
                {
                    .r = VK_COMPONENT_SWIZZLE_R,
                    .g = VK_COMPONENT_SWIZZLE_G,
                    .b = VK_COMPONENT_SWIZZLE_B,
                    .a = VK_COMPONENT_SWIZZLE_A,
                },
            .subresourceRange =
                {.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1},
            .viewType = VK_IMAGE_VIEW_TYPE_2D,
            .flags = 0,
        };

        swapchain_image_resources[i].image = swapchainImages[i];

        color_image_view.image = swapchain_image_resources[i].image;

        err = vkCreateImageView(s_device, &color_image_view, NULL, &swapchain_image_resources[i].view);
        assert(err == VK_SUCCESS);
    }

    if (NULL != presentModes) {
        free(presentModes);
    }
}

static void demo_prepare_depth(void)
{
    /*
     * Create the D16 depth attachment used by the render pass: a 2D
     * optimally-tiled image sized to the current render area, backed by
     * device-local memory, plus an image view over its depth aspect.
     *
     * Fix: removed a stale `assert(err == VK_SUCCESS)` that followed
     * vkGetImageMemoryRequirements -- that function returns void, so the
     * assert merely re-checked the previous vkCreateImage result.
     */
    const VkFormat depth_format = VK_FORMAT_D16_UNORM;
    const VkImageCreateInfo image = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
        .pNext = NULL,
        .imageType = VK_IMAGE_TYPE_2D,
        .format = depth_format,
        .extent = {render_width, render_height, 1},
        .mipLevels = 1,
        .arrayLayers = 1,
        .samples = VK_SAMPLE_COUNT_1_BIT,
        .tiling = VK_IMAGE_TILING_OPTIMAL,
        .usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
        .flags = 0,
    };

    // View over mip 0 / layer 0 of the depth aspect; .image is patched in
    // once the image exists.
    VkImageViewCreateInfo view = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
        .pNext = NULL,
        .image = VK_NULL_HANDLE,
        .format = depth_format,
        .subresourceRange =
            {.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT, .baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1},
        .flags = 0,
        .viewType = VK_IMAGE_VIEW_TYPE_2D,
    };

    VkMemoryRequirements mem_reqs;

    s_depth.format = depth_format;

    /* create image */
    VkResult err = vkCreateImage(s_device, &image, NULL, &s_depth.image);
    assert(err == VK_SUCCESS);

    vkGetImageMemoryRequirements(s_device, s_depth.image, &mem_reqs);

    s_depth.mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    s_depth.mem_alloc.pNext = NULL;
    s_depth.mem_alloc.allocationSize = mem_reqs.size;
    s_depth.mem_alloc.memoryTypeIndex = 0;

    bool pass = memory_type_from_properties(mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
        &s_depth.mem_alloc.memoryTypeIndex);
    assert(pass);

    /* allocate memory */
    err = vkAllocateMemory(s_device, &s_depth.mem_alloc, NULL, &s_depth.mem);
    assert(err == VK_SUCCESS);

    /* bind memory */
    err = vkBindImageMemory(s_device, s_depth.image, s_depth.mem, 0);
    assert(err == VK_SUCCESS);

    /* create image view */
    view.image = s_depth.image;
    err = vkCreateImageView(s_device, &view, NULL, &s_depth.view);
    assert(err == VK_SUCCESS);
}

/* Convert ppm image data from header file into RGBA texture image */
#include "lunarg.ppm.h"
/*
 * Decode the embedded binary PPM ("P6") image from lunarg.ppm.h into a
 * tightly packed RGBA8 buffer.
 *
 * filename  - unused; the pixel data comes from the embedded lunarg_ppm array.
 * rgba_data - destination buffer, or NULL to query only the dimensions.
 * layout    - provides rowPitch (bytes between destination rows); only
 *             dereferenced when rgba_data is non-NULL.
 * width/height - out parameters, always written on success.
 *
 * Returns false if the header is not a well-formed "P6 / w h / 255" PPM.
 *
 * Fixes vs. the original: the dimensions were scanned with "%u" into
 * int32_t* (signedness mismatch, undefined behavior per C11 7.21.6.2),
 * and the sscanf result was never checked, so a malformed header could
 * leave *width / *height indeterminate.
 */
bool loadTexture(const char* filename, uint8_t* rgba_data, VkSubresourceLayout* layout, int32_t* width, int32_t* height)
{
    (void)filename;
    char* cPtr;
    cPtr = (char*)lunarg_ppm;
    // Binary PPM files must start with the "P6" magic number.
    if ((unsigned char*)cPtr >= (lunarg_ppm + lunarg_ppm_len) || strncmp(cPtr, "P6\n", 3)) {
        return false;
    }
    // Advance past the magic-number line.
    while (strncmp(cPtr++, "\n", 1))
        ;
    // Dimensions line: "<width> <height>".  %d matches int32_t where int is
    // 32 bits (true on the Win32 targets this demo builds for).
    if (sscanf(cPtr, "%d %d", width, height) != 2) {
        return false;
    }
    if (rgba_data == NULL) {
        // Caller only wanted the image dimensions.
        return true;
    }
    // Advance past the dimensions line.
    while (strncmp(cPtr++, "\n", 1))
        ;
    // Max-value line must be 255 (8 bits per channel).
    if ((unsigned char*)cPtr >= (lunarg_ppm + lunarg_ppm_len) || strncmp(cPtr, "255\n", 4)) {
        return false;
    }
    while (strncmp(cPtr++, "\n", 1))
        ;
    // Expand packed RGB source texels to RGBA, honoring the destination
    // row pitch (which may exceed width * 4 for linear-tiled images).
    for (int y = 0; y < *height; y++) {
        uint8_t* rowPtr = rgba_data;
        for (int x = 0; x < *width; x++) {
            memcpy(rowPtr, cPtr, 3);
            rowPtr[3] = 255; /* Alpha of 1 */
            rowPtr += 4;
            cPtr += 3;
        }
        rgba_data += layout->rowPitch;
    }
    return true;
}

static void demo_prepare_texture_image(const char* filename, struct texture_object* tex_obj,
    VkImageTiling tiling, VkImageUsageFlags usage, VkFlags required_props)
{
    // Create a 2D R8G8B8A8 image for the texture, back it with memory that
    // has the requested property flags, and -- when that memory is
    // host-visible -- decode the texel data straight into the mapping.
    const VkFormat tex_format = VK_FORMAT_R8G8B8A8_UNORM;

    int32_t tex_width;
    int32_t tex_height;
    if (!loadTexture(filename, NULL, NULL, &tex_width, &tex_height)) {
        assert("Failed to load textures" == NULL && "Load Texture Failure" == NULL);
    }
    tex_obj->tex_width = tex_width;
    tex_obj->tex_height = tex_height;

    const VkImageCreateInfo image_create_info = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
        .pNext = NULL,
        .flags = 0,
        .imageType = VK_IMAGE_TYPE_2D,
        .format = tex_format,
        .extent = {tex_width, tex_height, 1},
        .mipLevels = 1,
        .arrayLayers = 1,
        .samples = VK_SAMPLE_COUNT_1_BIT,
        .tiling = tiling,
        .usage = usage,
        // PREINITIALIZED preserves host-written texels across the first
        // layout transition.
        .initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED,
    };

    VkResult err = vkCreateImage(s_device, &image_create_info, NULL, &tex_obj->image);
    assert(err == VK_SUCCESS);

    VkMemoryRequirements requirements;
    vkGetImageMemoryRequirements(s_device, tex_obj->image, &requirements);

    // The allocation info is kept in the texture object so teardown code can
    // reference it later.
    tex_obj->mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    tex_obj->mem_alloc.pNext = NULL;
    tex_obj->mem_alloc.allocationSize = requirements.size;
    tex_obj->mem_alloc.memoryTypeIndex = 0;

    bool found = memory_type_from_properties(requirements.memoryTypeBits, required_props, &tex_obj->mem_alloc.memoryTypeIndex);
    assert(found);

    /* allocate and bind backing memory */
    err = vkAllocateMemory(s_device, &tex_obj->mem_alloc, NULL, &(tex_obj->mem));
    assert(err == VK_SUCCESS);
    err = vkBindImageMemory(s_device, tex_obj->image, tex_obj->mem, 0);
    assert(err == VK_SUCCESS);

    if (required_props & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
        // Host-visible path: query the row pitch of the color subresource,
        // map the whole allocation, and decode the texture into it.
        const VkImageSubresource subres = {
            .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
            .mipLevel = 0,
            .arrayLayer = 0,
        };
        VkSubresourceLayout layout;
        vkGetImageSubresourceLayout(s_device, tex_obj->image, &subres, &layout);

        void* mapped;
        err = vkMapMemory(s_device, tex_obj->mem, 0, tex_obj->mem_alloc.allocationSize, 0, &mapped);
        assert(err == VK_SUCCESS);

        if (!loadTexture(filename, mapped, &layout, &tex_width, &tex_height)) {
            printf("Error loading texture: %s\n", filename);
        }

        vkUnmapMemory(s_device, tex_obj->mem);
    }

    // The caller is expected to transition the image to this layout.
    tex_obj->imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
}

static void demo_prepare_texture_buffer(const char* filename, struct texture_object* tex_obj)
{
    int32_t tex_width;
    int32_t tex_height;

    if (!loadTexture(filename, NULL, NULL, &tex_width, &tex_height)) {
        assert("Failed to load textures" == NULL && "Load Texture Failure" == NULL);
    }

    tex_obj->tex_width = tex_width;
    tex_obj->tex_height = tex_height;

    const VkBufferCreateInfo buffer_create_info = { .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
                                                   .pNext = NULL,
                                                   .flags = 0,
                                                   .size = tex_width * tex_height * 4,
                                                   .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
                                                   .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
                                                   .queueFamilyIndexCount = 0,
                                                   .pQueueFamilyIndices = NULL };

    VkResult err = vkCreateBuffer(s_device, &buffer_create_info, NULL, &tex_obj->buffer);
    assert(err == VK_SUCCESS);

    VkMemoryRequirements mem_reqs;
    vkGetBufferMemoryRequirements(s_device, tex_obj->buffer, &mem_reqs);

    tex_obj->mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    tex_obj->mem_alloc.pNext = NULL;
    tex_obj->mem_alloc.allocationSize = mem_reqs.size;
    tex_obj->mem_alloc.memoryTypeIndex = 0;

    VkFlags requirements = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    bool pass = memory_type_from_properties(mem_reqs.memoryTypeBits, requirements, &tex_obj->mem_alloc.memoryTypeIndex);
    assert(pass);

    err = vkAllocateMemory(s_device, &tex_obj->mem_alloc, NULL, &(tex_obj->mem));
    assert(err == VK_SUCCESS);

    /* bind memory */
    err = vkBindBufferMemory(s_device, tex_obj->buffer, tex_obj->mem, 0);
    assert(err == VK_SUCCESS);

    VkSubresourceLayout layout;
    memset(&layout, 0, sizeof(layout));
    layout.rowPitch = tex_width * 4;

    void* data;
    err = vkMapMemory(s_device, tex_obj->mem, 0, tex_obj->mem_alloc.allocationSize, 0, &data);
    assert(err == VK_SUCCESS);

    if (!loadTexture(filename, data, &layout, &tex_width, &tex_height)) {
        printf("Error loading texture: %s\n", filename);
    }

    vkUnmapMemory(s_device, tex_obj->mem);
}

static void demo_prepare_textures(void)
{
    // Create each demo texture plus its sampler and image view.  Two paths:
    // if the GPU can sample linear-tiled R8G8B8A8 images (and staging is not
    // forced), the texture is written directly into a host-visible image;
    // otherwise the texels go through a host-visible staging buffer and a
    // vkCmdCopyBufferToImage into an optimally-tiled image.
    const VkFormat tex_format = VK_FORMAT_R8G8B8A8_UNORM;
    VkFormatProperties props;
    uint32_t i;
    const char* const tex_files[] = { "lunarg.ppm" };

    vkGetPhysicalDeviceFormatProperties(s_gpu, tex_format, &props);

    for (i = 0; i < DEMO_TEXTURE_COUNT; i++)
    {
        if ((props.linearTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) && !use_staging_buffer)
        {
            /* Device can texture using linear textures */
            demo_prepare_texture_image(tex_files[i], &textures[i], VK_IMAGE_TILING_LINEAR, VK_IMAGE_USAGE_SAMPLED_BIT,
                VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
            // Nothing in the pipeline needs to be complete to start, and don't allow fragment
            // shader to run until layout transition completes
            demo_set_image_layout(textures[i].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_PREINITIALIZED,
                textures[i].imageLayout, 0, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
            // Null handle marks that no staging image exists on this path --
            // presumably checked during teardown; verify against the cleanup code.
            staging_texture.image = 0;
        }
        else if (props.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
        {
            /* Must use staging buffer to copy linear texture to optimized */

            memset(&staging_texture, 0, sizeof(staging_texture));
            demo_prepare_texture_buffer(tex_files[i], &staging_texture);

            demo_prepare_texture_image(tex_files[i], &textures[i], VK_IMAGE_TILING_OPTIMAL,
                (VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT),
                VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);

            // Transition the fresh image into TRANSFER_DST before the copy.
            demo_set_image_layout(textures[i].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_PREINITIALIZED,
                VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 0, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                VK_PIPELINE_STAGE_TRANSFER_BIT);

            // Copy the whole texture in one region; source rows are tightly
            // packed at the staging texture's width/height.
            VkBufferImageCopy copy_region = {
                .bufferOffset = 0,
                .bufferRowLength = staging_texture.tex_width,
                .bufferImageHeight = staging_texture.tex_height,
                .imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1},
                .imageOffset = {0, 0, 0},
                .imageExtent = {staging_texture.tex_width, staging_texture.tex_height, 1},
            };

            vkCmdCopyBufferToImage(cmd_buf, staging_texture.buffer, textures[i].image,
                VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy_region);

            // After the copy completes, transition to the shader-read layout
            // before any fragment shader samples the texture.
            demo_set_image_layout(textures[i].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                textures[i].imageLayout, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);

        }
        else {
            /* Can't support VK_FORMAT_R8G8B8A8_UNORM !? */
            assert("No support for R8G8B8A8_UNORM as texture image format" == NULL);
        }

        // Nearest-filtered, clamp-to-edge sampler with a single mip level.
        const VkSamplerCreateInfo sampler = {
            .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
            .pNext = NULL,
            .magFilter = VK_FILTER_NEAREST,
            .minFilter = VK_FILTER_NEAREST,
            .mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST,
            .addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
            .addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
            .addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
            .mipLodBias = 0.0f,
            .anisotropyEnable = VK_FALSE,
            .maxAnisotropy = 1,
            .compareOp = VK_COMPARE_OP_NEVER,
            .minLod = 0.0f,
            .maxLod = 0.0f,
            .borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE,
            .unnormalizedCoordinates = VK_FALSE,
        };

        VkImageViewCreateInfo view = {
            .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
            .pNext = NULL,
            .image = VK_NULL_HANDLE,
            .viewType = VK_IMAGE_VIEW_TYPE_2D,
            .format = tex_format,
            .components =
                {
                    VK_COMPONENT_SWIZZLE_R,
                    VK_COMPONENT_SWIZZLE_G,
                    VK_COMPONENT_SWIZZLE_B,
                    VK_COMPONENT_SWIZZLE_A,
                },
            .subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1},
            .flags = 0,
        };

        /* create sampler */
        VkResult err = vkCreateSampler(s_device, &sampler, NULL, &textures[i].sampler);
        assert(err == VK_SUCCESS);

        /* create image view */
        view.image = textures[i].image;
        err = vkCreateImageView(s_device, &view, NULL, &textures[i].view);
        assert(err == VK_SUCCESS);
    }
}

static void demo_prepare_cube_data_buffers()
{
    VkBufferCreateInfo buf_info;
    VkMemoryRequirements mem_reqs;
    VkMemoryAllocateInfo mem_alloc;
    mat4x4 MVP, VP;
    VkResult err = VK_SUCCESS;
    bool pass = false;
    struct vktexcube_vs_uniform data;

    mat4x4_mul(VP, projection_matrix, view_matrix);
    mat4x4_mul(MVP, VP, model_matrix);
    memcpy(data.mvp, MVP, sizeof(MVP));
    //    dumpMatrix("MVP", MVP);

    for (unsigned int i = 0; i < 12 * 3; i++)
    {
        data.position[i][0] = g_vertex_buffer_data[i * 3];
        data.position[i][1] = g_vertex_buffer_data[i * 3 + 1];
        data.position[i][2] = g_vertex_buffer_data[i * 3 + 2];
        data.position[i][3] = 1.0f;
        data.attr[i][0] = g_uv_buffer_data[2 * i];
        data.attr[i][1] = g_uv_buffer_data[2 * i + 1];
        data.attr[i][2] = 0;
        data.attr[i][3] = 0;
    }

    memset(&buf_info, 0, sizeof(buf_info));
    buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    buf_info.size = sizeof(data);

    for (unsigned int i = 0; i < swapchainImageCount; i++)
    {
        err = vkCreateBuffer(s_device, &buf_info, NULL, &swapchain_image_resources[i].uniform_buffer);
        assert(err == VK_SUCCESS);

        vkGetBufferMemoryRequirements(s_device, swapchain_image_resources[i].uniform_buffer, &mem_reqs);

        mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
        mem_alloc.pNext = NULL;
        mem_alloc.allocationSize = mem_reqs.size;
        mem_alloc.memoryTypeIndex = 0;

        pass = memory_type_from_properties(mem_reqs.memoryTypeBits,
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
            &mem_alloc.memoryTypeIndex);
        assert(pass);

        err = vkAllocateMemory(s_device, &mem_alloc, NULL, &swapchain_image_resources[i].uniform_memory);
        assert(err == VK_SUCCESS);

        err = vkMapMemory(s_device, swapchain_image_resources[i].uniform_memory, 0, VK_WHOLE_SIZE, 0,
            &swapchain_image_resources[i].uniform_memory_ptr);
        assert(err == VK_SUCCESS);

        memcpy(swapchain_image_resources[i].uniform_memory_ptr, &data, sizeof data);

        err = vkBindBufferMemory(s_device, swapchain_image_resources[i].uniform_buffer,
            swapchain_image_resources[i].uniform_memory, 0);
        assert(err == VK_SUCCESS);
    }
}

static void demo_prepare_descriptor_layout(void)
{
    const VkDescriptorSetLayoutBinding layout_bindings[2] = {
        [0] =
            {
                .binding = 0,
                .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
                .descriptorCount = 1,
                .stageFlags = VK_SHADER_STAGE_VERTEX_BIT,
                .pImmutableSamplers = NULL,
            },
        [1] =
            {
                .binding = 1,
                .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
                .descriptorCount = DEMO_TEXTURE_COUNT,
                .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
                .pImmutableSamplers = NULL,
            },
    };
    const VkDescriptorSetLayoutCreateInfo descriptor_layout = {
        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
        .pNext = NULL,
        .bindingCount = 2,
        .pBindings = layout_bindings,
    };

    VkResult err = vkCreateDescriptorSetLayout(s_device, &descriptor_layout, NULL, &desc_layout);
    assert(err == VK_SUCCESS);

    const VkPipelineLayoutCreateInfo pPipelineLayoutCreateInfo = {
        .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
        .pNext = NULL,
        .setLayoutCount = 1,
        .pSetLayouts = &desc_layout,
    };

    err = vkCreatePipelineLayout(s_device, &pPipelineLayoutCreateInfo, NULL, &pipeline_layout);
    assert(err == VK_SUCCESS);
}

static void demo_prepare_render_pass(void)
{
    // Build the single-subpass render pass: one color attachment (cleared,
    // stored, ends in PRESENT_SRC) and one depth attachment (cleared,
    // discarded after the pass), plus two external subpass dependencies.
    //
    // The initial layout for the color and depth attachments will be LAYOUT_UNDEFINED
    // because at the start of the renderpass, we don't care about their contents.
    // At the start of the subpass, the color attachment's layout will be transitioned
    // to LAYOUT_COLOR_ATTACHMENT_OPTIMAL and the depth stencil attachment's layout
    // will be transitioned to LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL.  At the end of
    // the renderpass, the color attachment's layout will be transitioned to
    // LAYOUT_PRESENT_SRC_KHR to be ready to present.  This is all done as part of
    // the renderpass, no barriers are necessary.
    const VkAttachmentDescription attachments[2] = {
        [0] =
            {
                .format = s_format,
                .flags = 0,
                .samples = VK_SAMPLE_COUNT_1_BIT,
                .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
                .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
                .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
                .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
                .finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
            },
        [1] =
            {
                .format = s_depth.format,
                .flags = 0,
                .samples = VK_SAMPLE_COUNT_1_BIT,
                .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
                // Depth contents are not needed after the pass ends.
                .storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
                .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
                .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
                .finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
            },
    };
    // Attachment indices refer into attachments[] above.
    const VkAttachmentReference color_reference = {
        .attachment = 0,
        .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
    };
    const VkAttachmentReference depth_reference = {
        .attachment = 1,
        .layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
    };
    const VkSubpassDescription subpass = {
        .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
        .flags = 0,
        .inputAttachmentCount = 0,
        .pInputAttachments = NULL,
        .colorAttachmentCount = 1,
        .pColorAttachments = &color_reference,
        .pResolveAttachments = NULL,
        .pDepthStencilAttachment = &depth_reference,
        .preserveAttachmentCount = 0,
        .pPreserveAttachments = NULL,
    };

    // Both dependencies are EXTERNAL -> subpass 0:
    //  [0] orders this frame's depth tests/writes after the previous frame's
    //      depth writes (the depth buffer is shared across swapchain images).
    //  [1] makes the UNDEFINED -> COLOR_ATTACHMENT layout transition wait
    //      until the color-attachment-output stage.
    VkSubpassDependency attachmentDependencies[2] = {
        [0] =
            {
            // Depth buffer is shared between swapchain images
            .srcSubpass = VK_SUBPASS_EXTERNAL,
            .dstSubpass = 0,
            .srcStageMask = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
            .dstStageMask = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
            .srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
            .dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
            .dependencyFlags = 0,
        },
    [1] =
        {
            // Image Layout Transition
            .srcSubpass = VK_SUBPASS_EXTERNAL,
            .dstSubpass = 0,
            .srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
            .dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
            .srcAccessMask = 0,
            .dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT,
            .dependencyFlags = 0,
        },
    };

    const VkRenderPassCreateInfo rp_info = {
        .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
        .pNext = NULL,
        .flags = 0,
        .attachmentCount = 2,
        .pAttachments = attachments,
        .subpassCount = 1,
        .pSubpasses = &subpass,
        .dependencyCount = 2,
        .pDependencies = attachmentDependencies,
    };

    VkResult err = vkCreateRenderPass(s_device, &rp_info, NULL, &render_pass);
    assert(err == VK_SUCCESS);
}

static VkShaderModule demo_prepare_shader_module(const uint32_t* code, size_t size)
{
    VkShaderModule module;
    VkShaderModuleCreateInfo moduleCreateInfo;

    moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
    moduleCreateInfo.pNext = NULL;
    moduleCreateInfo.flags = 0;
    moduleCreateInfo.codeSize = size;
    moduleCreateInfo.pCode = code;

    VkResult err = vkCreateShaderModule(s_device, &moduleCreateInfo, NULL, &module);
    assert(err == VK_SUCCESS);

    return module;
}

static void demo_prepare_vs(void)
{
    // Create the vertex shader module from the SPIR-V words spliced in from
    // cube.vert.inc (generated at build time) and store it in the global
    // vert_shader_module.
    const uint32_t vs_code[] = {
#include "cube.vert.inc"
    };
    vert_shader_module = demo_prepare_shader_module(vs_code, sizeof(vs_code));
}

static void demo_prepare_fs(void)
{
    // Create the fragment shader module from the SPIR-V words spliced in
    // from cube.frag.inc (generated at build time) and store it in the
    // global frag_shader_module.
    const uint32_t fs_code[] = {
#include "cube.frag.inc"
    };
    frag_shader_module = demo_prepare_shader_module(fs_code, sizeof(fs_code));
}

static void demo_prepare_pipeline(void)
{
#define NUM_DYNAMIC_STATES 2 /*Viewport + Scissor*/

    VkGraphicsPipelineCreateInfo pipeline;
    VkPipelineCacheCreateInfo pipelineCache;
    VkPipelineVertexInputStateCreateInfo vi;
    VkPipelineInputAssemblyStateCreateInfo ia;
    VkPipelineRasterizationStateCreateInfo rs;
    VkPipelineColorBlendStateCreateInfo cb;
    VkPipelineDepthStencilStateCreateInfo ds;
    VkPipelineViewportStateCreateInfo vp;
    VkPipelineMultisampleStateCreateInfo ms;
    VkDynamicState dynamicStateEnables[NUM_DYNAMIC_STATES];
    VkPipelineDynamicStateCreateInfo dynamicState;

    memset(dynamicStateEnables, 0, sizeof dynamicStateEnables);
    memset(&dynamicState, 0, sizeof dynamicState);
    dynamicState.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
    dynamicState.pDynamicStates = dynamicStateEnables;

    memset(&pipeline, 0, sizeof(pipeline));
    pipeline.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
    pipeline.layout = pipeline_layout;

    memset(&vi, 0, sizeof(vi));
    vi.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;

    memset(&ia, 0, sizeof(ia));
    ia.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
    ia.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;

    memset(&rs, 0, sizeof(rs));
    rs.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
    rs.polygonMode = VK_POLYGON_MODE_FILL;
    rs.cullMode = VK_CULL_MODE_BACK_BIT;
    rs.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
    rs.depthClampEnable = VK_FALSE;
    rs.rasterizerDiscardEnable = VK_FALSE;
    rs.depthBiasEnable = VK_FALSE;
    rs.lineWidth = 1.0f;

    memset(&cb, 0, sizeof(cb));
    cb.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
    VkPipelineColorBlendAttachmentState att_state[1];
    memset(att_state, 0, sizeof(att_state));
    att_state[0].colorWriteMask = 0xf;
    att_state[0].blendEnable = VK_FALSE;
    cb.attachmentCount = 1;
    cb.pAttachments = att_state;

    memset(&vp, 0, sizeof(vp));
    vp.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
    vp.viewportCount = 1;
    dynamicStateEnables[dynamicState.dynamicStateCount++] = VK_DYNAMIC_STATE_VIEWPORT;
    vp.scissorCount = 1;
    dynamicStateEnables[dynamicState.dynamicStateCount++] = VK_DYNAMIC_STATE_SCISSOR;

    memset(&ds, 0, sizeof(ds));
    ds.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
    ds.depthTestEnable = VK_TRUE;
    ds.depthWriteEnable = VK_TRUE;
    ds.depthCompareOp = VK_COMPARE_OP_LESS_OR_EQUAL;
    ds.depthBoundsTestEnable = VK_FALSE;
    ds.back.failOp = VK_STENCIL_OP_KEEP;
    ds.back.passOp = VK_STENCIL_OP_KEEP;
    ds.back.compareOp = VK_COMPARE_OP_ALWAYS;
    ds.stencilTestEnable = VK_FALSE;
    ds.front = ds.back;

    memset(&ms, 0, sizeof(ms));
    ms.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
    ms.pSampleMask = NULL;
    ms.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;

    demo_prepare_vs();
    demo_prepare_fs();

    // Two stages: vs and fs
    VkPipelineShaderStageCreateInfo shaderStages[2];
    memset(&shaderStages, 0, 2 * sizeof(VkPipelineShaderStageCreateInfo));

    shaderStages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
    shaderStages[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
    shaderStages[0].module = vert_shader_module;
    shaderStages[0].pName = "main";

    shaderStages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
    shaderStages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
    shaderStages[1].module = frag_shader_module;
    shaderStages[1].pName = "main";

    memset(&pipelineCache, 0, sizeof(pipelineCache));
    pipelineCache.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;

    VkResult err = vkCreatePipelineCache(s_device, &pipelineCache, NULL, &pipeline_cache);
    assert(err == VK_SUCCESS);

    pipeline.pVertexInputState = &vi;
    pipeline.pInputAssemblyState = &ia;
    pipeline.pRasterizationState = &rs;
    pipeline.pColorBlendState = &cb;
    pipeline.pMultisampleState = &ms;
    pipeline.pViewportState = &vp;
    pipeline.pDepthStencilState = &ds;
    pipeline.stageCount = sizeof(shaderStages) / sizeof(shaderStages[0]);
    pipeline.pStages = shaderStages;
    pipeline.renderPass = render_pass;
    pipeline.pDynamicState = &dynamicState;

    pipeline.renderPass = render_pass;

    err = vkCreateGraphicsPipelines(s_device, pipeline_cache, 1, &pipeline, NULL, &s_pipeline);
    assert(err == VK_SUCCESS);

    vkDestroyShaderModule(s_device, frag_shader_module, NULL);
    vkDestroyShaderModule(s_device, vert_shader_module, NULL);
}

// Create a descriptor pool sized for one descriptor set per swapchain
// image, where each set holds one uniform buffer plus DEMO_TEXTURE_COUNT
// combined image/samplers.
static void demo_prepare_descriptor_pool(void)
{
    const VkDescriptorPoolSize pool_sizes[2] = {
        {
            .type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
            .descriptorCount = swapchainImageCount,
        },
        {
            .type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
            .descriptorCount = swapchainImageCount * DEMO_TEXTURE_COUNT,
        },
    };

    const VkDescriptorPoolCreateInfo pool_info = {
        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
        .pNext = NULL,
        .maxSets = swapchainImageCount,
        .poolSizeCount = 2,
        .pPoolSizes = pool_sizes,
    };

    const VkResult res = vkCreateDescriptorPool(s_device, &pool_info, NULL, &desc_pool);
    assert(res == VK_SUCCESS);
}

static void demo_prepare_descriptor_set(void)
{
    VkDescriptorImageInfo tex_descs[DEMO_TEXTURE_COUNT];
    VkWriteDescriptorSet writes[2];

    VkDescriptorSetAllocateInfo alloc_info = { .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
                                              .pNext = NULL,
                                              .descriptorPool = desc_pool,
                                              .descriptorSetCount = 1,
                                              .pSetLayouts = &desc_layout };

    VkDescriptorBufferInfo buffer_info;
    buffer_info.offset = 0;
    buffer_info.range = sizeof(struct vktexcube_vs_uniform);

    memset(&tex_descs, 0, sizeof(tex_descs));
    for (unsigned int i = 0; i < DEMO_TEXTURE_COUNT; i++)
    {
        tex_descs[i].sampler = textures[i].sampler;
        tex_descs[i].imageView = textures[i].view;
        tex_descs[i].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    }

    memset(&writes, 0, sizeof(writes));

    writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    writes[0].descriptorCount = 1;
    writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    writes[0].pBufferInfo = &buffer_info;

    writes[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    writes[1].dstBinding = 1;
    writes[1].descriptorCount = DEMO_TEXTURE_COUNT;
    writes[1].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
    writes[1].pImageInfo = tex_descs;

    for (unsigned int i = 0; i < swapchainImageCount; i++)
    {
        VkResult err = vkAllocateDescriptorSets(s_device, &alloc_info, &swapchain_image_resources[i].descriptor_set);
        assert(err == VK_SUCCESS);
        buffer_info.buffer = swapchain_image_resources[i].uniform_buffer;
        writes[0].dstSet = swapchain_image_resources[i].descriptor_set;
        writes[1].dstSet = swapchain_image_resources[i].descriptor_set;
        vkUpdateDescriptorSets(s_device, 2, writes, 0, NULL);
    }
}

// Create one framebuffer per swapchain image. Attachment slot 0 is the
// per-image color view (set inside the loop); slot 1 is the shared depth
// view. The slot order matches the render pass attachment layout.
static void demo_prepare_framebuffers(void)
{
    VkImageView views[2];
    views[1] = s_depth.view;

    VkFramebufferCreateInfo fb_info = {
        .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
        .pNext = NULL,
        .renderPass = render_pass,
        .attachmentCount = 2,
        .pAttachments = views,
        .width = render_width,
        .height = render_height,
        .layers = 1,
    };

    for (uint32_t i = 0; i < swapchainImageCount; i++)
    {
        views[0] = swapchain_image_resources[i].view;
        const VkResult res = vkCreateFramebuffer(s_device, &fb_info, NULL, &swapchain_image_resources[i].framebuffer);
        assert(res == VK_SUCCESS);
    }
}

static void demo_destroy_texture(struct texture_object* tex_objs)
{
    /* clean up staging resources */
    vkFreeMemory(s_device, tex_objs->mem, NULL);
    if (tex_objs->image) vkDestroyImage(s_device, tex_objs->image, NULL);
    if (tex_objs->buffer) vkDestroyBuffer(s_device, tex_objs->buffer, NULL);
}

static void demo_create_surface(void)
{
    // Create a WSI surface for the window:
    VkWin32SurfaceCreateInfoKHR createInfo;
    createInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
    createInfo.pNext = NULL;
    createInfo.flags = 0;
    createInfo.hinstance = connection;
    createInfo.hwnd = window;

    VkResult err = vkCreateWin32SurfaceKHR(s_inst, &createInfo, NULL, &s_surface);
    assert(err == VK_SUCCESS);
}

// Build (or rebuild after a resize) every swapchain-dependent resource:
// command pool/buffers, swapchain, depth buffer, textures, cube data,
// descriptor layout, render pass, pipeline, descriptor sets, framebuffers,
// and the pre-recorded per-image draw command buffers. Sets `prepared`
// on success; leaves it false when the window is minimized.
static void demo_prepare(void)
{
    VkResult err;
    // The command pool survives across resizes only when demo_resize()
    // hasn't reset it to VK_NULL_HANDLE; create it on first use / after reset.
    if (cmd_pool == VK_NULL_HANDLE)
    {
        const VkCommandPoolCreateInfo cmd_pool_info = {
            .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
            .pNext = NULL,
            .queueFamilyIndex = graphics_queue_family_index,
            .flags = 0,
        };
        err = vkCreateCommandPool(s_device, &cmd_pool_info, NULL, &cmd_pool);
        assert(err == VK_SUCCESS);
    }

    // cmd_buf is a scratch command buffer used by the prepare_* helpers for
    // one-time initialization work (e.g. layout transitions); it is begun
    // here and flushed by demo_flush_init_cmd() at the bottom.
    const VkCommandBufferAllocateInfo cmdAllocInfo = {
        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
        .pNext = NULL,
        .commandPool = cmd_pool,
        .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
        .commandBufferCount = 1,
    };
    err = vkAllocateCommandBuffers(s_device, &cmdAllocInfo, &cmd_buf);
    assert(err == VK_SUCCESS);
    VkCommandBufferBeginInfo cmd_buf_info = {
        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
        .pNext = NULL,
        .flags = 0,
        .pInheritanceInfo = NULL,
    };
    err = vkBeginCommandBuffer(cmd_buf, &cmd_buf_info);
    assert(err == VK_SUCCESS);

    demo_prepare_buffers();

    // demo_prepare_buffers() discovers the surface extent; a zero-area
    // surface means the window is minimized, so there is nothing to build.
    if (is_minimized) {
        prepared = false;
        return;
    }

    demo_prepare_depth();
    demo_prepare_textures();
    demo_prepare_cube_data_buffers();

    demo_prepare_descriptor_layout();
    demo_prepare_render_pass();
    demo_prepare_pipeline();

    // Per-swapchain-image draw command buffers (recorded further below).
    for (uint32_t i = 0; i < swapchainImageCount; i++) {
        err = vkAllocateCommandBuffers(s_device, &cmdAllocInfo, &swapchain_image_resources[i].cmd);
        assert(err == VK_SUCCESS);
    }

    // With distinct graphics/present queue families, each image also needs a
    // command buffer (from a pool on the present family) that performs the
    // queue-family ownership transfer before presentation.
    if (separate_present_queue)
    {
        const VkCommandPoolCreateInfo present_cmd_pool_info = {
            .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
            .pNext = NULL,
            .queueFamilyIndex = present_queue_family_index,
            .flags = 0,
        };
        err = vkCreateCommandPool(s_device, &present_cmd_pool_info, NULL, &present_cmd_pool);
        assert(err == VK_SUCCESS);
        const VkCommandBufferAllocateInfo present_cmd_info = {
            .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
            .pNext = NULL,
            .commandPool = present_cmd_pool,
            .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
            .commandBufferCount = 1,
        };
        for (uint32_t i = 0; i < swapchainImageCount; i++)
        {
            err = vkAllocateCommandBuffers(s_device, &present_cmd_info,
                &swapchain_image_resources[i].graphics_to_present_cmd);
            assert(err == VK_SUCCESS);
            demo_build_image_ownership_cmd(i);
        }
    }

    demo_prepare_descriptor_pool();
    demo_prepare_descriptor_set();

    demo_prepare_framebuffers();

    // Record the draw commands for every swapchain image up front; demo_draw
    // only submits them. current_buffer is borrowed as the "image being
    // recorded" index here — presumably demo_draw_build_cmd reads it; verify.
    for (uint32_t i = 0; i < swapchainImageCount; i++)
    {
        current_buffer = i;
        demo_draw_build_cmd(swapchain_image_resources[i].cmd);
    }

    /*
     * Prepare functions above may generate pipeline commands
     * that need to be flushed before beginning the render loop.
     */
    demo_flush_init_cmd();
    // The staging buffer is only needed until its copy commands (flushed
    // just above) have executed.
    if (staging_texture.buffer) {
        demo_destroy_texture(&staging_texture);
    }

    current_buffer = 0;
    prepared = true;
}

// Tear down every Vulkan object in reverse dependency order, then the
// device, debug messenger, surface and instance. Safe to call after a
// minimize: demo_resize() has then already destroyed the per-swapchain
// objects, so that branch is skipped.
static void demo_cleanup(void)
{
    uint32_t i;

    prepared = false;
    // Ensure no command buffers are still executing before destruction.
    vkDeviceWaitIdle(s_device);

    // Wait for fences from present operations
    for (i = 0; i < FRAME_LAG; i++)
    {
        vkWaitForFences(s_device, 1, &fences[i], VK_TRUE, UINT64_MAX);
        vkDestroyFence(s_device, fences[i], NULL);
        vkDestroySemaphore(s_device, image_acquired_semaphores[i], NULL);
        vkDestroySemaphore(s_device, draw_complete_semaphores[i], NULL);
        if (separate_present_queue) {
            vkDestroySemaphore(s_device, image_ownership_semaphores[i], NULL);
        }
    }

    // If the window is currently minimized, demo_resize has already done some cleanup for us.
    if (!is_minimized)
    {
        for (i = 0; i < swapchainImageCount; i++) {
            vkDestroyFramebuffer(s_device, swapchain_image_resources[i].framebuffer, NULL);
        }
        // Destroying the pool frees all descriptor sets allocated from it.
        vkDestroyDescriptorPool(s_device, desc_pool, NULL);

        vkDestroyPipeline(s_device, s_pipeline, NULL);
        vkDestroyPipelineCache(s_device, pipeline_cache, NULL);
        vkDestroyRenderPass(s_device, render_pass, NULL);
        vkDestroyPipelineLayout(s_device, pipeline_layout, NULL);
        vkDestroyDescriptorSetLayout(s_device, desc_layout, NULL);

        for (i = 0; i < DEMO_TEXTURE_COUNT; i++)
        {
            vkDestroyImageView(s_device, textures[i].view, NULL);
            vkDestroyImage(s_device, textures[i].image, NULL);
            vkFreeMemory(s_device, textures[i].mem, NULL);
            vkDestroySampler(s_device, textures[i].sampler, NULL);
        }
        // Destroys all swapchain images along with the swapchain itself.
        vkDestroySwapchainKHR(s_device, swapchain, NULL);

        vkDestroyImageView(s_device, s_depth.view, NULL);
        vkDestroyImage(s_device, s_depth.image, NULL);
        vkFreeMemory(s_device, s_depth.mem, NULL);

        for (i = 0; i < swapchainImageCount; i++)
        {
            vkDestroyImageView(s_device, swapchain_image_resources[i].view, NULL);
            vkFreeCommandBuffers(s_device, cmd_pool, 1, &swapchain_image_resources[i].cmd);
            vkDestroyBuffer(s_device, swapchain_image_resources[i].uniform_buffer, NULL);
            // Uniform memory stays persistently mapped during rendering;
            // unmap before freeing it.
            vkUnmapMemory(s_device, swapchain_image_resources[i].uniform_memory);
            vkFreeMemory(s_device, swapchain_image_resources[i].uniform_memory, NULL);
        }

        vkDestroyCommandPool(s_device, cmd_pool, NULL);

        if (separate_present_queue) {
            vkDestroyCommandPool(s_device, present_cmd_pool, NULL);
        }
    }
    vkDeviceWaitIdle(s_device);
    vkDestroyDevice(s_device, NULL);
    if (s_validate) {
        fpDestroyDebugUtilsMessengerEXT(s_inst, dbg_messenger, NULL);
    }
    // Surface and instance are instance-level objects: destroy them after
    // the device.
    vkDestroySurfaceKHR(s_inst, s_surface, NULL);

    vkDestroyInstance(s_inst, NULL);
}

// Handle a window resize: destroy everything that depends on the swapchain
// (the partial mirror of demo_cleanup below), then rebuild it all via
// demo_prepare(). When called while un-minimizing (not yet prepared),
// it just re-runs demo_prepare().
static void demo_resize(void)
{
    uint32_t i;

    // Don't react to resize until after first initialization.
    if (!prepared)
    {
        if (is_minimized) {
            demo_prepare();
        }
        return;
    }
    // In order to properly resize the window, we must re-create the swapchain
    // AND redo the command buffers, etc.
    //
    // First, perform part of the demo_cleanup() function:
    prepared = false;
    // Nothing may be in flight while we destroy in-use objects.
    vkDeviceWaitIdle(s_device);

    for (i = 0; i < swapchainImageCount; i++) {
        vkDestroyFramebuffer(s_device, swapchain_image_resources[i].framebuffer, NULL);
    }
    vkDestroyDescriptorPool(s_device, desc_pool, NULL);

    vkDestroyPipeline(s_device, s_pipeline, NULL);
    vkDestroyPipelineCache(s_device, pipeline_cache, NULL);
    vkDestroyRenderPass(s_device, render_pass, NULL);
    vkDestroyPipelineLayout(s_device, pipeline_layout, NULL);
    vkDestroyDescriptorSetLayout(s_device, desc_layout, NULL);

    for (i = 0; i < DEMO_TEXTURE_COUNT; i++) {
        vkDestroyImageView(s_device, textures[i].view, NULL);
        vkDestroyImage(s_device, textures[i].image, NULL);
        vkFreeMemory(s_device, textures[i].mem, NULL);
        vkDestroySampler(s_device, textures[i].sampler, NULL);
    }

    vkDestroyImageView(s_device, s_depth.view, NULL);
    vkDestroyImage(s_device, s_depth.image, NULL);
    vkFreeMemory(s_device, s_depth.mem, NULL);

    for (i = 0; i < swapchainImageCount; i++)
    {
        vkDestroyImageView(s_device, swapchain_image_resources[i].view, NULL);
        vkFreeCommandBuffers(s_device, cmd_pool, 1, &swapchain_image_resources[i].cmd);
        vkDestroyBuffer(s_device, swapchain_image_resources[i].uniform_buffer, NULL);
        // Uniform memory is persistently mapped; unmap before freeing.
        vkUnmapMemory(s_device, swapchain_image_resources[i].uniform_memory);
        vkFreeMemory(s_device, swapchain_image_resources[i].uniform_memory, NULL);
    }
    vkDestroyCommandPool(s_device, cmd_pool, NULL);
    // Signal demo_prepare() that the pool must be re-created.
    cmd_pool = VK_NULL_HANDLE;
    if (separate_present_queue) {
        vkDestroyCommandPool(s_device, present_cmd_pool, NULL);
    }
    // NOTE: the old swapchain itself is not destroyed here — presumably
    // demo_prepare_buffers() recycles it via oldSwapchain; verify.
    free(swapchain_image_resources);

    // Second, re-perform the demo_prepare() function, which will re-create the
    // swapchain:
    demo_prepare();
}

// Render and present one frame:
//  1. throttle to FRAME_LAG in-flight frames via the per-frame fence,
//  2. acquire the next swapchain image (recovering from out-of-date /
//     lost-surface by recreating the swapchain or surface),
//  3. update the per-image uniform buffer,
//  4. submit the pre-recorded draw command buffer,
//  5. (optional) transfer queue-family ownership to the present queue,
//  6. present, with an incremental-present hint when the extension is on.
static void demo_draw(void)
{
    // Ensure no more than FRAME_LAG renderings are outstanding
    vkWaitForFences(s_device, 1, &fences[frame_index], VK_TRUE, UINT64_MAX);
    vkResetFences(s_device, 1, &fences[frame_index]);

    VkResult err;
    do
    {
        // Get the index of the next available swapchain image:
        err = vkAcquireNextImageKHR(s_device, swapchain, UINT64_MAX,
                image_acquired_semaphores[frame_index], VK_NULL_HANDLE, &current_buffer);

        if (err == VK_ERROR_OUT_OF_DATE_KHR) {
            // demo->swapchain is out of date (e.g. the window was resized) and
            // must be recreated:
            demo_resize();
        }
        else if (err == VK_SUBOPTIMAL_KHR) {
            // demo->swapchain is not as optimal as it could be, but the platform's
            // presentation engine will still present the image correctly.
            break;
        }
        else if (err == VK_ERROR_SURFACE_LOST_KHR)
        {
            // The surface itself is gone: recreate it, then rebuild the
            // swapchain on top of it and retry the acquire.
            vkDestroySurfaceKHR(s_inst, s_surface, NULL);
            demo_create_surface();
            demo_resize();
        }
        else {
            assert(err == VK_SUCCESS);
        }
    } while (err != VK_SUCCESS);

    // Refresh this image's MVP uniform data before submitting its commands.
    demo_update_data_buffer();

    // Wait for the image acquired semaphore to be signaled to ensure
    // that the image won't be rendered to until the presentation
    // engine has fully released ownership to the application, and it is
    // okay to render to the image.
    VkPipelineStageFlags pipe_stage_flags;
    VkSubmitInfo submit_info;
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.pNext = NULL;
    submit_info.pWaitDstStageMask = &pipe_stage_flags;
    // Only color-attachment writes must wait for the acquire; earlier
    // pipeline stages may run immediately.
    pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    submit_info.waitSemaphoreCount = 1;
    submit_info.pWaitSemaphores = &image_acquired_semaphores[frame_index];
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &swapchain_image_resources[current_buffer].cmd;
    submit_info.signalSemaphoreCount = 1;
    submit_info.pSignalSemaphores = &draw_complete_semaphores[frame_index];
    // The per-frame fence signals when this submission retires (see the
    // throttle at the top of this function).
    err = vkQueueSubmit(graphics_queue, 1, &submit_info, fences[frame_index]);
    assert(err == VK_SUCCESS);

    if (separate_present_queue)
    {
        // If we are using separate queues, change image ownership to the
        // present queue before presenting, waiting for the draw complete
        // semaphore and signalling the ownership released semaphore when finished
        VkFence nullFence = VK_NULL_HANDLE;
        pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
        submit_info.waitSemaphoreCount = 1;
        submit_info.pWaitSemaphores = &draw_complete_semaphores[frame_index];
        submit_info.commandBufferCount = 1;
        submit_info.pCommandBuffers = &swapchain_image_resources[current_buffer].graphics_to_present_cmd;
        submit_info.signalSemaphoreCount = 1;
        submit_info.pSignalSemaphores = &image_ownership_semaphores[frame_index];
        err = vkQueueSubmit(present_queue, 1, &submit_info, nullFence);
        assert(err == VK_SUCCESS);
    }

    // If we are using separate queues we have to wait for image ownership,
    // otherwise wait for draw complete
    VkPresentInfoKHR present = {
        .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
        .pNext = NULL,
        .waitSemaphoreCount = 1,
        .pWaitSemaphores = (separate_present_queue) ? &image_ownership_semaphores[frame_index]
                                                          : &draw_complete_semaphores[frame_index],
        .swapchainCount = 1,
        .pSwapchains = &swapchain,
        .pImageIndices = &current_buffer,
    };

    // These live on the stack but only need to survive until
    // vkQueuePresentKHR returns below.
    VkRectLayerKHR rect;
    VkPresentRegionKHR region;
    VkPresentRegionsKHR regions;
    if (VK_KHR_incremental_present_enabled)
    {
        // If using VK_KHR_incremental_present, we provide a hint of the region
        // that contains changed content relative to the previously-presented
        // image.  The implementation can use this hint in order to save
        // work/power (by only copying the region in the hint).  The
        // implementation is free to ignore the hint though, and so we must
        // ensure that the entire image has the correctly-drawn content.
        uint32_t eighthOfWidth = render_width / 8;
        uint32_t eighthOfHeight = render_height / 8;

        // Hint: the centered 6/8 x 6/8 rectangle of the image changed.
        rect.offset.x = eighthOfWidth;
        rect.offset.y = eighthOfHeight;
        rect.extent.width = eighthOfWidth * 6;
        rect.extent.height = eighthOfHeight * 6;
        rect.layer = 0;

        region.rectangleCount = 1;
        region.pRectangles = &rect;

        // Chain the regions struct into the present info's pNext chain.
        regions.sType = VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR;
        regions.pNext = present.pNext;
        regions.swapchainCount = present.swapchainCount;
        regions.pRegions = &region;
        present.pNext = &regions;
    }

    err = vkQueuePresentKHR(present_queue, &present);
    // Advance to the next frame-in-flight slot regardless of present result.
    frame_index += 1;
    frame_index %= FRAME_LAG;

    if (err == VK_ERROR_OUT_OF_DATE_KHR) {
        // demo->swapchain is out of date (e.g. the window was resized) and
        // must be recreated:
        demo_resize();
    }
    else if (err == VK_SUBOPTIMAL_KHR) {
        // demo->swapchain is not as optimal as it could be, but the platform's
        // presentation engine will still present the image correctly.
    }
    else if (err == VK_ERROR_SURFACE_LOST_KHR)
    {
        vkDestroySurfaceKHR(s_inst, s_surface, NULL);
        demo_create_surface();
        demo_resize();
    }
    else {
        assert(err == VK_SUCCESS);
    }
}

// Render a single frame, then post a quit message once the configured
// frame budget (if any) has been exhausted. frame_count == INT32_MAX
// means "run forever".
static void demo_run(void)
{
    if (!prepared) {
        return;
    }

    demo_draw();
    cur_frame++;

    const bool budget_exhausted = (frame_count != INT32_MAX) && (cur_frame == frame_count);
    if (budget_exhausted) {
        // Exit code carries whether any validation error was observed.
        PostQuitMessage(validation_error);
    }
}

/*
 * Return 1 (true) if all layer names specified in check_names
 * can be found in given layer properties.
 */
/*
 * Return 1 (true) if every layer name in check_names can be found in the
 * given layer properties; otherwise print the first missing name and
 * return 0.
 */
static VkBool32 demo_check_layers(uint32_t check_count, char** check_names, uint32_t layer_count, VkLayerProperties* layers)
{
    for (uint32_t i = 0; i < check_count; i++)
    {
        const char* wanted = check_names[i];

        VkBool32 present = 0;
        for (uint32_t j = 0; !present && j < layer_count; j++) {
            present = (strcmp(wanted, layers[j].layerName) == 0);
        }

        if (!present) {
            printf("Cannot find layer: %s\n", wanted);
            return 0;
        }
    }
    return 1;
}

// Debug-utils messenger callback: formats the severity/type prefix, the
// message, any attached objects and command-buffer labels into one string,
// then shows it in a message box (unless popups are suppressed). Always
// returns false so the triggering Vulkan call is not aborted.
//
// Fixes vs. the previous revision:
//  - the output buffer is now sized from objectCount/cmdBufLabelCount
//    (the old fixed "+5000" could overflow for messages with many objects);
//  - per-entry formatting uses snprintf so a long object name truncates
//    instead of overflowing tmp_message;
//  - removed the unused `struct demo* demo` local;
//  - guards against a NULL pMessageIdName (passing NULL to %s is UB).
static VKAPI_ATTR VkBool32 VKAPI_CALL debug_messenger_callback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
    VkDebugUtilsMessageTypeFlagsEXT messageType,
    const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
    void* pUserData)
{
    (void)pUserData;

    char prefix[64] = "";

    // Worst case: prefix + header + raw message + up to 600 bytes per
    // object entry and per label entry (each entry is snprintf-bounded
    // to 500 bytes below).
    size_t message_cap = strlen(pCallbackData->pMessage) + 1000 +
        (size_t)pCallbackData->objectCount * 600 +
        (size_t)pCallbackData->cmdBufLabelCount * 600;
    char* message = malloc(message_cap);
    assert(message);

    if (use_break) {
        DebugBreak();
    }

    if (messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT) {
        strcat(prefix, "VERBOSE : ");
    }
    else if (messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT) {
        strcat(prefix, "INFO : ");
    }
    else if (messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT) {
        strcat(prefix, "WARNING : ");
    }
    else if (messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) {
        strcat(prefix, "ERROR : ");
    }

    if (messageType & VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT) {
        strcat(prefix, "GENERAL");
    }
    else {
        if (messageType & VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT) {
            strcat(prefix, "VALIDATION");
            validation_error = 1;
        }
        if (messageType & VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT) {
            if (messageType & VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT) {
                strcat(prefix, "|");
            }
            strcat(prefix, "PERFORMANCE");
        }
    }

    sprintf(message, "%s - Message Id Number: %d | Message Id Name: %s\n\t%s\n", prefix, pCallbackData->messageIdNumber,
        pCallbackData->pMessageIdName ? pCallbackData->pMessageIdName : "(null)", pCallbackData->pMessage);
    if (pCallbackData->objectCount > 0) {
        char tmp_message[500];
        snprintf(tmp_message, sizeof(tmp_message), "\n\tObjects - %d\n", pCallbackData->objectCount);
        strcat(message, tmp_message);
        for (uint32_t object = 0; object < pCallbackData->objectCount; ++object) {
            if (NULL != pCallbackData->pObjects[object].pObjectName && strlen(pCallbackData->pObjects[object].pObjectName) > 0) {
                snprintf(tmp_message, sizeof(tmp_message), "\t\tObject[%d] - %s, Handle %p, Name \"%s\"\n", object,
                    string_VkObjectType(pCallbackData->pObjects[object].objectType),
                    (void*)(pCallbackData->pObjects[object].objectHandle), pCallbackData->pObjects[object].pObjectName);
            }
            else {
                snprintf(tmp_message, sizeof(tmp_message), "\t\tObject[%d] - %s, Handle %p\n", object,
                    string_VkObjectType(pCallbackData->pObjects[object].objectType),
                    (void*)(pCallbackData->pObjects[object].objectHandle));
            }
            strcat(message, tmp_message);
        }
    }
    if (pCallbackData->cmdBufLabelCount > 0) {
        char tmp_message[500];
        snprintf(tmp_message, sizeof(tmp_message), "\n\tCommand Buffer Labels - %d\n", pCallbackData->cmdBufLabelCount);
        strcat(message, tmp_message);
        for (uint32_t cmd_buf_label = 0; cmd_buf_label < pCallbackData->cmdBufLabelCount; ++cmd_buf_label) {
            snprintf(tmp_message, sizeof(tmp_message), "\t\tLabel[%d] - %s { %f, %f, %f, %f}\n", cmd_buf_label,
                pCallbackData->pCmdBufLabels[cmd_buf_label].pLabelName, pCallbackData->pCmdBufLabels[cmd_buf_label].color[0],
                pCallbackData->pCmdBufLabels[cmd_buf_label].color[1], pCallbackData->pCmdBufLabels[cmd_buf_label].color[2],
                pCallbackData->pCmdBufLabels[cmd_buf_label].color[3]);
            strcat(message, tmp_message);
        }
    }

    in_callback = true;
    if (!suppress_popups) MessageBoxA(NULL, message, "Alert", MB_OK);
    in_callback = false;

    free(message);

    // Don't bail out, but keep going.
    return false;
}

/*
 * Creates the Vulkan instance, selects a physical device, and enumerates the
 * instance/device extensions this demo needs.
 *
 * Side effects (all file-scope globals): sets s_inst, s_gpu, gpu_props,
 * queue_family_count, queue_props, memory_properties, enabled_layer_count,
 * enabled_extension_count, extension_names, and (when validating) the
 * VK_EXT_debug_utils function pointers plus dbg_messenger.
 *
 * NOTE(review): fatal errors throughout use the `assert("message" == NULL)`
 * idiom, which always fails in debug builds but is compiled out entirely
 * under NDEBUG — release builds would continue past these failures. Confirm
 * whether that is acceptable or whether an ERR_EXIT-style abort is wanted.
 */
static void demo_init_vk(void)
{
    VkResult err;
    uint32_t instance_extension_count = 0;
    uint32_t instance_layer_count = 0;
    char* instance_validation_layers[] = { "VK_LAYER_KHRONOS_validation" };
    enabled_extension_count = 0;
    enabled_layer_count = 0;
    is_minimized = false;
    cmd_pool = VK_NULL_HANDLE;

    // Look for validation layers
    VkBool32 validation_found = 0;
    if (s_validate)
    {
        // Standard two-call pattern: first query the count, then fetch the list.
        err = vkEnumerateInstanceLayerProperties(&instance_layer_count, NULL);
        assert(err == VK_SUCCESS);

        if (instance_layer_count > 0)
        {
            VkLayerProperties* instance_layers = malloc(sizeof(VkLayerProperties) * instance_layer_count);
            err = vkEnumerateInstanceLayerProperties(&instance_layer_count, instance_layers);
            assert(err == VK_SUCCESS);

            validation_found = demo_check_layers(sizeof(instance_validation_layers) / sizeof(instance_validation_layers[0]), instance_validation_layers,
                instance_layer_count, instance_layers);
            if (validation_found) {
                enabled_layer_count = sizeof(instance_validation_layers) / sizeof(instance_validation_layers[0]);
                enabled_layers[0] = "VK_LAYER_KHRONOS_validation";
            }
            free(instance_layers);
        }

        if (!validation_found) {
            assert(
                "vkEnumerateInstanceLayerProperties failed to find required validation layer.\n\n"
                "Please look at the Getting Started guide for additional information.\n" == NULL && 
                "vkCreateInstance Failure" == NULL);
        }
    }

    /* Look for instance extensions */
    VkBool32 surfaceExtFound = 0;
    VkBool32 platformSurfaceExtFound = 0;
    memset(extension_names, 0, sizeof(extension_names));

    err = vkEnumerateInstanceExtensionProperties(NULL, &instance_extension_count, NULL);
    assert(err == VK_SUCCESS);

    if (instance_extension_count > 0)
    {
        VkExtensionProperties* instance_extensions = malloc(sizeof(VkExtensionProperties) * instance_extension_count);
        err = vkEnumerateInstanceExtensionProperties(NULL, &instance_extension_count, instance_extensions);
        assert(err == VK_SUCCESS);
        for (uint32_t i = 0; i < instance_extension_count; i++)
        {
            // Surface + Win32 surface are mandatory; debug utils only when validating.
            if (!strcmp(VK_KHR_SURFACE_EXTENSION_NAME, instance_extensions[i].extensionName)) {
                surfaceExtFound = 1;
                extension_names[enabled_extension_count++] = VK_KHR_SURFACE_EXTENSION_NAME;
            }
            if (!strcmp(VK_KHR_WIN32_SURFACE_EXTENSION_NAME, instance_extensions[i].extensionName)) {
                platformSurfaceExtFound = 1;
                extension_names[enabled_extension_count++] = VK_KHR_WIN32_SURFACE_EXTENSION_NAME;
            }
            if (!strcmp(VK_EXT_DEBUG_UTILS_EXTENSION_NAME, instance_extensions[i].extensionName)) {
                if (s_validate) {
                    extension_names[enabled_extension_count++] = VK_EXT_DEBUG_UTILS_EXTENSION_NAME;
                }
            }
            // Guard against overflowing extension_names (presumably 64 slots — verify).
            assert(enabled_extension_count < 64);
        }

        free(instance_extensions);
    }

    if (!surfaceExtFound) {
        assert("vkEnumerateInstanceExtensionProperties failed to find the " VK_KHR_SURFACE_EXTENSION_NAME
            " extension.\n\n"
            "Do you have a compatible Vulkan installable client driver (ICD) installed?\n"
            "Please look at the Getting Started guide for additional information.\n" == NULL &&
            "vkCreateInstance Failure" == NULL);
    }
    if (!platformSurfaceExtFound) {
        assert("vkEnumerateInstanceExtensionProperties failed to find the " VK_KHR_WIN32_SURFACE_EXTENSION_NAME
            " extension.\n\n"
            "Do you have a compatible Vulkan installable client driver (ICD) installed?\n"
            "Please look at the Getting Started guide for additional information.\n" == NULL &&
            "vkCreateInstance Failure" == NULL);
    }
    const VkApplicationInfo app = {
        .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
        .pNext = NULL,
        .pApplicationName = APP_SHORT_NAME,
        .applicationVersion = 0,
        .pEngineName = APP_SHORT_NAME,
        .engineVersion = 0,
        .apiVersion = VK_API_VERSION_1_0,
    };
    // Note: ppEnabledLayerNames always points at the validation-layer array,
    // but enabledLayerCount is 0 unless validation was found above.
    VkInstanceCreateInfo inst_info = {
        .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
        .pNext = NULL,
        .pApplicationInfo = &app,
        .enabledLayerCount = enabled_layer_count,
        .ppEnabledLayerNames = (const char* const*)instance_validation_layers,
        .enabledExtensionCount = enabled_extension_count,
        .ppEnabledExtensionNames = (const char* const*)extension_names,
    };

    /*
     * This is info for a temp callback to use during CreateInstance.
     * After the instance is created, we use the instance-based
     * function to register the final callback.
     */
    VkDebugUtilsMessengerCreateInfoEXT dbg_messenger_create_info;
    if (s_validate)
    {
        // VK_EXT_debug_utils style
        dbg_messenger_create_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
        dbg_messenger_create_info.pNext = NULL;
        dbg_messenger_create_info.flags = 0;
        dbg_messenger_create_info.messageSeverity =
            VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT;
        dbg_messenger_create_info.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
            VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
            VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT;
        dbg_messenger_create_info.pfnUserCallback = debug_messenger_callback;
        // Chaining the messenger info into pNext makes the callback active
        // during vkCreateInstance/vkDestroyInstance themselves.
        inst_info.pNext = &dbg_messenger_create_info;
    }

    uint32_t gpu_count;

    err = vkCreateInstance(&inst_info, NULL, &s_inst);
    if (err == VK_ERROR_INCOMPATIBLE_DRIVER) {
        assert(
            "Cannot find a compatible Vulkan installable client driver (ICD).\n\n"
            "Please look at the Getting Started guide for additional information.\n" == NULL &&
            "vkCreateInstance Failure" == NULL);
    }
    else if (err == VK_ERROR_EXTENSION_NOT_PRESENT) {
        assert(
            "Cannot find a specified extension library.\n"
            "Make sure your layers path is set appropriately.\n" == NULL &&
            "vkCreateInstance Failure" == NULL);
    }
    else if (err) {
        assert(
            "vkCreateInstance failed.\n\n"
            "Do you have a compatible Vulkan installable client driver (ICD) installed?\n"
            "Please look at the Getting Started guide for additional information.\n" == NULL &&
            "vkCreateInstance Failure" == NULL);
    }

    /* Make initial call to query gpu_count, then second call for gpu info*/
    err = vkEnumeratePhysicalDevices(s_inst, &gpu_count, NULL);
    assert(err == VK_SUCCESS);

    if (gpu_count > 0)
    {
        gpu_count = min(gpu_count, MAX_GPU_COUNT);

        VkPhysicalDevice physical_devices[MAX_GPU_COUNT];
        err = vkEnumeratePhysicalDevices(s_inst, &gpu_count, physical_devices);
        assert(err == VK_SUCCESS);
        // Fall back to GPU 0 if the requested index is out of range.
        if (gpu_number > gpu_count - 1)
        {
            printf("Gpu %u specified is not present, gpu count = %u\n", gpu_number, gpu_count);
            printf("Continuing with gpu 0\n");
            gpu_number = 0;
        }
        s_gpu = physical_devices[gpu_number];
    }
    else {
        assert(
            "vkEnumeratePhysicalDevices reported zero accessible devices.\n\n"
            "Do you have a compatible Vulkan installable client driver (ICD) installed?\n"
            "Please look at the Getting Started guide for additional information.\n" == NULL &&
            "vkEnumeratePhysicalDevices Failure" == NULL);
    }

    /* Look for device extensions */
    uint32_t device_extension_count = 0;
    VkBool32 swapchainExtFound = 0;
    // extension_names is reused: from here on it holds device extensions.
    enabled_extension_count = 0;
    memset(extension_names, 0, sizeof(extension_names));

    err = vkEnumerateDeviceExtensionProperties(s_gpu, NULL, &device_extension_count, NULL);
    assert(err == VK_SUCCESS);

    if (device_extension_count > 0)
    {
        device_extension_count = min(device_extension_count, MAX_DEVICE_EXTENSION_COUNT);
        VkExtensionProperties device_extensions[MAX_DEVICE_EXTENSION_COUNT];
        err = vkEnumerateDeviceExtensionProperties(s_gpu, NULL, &device_extension_count, device_extensions);
        assert(err == VK_SUCCESS);

        for (uint32_t i = 0; i < device_extension_count; i++) {
            if (strcmp(VK_KHR_SWAPCHAIN_EXTENSION_NAME, device_extensions[i].extensionName) == 0) {
                swapchainExtFound = 1;
                extension_names[enabled_extension_count++] = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
            }
            assert(enabled_extension_count < 64);
        }

        if (VK_KHR_incremental_present_enabled) {
            // Even though the user "enabled" the extension via the command
            // line, we must make sure that it's enumerated for use with the
            // device.  Therefore, disable it here, and re-enable it again if
            // enumerated.
            VK_KHR_incremental_present_enabled = false;
            for (uint32_t i = 0; i < device_extension_count; i++)
            {
                if (!strcmp(VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME, device_extensions[i].extensionName))
                {
                    extension_names[enabled_extension_count++] = VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME;
                    VK_KHR_incremental_present_enabled = true;
                    DbgMsg("VK_KHR_incremental_present extension enabled\n");
                }
                assert(enabled_extension_count < 64);
            }
            if (!VK_KHR_incremental_present_enabled) {
                DbgMsg("VK_KHR_incremental_present extension NOT AVAILABLE\n");
            }
        }
    }

    if (!swapchainExtFound) {
        assert("vkEnumerateDeviceExtensionProperties failed to find the " VK_KHR_SWAPCHAIN_EXTENSION_NAME
            " extension.\n\nDo you have a compatible Vulkan installable client driver (ICD) installed?\n"
            "Please look at the Getting Started guide for additional information.\n" == NULL && 
            "vkCreateInstance Failure" == NULL);
    }

    if (s_validate)
    {
        // Setup VK_EXT_debug_utils function pointers always (we use them for
        // debug labels and names).
        fpCreateDebugUtilsMessengerEXT =
            (PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr(s_inst, "vkCreateDebugUtilsMessengerEXT");
        fpDestroyDebugUtilsMessengerEXT =
            (PFN_vkDestroyDebugUtilsMessengerEXT)vkGetInstanceProcAddr(s_inst, "vkDestroyDebugUtilsMessengerEXT");
        fpSubmitDebugUtilsMessageEXT =
            (PFN_vkSubmitDebugUtilsMessageEXT)vkGetInstanceProcAddr(s_inst, "vkSubmitDebugUtilsMessageEXT");
        fpCmdBeginDebugUtilsLabelEXT =
            (PFN_vkCmdBeginDebugUtilsLabelEXT)vkGetInstanceProcAddr(s_inst, "vkCmdBeginDebugUtilsLabelEXT");
        fpCmdEndDebugUtilsLabelEXT =
            (PFN_vkCmdEndDebugUtilsLabelEXT)vkGetInstanceProcAddr(s_inst, "vkCmdEndDebugUtilsLabelEXT");
        fpCmdInsertDebugUtilsLabelEXT =
            (PFN_vkCmdInsertDebugUtilsLabelEXT)vkGetInstanceProcAddr(s_inst, "vkCmdInsertDebugUtilsLabelEXT");
        fpSetDebugUtilsObjectNameEXT =
            (PFN_vkSetDebugUtilsObjectNameEXT)vkGetInstanceProcAddr(s_inst, "vkSetDebugUtilsObjectNameEXT");
        if (NULL == fpCreateDebugUtilsMessengerEXT || NULL == fpDestroyDebugUtilsMessengerEXT ||
            NULL == fpSubmitDebugUtilsMessageEXT || NULL == fpCmdBeginDebugUtilsLabelEXT ||
            NULL == fpCmdEndDebugUtilsLabelEXT || NULL == fpCmdInsertDebugUtilsLabelEXT ||
            NULL == fpSetDebugUtilsObjectNameEXT) {
            assert("GetProcAddr: Failed to init VK_EXT_debug_utils\n" == NULL && "GetProcAddr: Failure" == NULL);
        }

        // Register the permanent (instance-lifetime) debug messenger, reusing
        // the same create-info that covered instance creation above.
        err = fpCreateDebugUtilsMessengerEXT(s_inst, &dbg_messenger_create_info, NULL, &dbg_messenger);
        switch (err)
        {
        case VK_SUCCESS:
            break;
        case VK_ERROR_OUT_OF_HOST_MEMORY:
            assert("CreateDebugUtilsMessengerEXT: out of host memory\n" == NULL &&
                "CreateDebugUtilsMessengerEXT Failure" == NULL);
            break;
        default:
            assert("CreateDebugUtilsMessengerEXT: unknown failure\n" == NULL &&
                "CreateDebugUtilsMessengerEXT Failure" == NULL);
            break;
        }
    }
    vkGetPhysicalDeviceProperties(s_gpu, &gpu_props);

    /* Call with NULL data to get count */
    vkGetPhysicalDeviceQueueFamilyProperties(s_gpu, &queue_family_count, NULL);
    assert(queue_family_count >= 1);
    // Clamp to the fixed-size queue_props array.
    if (queue_family_count > MAX_QUEUE_FAMILY_COUNT) {
        queue_family_count = MAX_QUEUE_FAMILY_COUNT;
    }

    vkGetPhysicalDeviceQueueFamilyProperties(s_gpu, &queue_family_count, queue_props);

    // Query fine-grained feature support for this device.
    //  If app has specific feature requirements it should check supported
    //  features based on this query
    VkPhysicalDeviceFeatures physDevFeatures;
    vkGetPhysicalDeviceFeatures(s_gpu, &physDevFeatures);
}

static void demo_create_device(void)
{
    float queue_priorities[1] = { 0.0 };
    VkDeviceQueueCreateInfo queues[2];
    queues[0].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
    queues[0].pNext = NULL;
    queues[0].queueFamilyIndex = graphics_queue_family_index;
    queues[0].queueCount = 1;
    queues[0].pQueuePriorities = queue_priorities;
    queues[0].flags = 0;

    VkDeviceCreateInfo device = {
        .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
        .pNext = NULL,
        .queueCreateInfoCount = 1,
        .pQueueCreateInfos = queues,
        .enabledLayerCount = 0,
        .ppEnabledLayerNames = NULL,
        .enabledExtensionCount = enabled_extension_count,
        .ppEnabledExtensionNames = (const char* const*)extension_names,
        .pEnabledFeatures = NULL,  // If specific features are required, pass them in here
    };
    if (separate_present_queue)
    {
        queues[1].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
        queues[1].pNext = NULL;
        queues[1].queueFamilyIndex = present_queue_family_index;
        queues[1].queueCount = 1;
        queues[1].pQueuePriorities = queue_priorities;
        queues[1].flags = 0;
        device.queueCreateInfoCount = 2;
    }
    VkResult err = vkCreateDevice(s_gpu, &device, NULL, &s_device);
    assert(err == VK_SUCCESS);
}

/*
 * Selects a swapchain surface format from the list the surface exposes.
 * Returns the first entry whose format is one of our preferred UNORM/SFLOAT
 * (non-SRGB) formats; otherwise warns and falls back to surfaceFormats[0].
 * Requires count >= 1.
 */
static VkSurfaceFormatKHR pick_surface_format(const VkSurfaceFormatKHR* surfaceFormats, uint32_t count)
{
    for (uint32_t idx = 0; idx < count; idx++)
    {
        switch (surfaceFormats[idx].format)
        {
        case VK_FORMAT_R8G8B8A8_UNORM:
        case VK_FORMAT_B8G8R8A8_UNORM:
        case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
        case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
        case VK_FORMAT_R16G16B16A16_SFLOAT:
            // Prefer non-SRGB formats.
            return surfaceFormats[idx];
        default:
            break;
        }
    }

    printf("Can't find our preferred formats... Falling back to first exposed format. Rendering may be incorrect.\n");

    assert(count >= 1);
    return surfaceFormats[0];
}

/*
 * Creates the window surface, picks graphics/present queue families, creates
 * the logical device and its queues, chooses the surface format, and creates
 * the per-frame fences/semaphores used to pace rendering (FRAME_LAG deep).
 *
 * Side effects (file-scope globals): s_surface (via demo_create_surface),
 * graphics_queue_family_index, present_queue_family_index,
 * separate_present_queue, s_device (via demo_create_device), graphics_queue,
 * present_queue, s_format, color_space, quit, cur_frame, fences[],
 * image_acquired_semaphores[], draw_complete_semaphores[],
 * image_ownership_semaphores[], frame_index, memory_properties.
 */
static void demo_init_vk_swapchain(void)
{
    demo_create_surface();

    // Iterate over each queue to learn whether it supports presenting:
    VkBool32 supportsPresent[MAX_QUEUE_FAMILY_COUNT];
    for (uint32_t i = 0; i < queue_family_count; i++) {
        vkGetPhysicalDeviceSurfaceSupportKHR(s_gpu, i, s_surface, &supportsPresent[i]);
    }

    // Search for a graphics and a present queue in the array of queue
    // families, try to find one that supports both
    uint32_t graphicsQueueFamilyIndex = UINT32_MAX;
    uint32_t presentQueueFamilyIndex = UINT32_MAX;
    for (uint32_t i = 0; i < queue_family_count; i++)
    {
        if ((queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) != 0) {
            // Remember the first graphics-capable family as a fallback...
            if (graphicsQueueFamilyIndex == UINT32_MAX) {
                graphicsQueueFamilyIndex = i;
            }

            // ...but a family that can do both graphics and present wins outright.
            if (supportsPresent[i] == VK_TRUE) {
                graphicsQueueFamilyIndex = i;
                presentQueueFamilyIndex = i;
                break;
            }
        }
    }

    if (presentQueueFamilyIndex == UINT32_MAX)
    {
        // If didn't find a queue that supports both graphics and present, then
        // find a separate present queue.
        for (uint32_t i = 0; i < queue_family_count; ++i)
        {
            if (supportsPresent[i] == VK_TRUE) {
                presentQueueFamilyIndex = i;
                break;
            }
        }
    }

    // Generate error if could not find both a graphics and a present queue
    // (always-false assert used as a fatal error; compiled out under NDEBUG).
    if (graphicsQueueFamilyIndex == UINT32_MAX || presentQueueFamilyIndex == UINT32_MAX) {
        assert("Could not find both graphics and present queues\n" == NULL && 
            "Swapchain Initialization Failure" == NULL);
    }

    graphics_queue_family_index = graphicsQueueFamilyIndex;
    present_queue_family_index = presentQueueFamilyIndex;
    separate_present_queue = (graphics_queue_family_index != present_queue_family_index);

    // demo_create_device reads the two family indices set just above.
    demo_create_device();

    vkGetDeviceQueue(s_device, graphics_queue_family_index, 0, &graphics_queue);

    if (!separate_present_queue) {
        present_queue = graphics_queue;
    }
    else {
        vkGetDeviceQueue(s_device, present_queue_family_index, 0, &present_queue);
    }

    // Get the list of VkFormat's that are supported:
    uint32_t formatCount;
    VkResult err = vkGetPhysicalDeviceSurfaceFormatsKHR(s_gpu, s_surface, &formatCount, NULL);
    assert(err == VK_SUCCESS);

    // Clamp to the fixed-size local array before the second (fill) call.
    formatCount = min(formatCount, MAX_FORMAT_COUNT);
    VkSurfaceFormatKHR surfFormats[MAX_FORMAT_COUNT];
    err = vkGetPhysicalDeviceSurfaceFormatsKHR(s_gpu, s_surface, &formatCount, surfFormats);
    assert(err == VK_SUCCESS);
    VkSurfaceFormatKHR surfaceFormat = pick_surface_format(surfFormats, formatCount);
    s_format = surfaceFormat.format;
    color_space = surfaceFormat.colorSpace;

    quit = false;
    cur_frame = 0;

    // Create semaphores to synchronize acquiring presentable buffers before
    // rendering and waiting for drawing to be complete before presenting
    VkSemaphoreCreateInfo semaphoreCreateInfo = {
        .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
        .pNext = NULL,
        .flags = 0,
    };

    // Create fences that we can use to throttle if we get too far
    // ahead of the image presents
    // (created signaled so the first frame's wait returns immediately)
    VkFenceCreateInfo fence_ci = {
        .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, .pNext = NULL, .flags = VK_FENCE_CREATE_SIGNALED_BIT };
    for (uint32_t i = 0; i < FRAME_LAG; i++) {
        err = vkCreateFence(s_device, &fence_ci, NULL, &fences[i]);
        assert(err == VK_SUCCESS);

        err = vkCreateSemaphore(s_device, &semaphoreCreateInfo, NULL, &image_acquired_semaphores[i]);
        assert(err == VK_SUCCESS);

        err = vkCreateSemaphore(s_device, &semaphoreCreateInfo, NULL, &draw_complete_semaphores[i]);
        assert(err == VK_SUCCESS);

        // Ownership-transfer semaphores are only needed when graphics and
        // present run on different queue families.
        if (separate_present_queue) {
            err = vkCreateSemaphore(s_device, &semaphoreCreateInfo, NULL, &image_ownership_semaphores[i]);
            assert(err == VK_SUCCESS);
        }
    }
    frame_index = 0;

    // Get Memory information and properties
    vkGetPhysicalDeviceMemoryProperties(s_gpu, &memory_properties);
}

/*
 * One-time demo setup: initializes Vulkan (instance + physical device via
 * demo_init_vk), records the render target size, and builds the initial
 * model/view/projection matrices for the spinning-cube scene.
 */
static void demo_init(int windowWidth, int windowHeight)
{
    // Camera setup: positioned above and in front of the origin, Y-up.
    vec3 camera_position = { 0.0f, 3.0f, 5.0f };
    vec3 look_target = { 0, 0, 0 };
    vec3 up_direction = { 0.0f, 1.0f, 0.0 };

    present_mode = VK_PRESENT_MODE_FIFO_KHR;
    frame_count = INT32_MAX;
    /* For cube demo we just grab the first physical device by default */
    gpu_number = 0;

    demo_init_vk();

    render_width = windowWidth;
    render_height = windowHeight;

    // Cube rotation parameters; space bar toggles pause (see WndProc).
    spin_angle = 1.0f;
    spin_increment = 0.1f;
    pause = false;

    mat4x4_perspective(projection_matrix, (float)degreesToRadians(45.0f), 1.0f, 0.1f, 100.0f);
    mat4x4_look_at(view_matrix, camera_position, look_target, up_direction);
    mat4x4_identity(model_matrix);

    projection_matrix[1][1] *= -1;  // Flip projection matrix from GL to Vulkan orientation.
}

/*
 * Window procedure for the demo window.
 * Handles close, repaint, resize, minimum-size, and keyboard input;
 * everything else is forwarded to DefWindowProc.
 */
static LRESULT CALLBACK WndProc(HWND hWnd, UINT uMsg, WPARAM wParam, LPARAM lParam)
{
    switch (uMsg)
    {
    case WM_CREATE:
    {
        RECT windowRect;
        GetWindowRect(hWnd, &windowRect);
        break;
    }

    case WM_CLOSE:
        PostQuitMessage(0);
        break;

    case WM_PAINT:
        // Avoid re-entering the renderer from inside the debug callback's
        // message box.
        if (!in_callback) {
            demo_run();
        }
        break;

    case WM_GETMINMAXINFO:
        // Enforce the minimum tracking size computed in InitializeWindow.
        ((MINMAXINFO*)lParam)->ptMinTrackSize = minsize;
        return 0;

    case WM_ERASEBKGND:
        // Claim the background is handled; Vulkan repaints the whole client area.
        return 1;

    case WM_SIZE:
        // Resize the application to the new window size, except when
        // it was minimized. Vulkan doesn't support images or swapchains
        // with width=0 and height=0.
        if (wParam != SIZE_MINIMIZED)
        {
            render_width = LOWORD(lParam);
            render_height = HIWORD(lParam);
            demo_resize();
        }
        break;

    case WM_KEYDOWN:
        if (wParam == VK_ESCAPE) {
            PostQuitMessage(validation_error);
        }
        else if (wParam == VK_LEFT) {
            spin_angle -= spin_increment;
        }
        else if (wParam == VK_RIGHT) {
            spin_angle += spin_increment;
        }
        else if (wParam == VK_SPACE) {
            pause = !pause;
        }
        return 0;

    default:
        break;
    }

    return DefWindowProc(hWnd, uMsg, wParam, lParam);
}

/*
 * Registers the window class and creates the demo window so that its CLIENT
 * area is windowWidth x windowHeight pixels, then records the minimum window
 * tracking size used by WM_GETMINMAXINFO.
 *
 * Side effects (globals): sets `connection` (module handle), `window`
 * (HWND), and `minsize`. Exits the process on failure.
 *
 * Fix: the original computed the adjusted outer rect with AdjustWindowRect
 * but then passed the raw client dimensions to CreateWindowEx, producing a
 * client area smaller than requested (borders/title bar eat into it). The
 * adjusted rect's width/height are now actually used.
 */
static void InitializeWindow(LPCWSTR appName, int windowWidth, int windowHeight)
{
    connection = GetModuleHandle(NULL);
    WNDCLASSEX win_class;

    // Initialize the window class structure:
    win_class.cbSize = sizeof(WNDCLASSEX);
    win_class.style = CS_HREDRAW | CS_VREDRAW;
    win_class.lpfnWndProc = WndProc;
    win_class.cbClsExtra = 0;
    win_class.cbWndExtra = 0;
    win_class.hInstance = connection;  // hInstance
    win_class.hIcon = LoadIcon(NULL, IDI_APPLICATION);
    win_class.hCursor = LoadCursor(NULL, IDC_ARROW);
    win_class.hbrBackground = (HBRUSH)GetStockObject(WHITE_BRUSH);
    win_class.lpszMenuName = NULL;
    win_class.lpszClassName = appName;
    win_class.hIconSm = LoadIcon(NULL, IDI_WINLOGO);
    // Register window class:
    if (!RegisterClassEx(&win_class))
    {
        // It didn't work, so try to give a useful error:
        printf("Unexpected error trying to start the application!\n");
        fflush(stdout);
        exit(1);
    }

    const LONG windowStyle = WS_OVERLAPPEDWINDOW | WS_VISIBLE | WS_SYSMENU;

    // Compute the outer window size that yields the requested client size.
    RECT wr = { 0, 0, windowWidth, windowHeight };
    AdjustWindowRect(&wr, WS_OVERLAPPEDWINDOW, FALSE);

    window = CreateWindowEx(
        0,                              // extra style
        appName,                        // class name
        appName,                        // app name
        windowStyle,                    // window style
        CW_USEDEFAULT, CW_USEDEFAULT,   // x, y coords
        wr.right - wr.left,             // outer width for the requested client width
        wr.bottom - wr.top,             // outer height for the requested client height
        NULL,                           // handle to parent
        NULL,                           // handle to menu
        connection,                     // hInstance
        NULL);

    if (window == NULL) {
        // It didn't work, so try to give a useful error:
        puts("Cannot create a window in which to draw!");
        fflush(stdout);
        // Without a window the rest of initialization cannot proceed.
        exit(1);
    }

    // Window client area size must be at least 1 pixel high, to prevent crash.
    minsize.x = GetSystemMetrics(SM_CXMINTRACK);
    minsize.y = GetSystemMetrics(SM_CYMINTRACK) + 1;
}

/*
 * Program entry point: creates the window, initializes Vulkan and the
 * swapchain, then runs the Win32 message loop until WM_QUIT. Rendering is
 * driven by posting RDW_INTERNALPAINT each iteration, which generates
 * WM_PAINT messages handled in WndProc.
 *
 * Fix: the original ignored PeekMessage's return value and left `msg`
 * uninitialized, so the first iteration read an indeterminate msg.message
 * (undefined behavior) and empty-queue iterations re-examined a stale
 * message. The loop now only inspects/dispatches msg when PeekMessage
 * actually removed one.
 */
int main(int argc, const char* argv[])
{
    (void)argc;
    (void)argv;

    const int windowWidth = 512, windowHeight = 512;

    InitializeWindow(APP_SHORT_NAME_W, windowWidth, windowHeight);

    // Initialize Vulkan Assets
    demo_init(windowWidth, windowHeight);
    demo_init_vk_swapchain();

    demo_prepare();

    // initialize loop condition variable
    bool done = false;

    // main message loop
    MSG msg = { 0 };
    while (!done)
    {
        if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE))
        {
            if (msg.message == WM_QUIT)  // check for a quit message
            {
                demo_cleanup();
                done = true;  // if found, quit app
            }
            else {
                /* Translate and dispatch to event queue*/
                TranslateMessage(&msg);
                DispatchMessage(&msg);
            }
        }
        // Request another WM_PAINT so the scene keeps animating.
        RedrawWindow(window, NULL, NULL, RDW_INTERNALPAINT);
    }

    if (window != NULL)
    {
        DestroyWindow(window);
        window = NULL;
    }

    // WM_QUIT's wParam carries the exit code passed to PostQuitMessage
    // (validation_error when quitting via Escape).
    return (int)msg.wParam;
}

