#include "stb_image_load.h"

//#ifdef STB_IMAGE_IMPLEMENTATION

#if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || defined(STBI_ONLY_BMP) || defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) || defined(STBI_ONLY_HDR) \
	|| defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) || defined(STBI_ONLY_ZLIB)
#ifndef STBI_ONLY_JPEG
#define STBI_NO_JPEG
#endif
#ifndef STBI_ONLY_PNG
#define STBI_NO_PNG
#endif
#ifndef STBI_ONLY_BMP
#define STBI_NO_BMP
#endif
#ifndef STBI_ONLY_PSD
#define STBI_NO_PSD
#endif
#ifndef STBI_ONLY_TGA
#define STBI_NO_TGA
#endif
#ifndef STBI_ONLY_GIF
#define STBI_NO_GIF
#endif
#ifndef STBI_ONLY_HDR
#define STBI_NO_HDR
#endif
#ifndef STBI_ONLY_PIC
#define STBI_NO_PIC
#endif
#ifndef STBI_ONLY_PNM
#define STBI_NO_PNM
#endif
#endif

#if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB)
#define STBI_NO_ZLIB
#endif

#include <stdarg.h>
#include <stddef.h> // ptrdiff_t on osx
#include <stdlib.h>
#include <string.h>
#include <limits.h>

#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR)
#include <math.h> // ldexp, pow
#endif

#ifndef STBI_NO_STDIO
#include <stdio.h>
#endif

#ifndef STBI_ASSERT
#include <assert.h>
#define STBI_ASSERT(x) assert(x)
#endif

#ifdef __cplusplus
#define STBI_EXTERN extern "C"
#else
#define STBI_EXTERN extern
#endif

#ifndef _MSC_VER
#ifdef __cplusplus
#define stbi_inline inline
#else
#define stbi_inline
#endif
#else
#define stbi_inline __forceinline
#endif

#ifndef STBI_NO_THREAD_LOCALS
#if defined(__cplusplus) && __cplusplus >= 201103L
#define STBI_THREAD_LOCAL thread_local
#elif defined(__GNUC__) && __GNUC__ < 5
#define STBI_THREAD_LOCAL __thread
#elif defined(_MSC_VER)
#define STBI_THREAD_LOCAL __declspec(thread)
#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_THREADS__)
#define STBI_THREAD_LOCAL _Thread_local
#endif

#ifndef STBI_THREAD_LOCAL
#if defined(__GNUC__)
#define STBI_THREAD_LOCAL __thread
#endif
#endif
#endif

#ifdef _MSC_VER
typedef unsigned short __stbi_uint16;
typedef signed short __stbi_int16;
typedef unsigned int __stbi_uint32;
typedef signed int __stbi_int32;
#else
#include <stdint.h>
typedef uint16_t __stbi_uint16;
typedef int16_t __stbi_int16;
typedef uint32_t __stbi_uint32;
typedef int32_t __stbi_int32;
#endif

// should produce compiler error if size is wrong
typedef unsigned char validate_uint32[sizeof(__stbi_uint32) == 4 ? 1 : -1];

#ifdef _MSC_VER
#define STBI_NOTUSED(v) (void)(v)
#else
#define STBI_NOTUSED(v) (void)sizeof(v)
#endif

#ifdef _MSC_VER
#define STBI_HAS_LROTL
#endif

#ifdef STBI_HAS_LROTL
#define stbi_lrot(x, y) _lrotl(x, y)
#else
#define stbi_lrot(x, y) (((x) << (y)) | ((x) >> (32 - (y))))
#endif

#if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED))
// ok
#elif !defined(STBI_MALLOC) && !defined(STBI_FREE) && !defined(STBI_REALLOC) && !defined(STBI_REALLOC_SIZED)
// ok
#else
#error "must define all or none of STBI_MALLOC, STBI_FREE, and STBI_REALLOC (or STBI_REALLOC_SIZED)."
#endif

#ifndef STBI_MALLOC
#define STBI_MALLOC(sz) malloc(sz)
#define STBI_REALLOC(p, newsz) realloc(p, newsz)
#define STBI_FREE(p) free(p)
#endif

#ifndef STBI_REALLOC_SIZED
#define STBI_REALLOC_SIZED(p, oldsz, newsz) STBI_REALLOC(p, newsz)
#endif

// x86/x64 detection
#if defined(__x86_64__) || defined(_M_X64)
#define STBI__X64_TARGET
#elif defined(__i386) || defined(_M_IX86)
#define STBI__X86_TARGET
#endif

#if defined(__GNUC__) && defined(STBI__X86_TARGET) && !defined(__SSE2__) && !defined(STBI_NO_SIMD)
// gcc doesn't support sse2 intrinsics unless you compile with -msse2,
// which in turn means it gets to use SSE2 everywhere. This is unfortunate,
// but previous attempts to provide the SSE2 functions with runtime
// detection caused numerous issues. The way architecture extensions are
// exposed in GCC/Clang is, sadly, not really suited for one-file libs.
// New behavior: if compiled with -msse2, we use SSE2 without any
// detection; if not, we don't use it at all.
#define STBI_NO_SIMD
#endif

#if defined(__MINGW32__) && defined(STBI__X86_TARGET) && !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD)
// Note that __MINGW32__ doesn't actually mean 32-bit, so we have to avoid STBI__X64_TARGET
//
// 32-bit MinGW wants ESP to be 16-byte aligned, but this is not in the
// Windows ABI and VC++ as well as Windows DLLs don't maintain that invariant.
// As a result, enabling SSE2 on 32-bit MinGW is dangerous when not
// simultaneously enabling "-mstackrealign".
//
// See https://github.com/nothings/stb/issues/81 for more information.
//
// So default to no SSE2 on 32-bit MinGW. If you've read this far and added
// -mstackrealign to your build settings, feel free to #define STBI_MINGW_ENABLE_SSE2.
#define STBI_NO_SIMD
#endif

#if !defined(STBI_NO_SIMD) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET))
#define STBI_SSE2
#include <emmintrin.h>

#ifdef _MSC_VER

#if _MSC_VER >= 1400 // not VC6
#include <intrin.h> // __cpuid
// Return the EDX feature-flag word from CPUID leaf 1
// (bit 26 of EDX indicates SSE2 support).
static int __stbi_cpuid3(void)
{
	int info[4];
	__cpuid(info, 1);
	return info[3];
}
#else
// VC6 fallback: no __cpuid intrinsic available, so issue CPUID leaf 1 via
// inline assembly and return the EDX feature-flag word (bit 26 = SSE2).
static int __stbi_cpuid3(void)
{
	int res;
	__asm {
		mov  eax, 1
		cpuid
		mov  res, edx
	}
	return res;
}
#endif

#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name

#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2)
// Runtime SSE2 probe for MSVC builds: CPUID leaf 1 reports SSE2 support
// in bit 26 of EDX.
static int __stbi_sse2_available(void)
{
	return (__stbi_cpuid3() & (1 << 26)) ? 1 : 0;
}
#endif

#else // assume GCC-style if not VC++
#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))

#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2)
// Compile-time SSE2 probe for GCC/Clang builds: always true here.
static int __stbi_sse2_available(void)
{
	// If we're even attempting to compile this on GCC/Clang, that means
	// -msse2 is on, which means the compiler is allowed to use SSE2
	// instructions at will, and so are we.
	return 1;
}
#endif

#endif
#endif

// ARM NEON
#if defined(STBI_NO_SIMD) && defined(STBI_NEON)
#undef STBI_NEON
#endif

#ifdef STBI_NEON
#include <arm_neon.h>
// assume GCC or Clang on ARM targets
#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
#endif

#ifndef STBI_SIMD_ALIGN
#define STBI_SIMD_ALIGN(type, name) type name
#endif

#ifndef STBI_MAX_DIMENSIONS
#define STBI_MAX_DIMENSIONS (1 << 24)
#endif

///////////////////////////////////////////////
//
//  __stbi_context struct and start_xxx functions

// __stbi_context structure is our basic context used by all images, so it
// contains all the IO context, plus some basic image information
typedef struct
{
	__stbi_uint32 img_x, img_y; // image dimensions in pixels
	int img_n, img_out_n;       // channels in the file / channels being emitted

	stbi_io_callbacks io; // user callbacks; io.read == NULL means "memory buffer mode"
	void* io_user_data;   // opaque pointer handed back to every callback

	int read_from_callbacks;     // nonzero while more data can be pulled via io.read
	int buflen;                  // capacity of buffer_start (set in __stbi_start_callbacks)
	stbi_uc buffer_start[128];   // staging buffer for callback-based reads
	int callback_already_read;   // bytes consumed from the callback before the current fill

	stbi_uc *img_buffer, *img_buffer_end;                   // current read cursor and end of valid data
	stbi_uc *img_buffer_original, *img_buffer_original_end; // saved bounds, restored by __stbi_rewind
} __stbi_context;

static void __stbi_refill_buffer(__stbi_context* s);

// Set up a context that decodes directly out of a caller-owned memory buffer.
static void __stbi_start_mem(__stbi_context* s, stbi_uc const* buffer, int len)
{
	stbi_uc* start = (stbi_uc*)buffer;

	s->io.read = NULL; // no callbacks: all data is already in memory
	s->read_from_callbacks = 0;
	s->callback_already_read = 0;
	s->img_buffer_original = start;
	s->img_buffer = start;
	s->img_buffer_original_end = start + len;
	s->img_buffer_end = start + len;
}

// Initialize a callback-based context and prime its staging buffer with the
// first chunk of data, so img_buffer/img_buffer_end are valid before any read.
static void __stbi_start_callbacks(__stbi_context* s, stbi_io_callbacks* c, void* user)
{
	s->io = *c;
	s->io_user_data = user;
	s->buflen = sizeof(s->buffer_start);
	s->read_from_callbacks = 1;
	s->callback_already_read = 0;
	s->img_buffer = s->img_buffer_original = s->buffer_start;
	__stbi_refill_buffer(s);
	s->img_buffer_original_end = s->img_buffer_end; // remember the end for __stbi_rewind
}

#ifndef STBI_NO_STDIO

// stbi_io_callbacks.read adapter over fread(); returns bytes actually read.
static int __stbi_stdio_read(void* user, char* value, int size)
{
	size_t got = fread(value, 1, size, (FILE*)user);
	return (int)got;
}

// stbi_io_callbacks.skip adapter: advance the stream by n bytes.
static void __stbi_stdio_skip(void* user, int n)
{
	FILE* f = (FILE*)user;
	int probe;

	fseek(f, n, SEEK_CUR);
	// fseek() clears the end-of-file indicator, so probe one byte to restore
	// correct feof() semantics, then push it back if it was real data.
	probe = fgetc(f);
	if (probe != EOF)
	{
		ungetc(probe, f);
	}
}

// stbi_io_callbacks.eof adapter: nonzero at end-of-file OR after a stream
// error, since either way no further bytes can be produced.
// Fix: the previous body used `int(...)`, a C++ function-style cast that does
// not compile as C; the plain logical expression already yields an int.
static int __stbi_stdio_eof(void* user)
{
	return feof((FILE*)user) || ferror((FILE*)user);
}

// Default stdio-backed IO callback table used by __stbi_start_file().
static stbi_io_callbacks __stbi_stdio_callbacks = {
	__stbi_stdio_read,
	__stbi_stdio_skip,
	__stbi_stdio_eof,
};

// Set up a context that reads from an already-open stdio stream.
static void __stbi_start_file(__stbi_context* s, FILE* f)
{
	__stbi_start_callbacks(s, &__stbi_stdio_callbacks, (void*)f);
}

//static void stop_file(__stbi_context *s) { }

#endif // !STBI_NO_STDIO

// Restore the read cursor to the saved start-of-buffer position.
static void __stbi_rewind(__stbi_context* s)
{
	// conceptually rewind SHOULD rewind to the beginning of the stream,
	// but we just rewind to the beginning of the initial buffer, because
	// we only use it after doing 'test', which only ever looks at at most 92 bytes
	s->img_buffer = s->img_buffer_original;
	s->img_buffer_end = s->img_buffer_original_end;
}

// Channel ordering of decoded output (see __stbi_result_info.channel_order).
enum
{
	STBI_ORDER_RGB,
	STBI_ORDER_BGR
};

// Per-decode metadata a loader fills in alongside the pixel data.
typedef struct
{
	int bits_per_channel; // 8 or 16
	int num_channels;     // initialized to 0 by __stbi_load_main; loaders may fill it in
	int channel_order;    // STBI_ORDER_RGB or STBI_ORDER_BGR
} __stbi_result_info;

#ifndef STBI_NO_JPEG
static int __stbi_jpeg_test(__stbi_context* s);
static void* __stbi_jpeg_load(__stbi_context* s, int* x, int* y, int* comp, int req_comp, __stbi_result_info* ri);
static int __stbi_jpeg_info(__stbi_context* s, int* x, int* y, int* comp);
#endif

#ifndef STBI_NO_PNG
static int __stbi_png_test(__stbi_context* s);
static void* __stbi_png_load(__stbi_context* s, int* x, int* y, int* comp, int req_comp, __stbi_result_info* ri);
static int __stbi_png_info(__stbi_context* s, int* x, int* y, int* comp);
static int __stbi_png_is16(__stbi_context* s);
#endif

#ifndef STBI_NO_BMP
static int __stbi_bmp_test(__stbi_context* s);
static void* __stbi_bmp_load(__stbi_context* s, int* x, int* y, int* comp, int req_comp, __stbi_result_info* ri);
static int __stbi_bmp_info(__stbi_context* s, int* x, int* y, int* comp);
#endif

#ifndef STBI_NO_TGA
static int __stbi_tga_test(__stbi_context* s);
static void* __stbi_tga_load(__stbi_context* s, int* x, int* y, int* comp, int req_comp, __stbi_result_info* ri);
static int __stbi_tga_info(__stbi_context* s, int* x, int* y, int* comp);
#endif

#ifndef STBI_NO_PSD
static int __stbi_psd_test(__stbi_context* s);
static void* __stbi_psd_load(__stbi_context* s, int* x, int* y, int* comp, int req_comp, __stbi_result_info* ri, int bpc);
static int __stbi_psd_info(__stbi_context* s, int* x, int* y, int* comp);
static int __stbi_psd_is16(__stbi_context* s);
#endif

#ifndef STBI_NO_HDR
static int __stbi_hdr_test(__stbi_context* s);
static float* __stbi_hdr_load(__stbi_context* s, int* x, int* y, int* comp, int req_comp, __stbi_result_info* ri);
static int __stbi_hdr_info(__stbi_context* s, int* x, int* y, int* comp);
#endif

#ifndef STBI_NO_PIC
static int __stbi_pic_test(__stbi_context* s);
static void* __stbi_pic_load(__stbi_context* s, int* x, int* y, int* comp, int req_comp, __stbi_result_info* ri);
static int __stbi_pic_info(__stbi_context* s, int* x, int* y, int* comp);
#endif

#ifndef STBI_NO_GIF
static int __stbi_gif_test(__stbi_context* s);
static void* __stbi_gif_load(__stbi_context* s, int* x, int* y, int* comp, int req_comp, __stbi_result_info* ri);
static void* __stbi_load_gif_main(__stbi_context* s, int** delays, int* x, int* y, int* z, int* comp, int req_comp);
static int __stbi_gif_info(__stbi_context* s, int* x, int* y, int* comp);
#endif

#ifndef STBI_NO_PNM
static int __stbi_pnm_test(__stbi_context* s);
static void* __stbi_pnm_load(__stbi_context* s, int* x, int* y, int* comp, int req_comp, __stbi_result_info* ri);
static int __stbi_pnm_info(__stbi_context* s, int* x, int* y, int* comp);
#endif

static
#ifdef STBI_THREAD_LOCAL
	STBI_THREAD_LOCAL
#endif
	const char* __stbi_g_failure_reason;

// Public: human-readable reason for the most recent failure (NULL if none).
STBIDEF const char* stbi_failure_reason(void)
{
	return __stbi_g_failure_reason;
}

#ifndef STBI_NO_FAILURE_STRINGS
// Record a failure message and return 0, so callers can `return __stbi_err(...)`.
static int __stbi_err(const char* str)
{
	__stbi_g_failure_reason = str;
	return 0;
}
#endif

// Central allocation hook; routes through STBI_MALLOC so users can override it.
static void* __stbi_malloc(size_t size)
{
	return STBI_MALLOC(size);
}

// stb_image uses ints pervasively, including for offset calculations.
// therefore the largest decoded image size we can support with the
// current code, even on 64-bit targets, is INT_MAX. this is not a
// significant limitation for the intended use case.
//
// we do, however, need to make sure our size calculations don't
// overflow. hence a few helper functions for size calculations that
// multiply integers together, making sure that they're non-negative
// and no overflow occurs.

// Returns 1 if "a + b" fits in an int, 0 otherwise.
// A negative b is rejected outright; the check "a + b <= INT_MAX" is
// rewritten as "a <= INT_MAX - b" so the test itself can never overflow.
static int __stbi_addsizes_valid(int a, int b)
{
	if (b < 0) return 0;
	return (a <= INT_MAX - b) ? 1 : 0;
}

// Returns 1 if "a * b" fits in an int, 0 otherwise.
// Negative factors are considered invalid by definition.
static int __stbi_mul2sizes_valid(int a, int b)
{
	if (a < 0 || b < 0) return 0;
	// multiplying by zero can never overflow
	if (b == 0) return 1;
	// "a*b <= INT_MAX" recast as a division so the check itself is overflow-free
	return (a <= INT_MAX / b) ? 1 : 0;
}

#if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR)
// returns 1 if "a*b + add" has no negative terms/factors and doesn't overflow
static int __stbi_mad2sizes_valid(int a, int b, int add)
{
	if (!__stbi_mul2sizes_valid(a, b)) return 0;
	return __stbi_addsizes_valid(a * b, add) ? 1 : 0;
}
#endif

// returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow
static int __stbi_mad3sizes_valid(int a, int b, int c, int add)
{
	// validate each partial product left-to-right, then the final addition
	if (!__stbi_mul2sizes_valid(a, b)) return 0;
	if (!__stbi_mul2sizes_valid(a * b, c)) return 0;
	return __stbi_addsizes_valid(a * b * c, add) ? 1 : 0;
}

// returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow
#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR)
static int __stbi_mad4sizes_valid(int a, int b, int c, int d, int add)
{
	// validate each partial product left-to-right, then the final addition
	if (!__stbi_mul2sizes_valid(a, b)) return 0;
	if (!__stbi_mul2sizes_valid(a * b, c)) return 0;
	if (!__stbi_mul2sizes_valid(a * b * c, d)) return 0;
	return __stbi_addsizes_valid(a * b * c * d, add) ? 1 : 0;
}
#endif

#if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR)
// malloc(a*b + add) with overflow checking; NULL if the size is invalid.
static void* __stbi_malloc_mad2(int a, int b, int add)
{
	if (!__stbi_mad2sizes_valid(a, b, add)) return NULL;
	return __stbi_malloc(a * b + add);
}
#endif

// malloc(a*b*c + add) with overflow checking; NULL if the size is invalid.
static void* __stbi_malloc_mad3(int a, int b, int c, int add)
{
	if (!__stbi_mad3sizes_valid(a, b, c, add)) return NULL;
	return __stbi_malloc(a * b * c + add);
}

#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR)
// malloc(a*b*c*d + add) with overflow checking; NULL if the size is invalid.
static void* __stbi_malloc_mad4(int a, int b, int c, int d, int add)
{
	if (!__stbi_mad4sizes_valid(a, b, c, d, add)) return NULL;
	return __stbi_malloc(a * b * c * d + add);
}
#endif

// __stbi_err - error
// __stbi_errpf - error returning pointer to float
// __stbi_errpuc - error returning pointer to unsigned char

#ifdef STBI_NO_FAILURE_STRINGS
#define __stbi_err(x, y) 0
#elif defined(STBI_FAILURE_USERMSG)
#define __stbi_err(x, y) __stbi_err(y)
#else
#define __stbi_err(x, y) __stbi_err(x)
#endif

#define __stbi_errpf(x, y) ((float*)(size_t)(__stbi_err(x, y) ? NULL : NULL))
#define __stbi_errpuc(x, y) ((unsigned char*)(size_t)(__stbi_err(x, y) ? NULL : NULL))

// Public: free pixel memory returned by any stbi_load* variant.
STBIDEF void stbi_image_free(void* retval_from_stbi_load)
{
	STBI_FREE(retval_from_stbi_load);
}

#ifndef STBI_NO_LINEAR
static float* __stbi_ldr_to_hdr(stbi_uc* value, int x, int y, int comp);
#endif

#ifndef STBI_NO_HDR
static stbi_uc* __stbi_hdr_to_ldr(float* value, int x, int y, int comp);
#endif

static int __stbi_vertically_flip_on_load_global = 0;

// Public: globally request that images be flipped vertically on load.
STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip)
{
	__stbi_vertically_flip_on_load_global = flag_true_if_should_flip;
}

#ifndef STBI_THREAD_LOCAL
#define __stbi_vertically_flip_on_load __stbi_vertically_flip_on_load_global
#else
static STBI_THREAD_LOCAL int __stbi_vertically_flip_on_load_local, __stbi_vertically_flip_on_load_set;

// Public: per-thread override of the flip-on-load flag. Once called, this
// thread ignores the global setting (see the __stbi_vertically_flip_on_load macro).
STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip)
{
	__stbi_vertically_flip_on_load_local = flag_true_if_should_flip;
	__stbi_vertically_flip_on_load_set = 1;
}

#define __stbi_vertically_flip_on_load (__stbi_vertically_flip_on_load_set ? __stbi_vertically_flip_on_load_local : __stbi_vertically_flip_on_load_global)
#endif // STBI_THREAD_LOCAL

// Probe each enabled format in turn and dispatch to its decoder.
// `bpc` is the caller's preferred bits-per-channel (8 or 16); only the PSD
// loader takes it directly — other formats report their natural depth via `ri`.
// Returns decoded pixels, or NULL with a failure reason recorded.
static void* __stbi_load_main(__stbi_context* s, int* x, int* y, int* comp, int req_comp, __stbi_result_info* ri, int bpc)
{
	memset(ri, 0, sizeof(*ri)); // make sure it's Initialized if we add new fields
	ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed
	ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order
	ri->num_channels = 0;

#ifndef STBI_NO_JPEG
	if (__stbi_jpeg_test(s)) return __stbi_jpeg_load(s, x, y, comp, req_comp, ri);
#endif
#ifndef STBI_NO_PNG
	if (__stbi_png_test(s)) return __stbi_png_load(s, x, y, comp, req_comp, ri);
#endif
#ifndef STBI_NO_BMP
	if (__stbi_bmp_test(s)) return __stbi_bmp_load(s, x, y, comp, req_comp, ri);
#endif
#ifndef STBI_NO_GIF
	if (__stbi_gif_test(s)) return __stbi_gif_load(s, x, y, comp, req_comp, ri);
#endif
#ifndef STBI_NO_PSD
	if (__stbi_psd_test(s)) return __stbi_psd_load(s, x, y, comp, req_comp, ri, bpc);
#else
	STBI_NOTUSED(bpc);
#endif
#ifndef STBI_NO_PIC
	if (__stbi_pic_test(s)) return __stbi_pic_load(s, x, y, comp, req_comp, ri);
#endif
#ifndef STBI_NO_PNM
	if (__stbi_pnm_test(s)) return __stbi_pnm_load(s, x, y, comp, req_comp, ri);
#endif

#ifndef STBI_NO_HDR
	if (__stbi_hdr_test(s))
	{
		// this 8/16-bit entry point still accepts HDR files; tone-map them down
		float* hdr = __stbi_hdr_load(s, x, y, comp, req_comp, ri);
		return __stbi_hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp);
	}
#endif

#ifndef STBI_NO_TGA
	// test tga last because it's a crappy test!
	if (__stbi_tga_test(s)) return __stbi_tga_load(s, x, y, comp, req_comp, ri);
#endif

	return __stbi_errpuc("unknown image type", "Image not of any known type, or corrupt");
}

// Narrow a 16-bit-per-channel image to 8 bits per channel in a fresh
// allocation. `orig` is consumed (freed) on success; on allocation failure
// it is left untouched and NULL is returned with a failure reason set.
static stbi_uc* __stbi_convert_16_to_8(__stbi_uint16* orig, int w, int h, int channels)
{
	int idx;
	int total = w * h * channels;
	stbi_uc* out = (stbi_uc*)__stbi_malloc(total);

	if (out == NULL) return __stbi_errpuc("outofmem", "Out of memory");

	// keeping only the high byte is a sufficient approximation of 16->8 scaling
	for (idx = 0; idx < total; ++idx)
		out[idx] = (stbi_uc)((orig[idx] >> 8) & 0xFF);

	STBI_FREE(orig);
	return out;
}

// Widen an 8-bit-per-channel image to 16 bits per channel in a fresh
// allocation. `orig` is consumed (freed) on success; on allocation failure
// it is left untouched and NULL is returned with a failure reason set.
static __stbi_uint16* __stbi_convert_8_to_16(stbi_uc* orig, int w, int h, int channels)
{
	int idx;
	int total = w * h * channels;
	__stbi_uint16* out = (__stbi_uint16*)__stbi_malloc(total * 2);

	if (out == NULL) return (__stbi_uint16*)__stbi_errpuc("outofmem", "Out of memory");

	// replicate each byte into both halves: maps 0 -> 0x0000 and 255 -> 0xFFFF
	for (idx = 0; idx < total; ++idx)
		out[idx] = (__stbi_uint16)((orig[idx] << 8) + orig[idx]);

	STBI_FREE(orig);
	return out;
}

static void __stbi_vertical_flip(void* image, int w, int h, int bytes_per_pixel)
{
	int row;
	size_t bytes_per_row = (size_t)w * bytes_per_pixel;
	stbi_uc temp[2048];
	stbi_uc* bytes = (stbi_uc*)image;

	for (row = 0; row < (h >> 1); row++)
	{
		stbi_uc* row0 = bytes + row * bytes_per_row;
		stbi_uc* row1 = bytes + (h - row - 1) * bytes_per_row;
		// swap row0 with row1
		size_t bytes_left = bytes_per_row;
		while (bytes_left)
		{
			size_t bytes_copy = (bytes_left < sizeof(temp)) ? bytes_left : sizeof(temp);
			memcpy(temp, row0, bytes_copy);
			memcpy(row0, row1, bytes_copy);
			memcpy(row1, temp, bytes_copy);
			row0 += bytes_copy;
			row1 += bytes_copy;
			bytes_left -= bytes_copy;
		}
	}
}

#ifndef STBI_NO_GIF
// Flip every frame of a multi-frame (GIF) image vertically, in place.
static void __stbi_vertical_flip_slices(void* image, int w, int h, int z, int bytes_per_pixel)
{
	int frame;
	int frame_bytes = w * h * bytes_per_pixel;
	stbi_uc* p = (stbi_uc*)image;

	for (frame = 0; frame < z; ++frame, p += frame_bytes)
	{
		__stbi_vertical_flip(p, w, h, bytes_per_pixel);
	}
}
#endif

// Decode via __stbi_load_main and normalize the result to 8 bits per channel,
// then apply the vertical-flip setting. Returns NULL on failure.
static unsigned char* __stbi_load_and_postprocess_8bit(__stbi_context* s, int* x, int* y, int* comp, int req_comp)
{
	__stbi_result_info ri;
	void* result = __stbi_load_main(s, x, y, comp, req_comp, &ri, 8);

	if (result == NULL) return NULL;

	// it is the responsibility of the loaders to make sure we Get either 8 or 16 bit.
	STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16);

	if (ri.bits_per_channel != 8)
	{
		// NOTE(review): __stbi_convert_16_to_8 returns NULL on OOM; the flip
		// below would then touch a NULL pointer — confirm against upstream.
		result = __stbi_convert_16_to_8((__stbi_uint16*)result, *x, *y, req_comp == 0 ? *comp : req_comp);
		ri.bits_per_channel = 8;
	}

	// @TODO: move __stbi_convert_format to here

	if (__stbi_vertically_flip_on_load)
	{
		int channels = req_comp ? req_comp : *comp;
		__stbi_vertical_flip(result, *x, *y, channels * sizeof(stbi_uc));
	}

	return (unsigned char*)result;
}

// Decode via __stbi_load_main and normalize the result to 16 bits per channel,
// then apply the vertical-flip setting. Returns NULL on failure.
static __stbi_uint16* __stbi_load_and_postprocess_16bit(__stbi_context* s, int* x, int* y, int* comp, int req_comp)
{
	__stbi_result_info ri;
	void* result = __stbi_load_main(s, x, y, comp, req_comp, &ri, 16);

	if (result == NULL) return NULL;

	// it is the responsibility of the loaders to make sure we Get either 8 or 16 bit.
	STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16);

	if (ri.bits_per_channel != 16)
	{
		// NOTE(review): __stbi_convert_8_to_16 returns NULL on OOM; the flip
		// below would then touch a NULL pointer — confirm against upstream.
		result = __stbi_convert_8_to_16((stbi_uc*)result, *x, *y, req_comp == 0 ? *comp : req_comp);
		ri.bits_per_channel = 16;
	}

	// @TODO: move __stbi_convert_format16 to here
	// @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision

	if (__stbi_vertically_flip_on_load)
	{
		int channels = req_comp ? req_comp : *comp;
		__stbi_vertical_flip(result, *x, *y, channels * sizeof(__stbi_uint16));
	}

	return (__stbi_uint16*)result;
}

#if !defined(STBI_NO_HDR) && !defined(STBI_NO_LINEAR)
// Apply the vertical-flip setting to a freshly loaded float image (no-op
// when flipping is off or the load failed).
static void __stbi_float_postprocess(float* result, int* x, int* y, int* comp, int req_comp)
{
	int channels;

	if (result == NULL || !__stbi_vertically_flip_on_load) return;
	channels = req_comp ? req_comp : *comp;
	__stbi_vertical_flip(result, *x, *y, channels * sizeof(float));
}
#endif

#ifndef STBI_NO_STDIO

#if defined(_MSC_VER) && defined(STBI_WINDOWS_UTF8)
STBI_EXTERN __declspec(dllimport) int __stdcall MultiByteToWideChar(unsigned int cp, unsigned long flags, const char* str, int cbmb, wchar_t* widestr, int cchwide);
STBI_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(
	unsigned int cp, unsigned long flags, const wchar_t* widestr, int cchwide, char* str, int cbmb, const char* defchar, int* used_default);
#endif

#if defined(_MSC_VER) && defined(STBI_WINDOWS_UTF8)
// Public (Windows UTF-8 builds): convert a NUL-terminated wide string to UTF-8.
// Returns the number of bytes written, or 0 on failure (WideCharToMultiByte semantics).
STBIDEF int stbi_convert_wchar_to_utf8(char* buffer, size_t bufferlen, const wchar_t* input)
{
	return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, (int)bufferlen, NULL, NULL);
}
#endif

// fopen() wrapper: UTF-8 aware on Windows when STBI_WINDOWS_UTF8 is defined,
// and using the secure CRT variants on MSVC >= 2005. Returns NULL on failure.
static FILE* __stbi_fopen(char const* filename, char const* figureMode)
{
	FILE* f;
#if defined(_MSC_VER) && defined(STBI_WINDOWS_UTF8)
	wchar_t wMode[64];
	wchar_t wFilename[1024];
	// Fix: MultiByteToWideChar takes the destination capacity in wide
	// characters, not bytes. Passing sizeof() directly overstated the
	// capacity 2x and allowed the conversion to overrun the buffers.
	if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename) / sizeof(*wFilename))) return 0;

	if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, figureMode, -1, wMode, sizeof(wMode) / sizeof(*wMode))) return 0;

#if _MSC_VER >= 1400
	if (0 != _wfopen_s(&f, wFilename, wMode)) f = 0;
#else
	f = _wfopen(wFilename, wMode);
#endif

#elif defined(_MSC_VER) && _MSC_VER >= 1400
	if (0 != fopen_s(&f, filename, figureMode)) f = 0;
#else
	f = fopen(filename, figureMode);
#endif
	return f;
}

// Public: load an 8-bit-per-channel image from a file path.
STBIDEF stbi_uc* stbi_load(char const* filename, int* x, int* y, int* comp, int req_comp)
{
	unsigned char* result;
	FILE* f = __stbi_fopen(filename, "rb");

	if (!f) return __stbi_errpuc("can't fopen", "Unable to open file");
	result = stbi_load_from_file(f, x, y, comp, req_comp);
	fclose(f);
	return result;
}

// Public: load an 8-bit image from an already-open stream. On success the
// stream is repositioned to just past the image data that was consumed.
STBIDEF stbi_uc* stbi_load_from_file(FILE* f, int* x, int* y, int* comp, int req_comp)
{
	unsigned char* result;
	__stbi_context s;
	__stbi_start_file(&s, f);
	result = __stbi_load_and_postprocess_8bit(&s, x, y, comp, req_comp);
	if (result)
	{
		// need to 'unget' all the characters in the IO buffer
		fseek(f, -(int)(s.img_buffer_end - s.img_buffer), SEEK_CUR);
	}
	return result;
}

// Public: load a 16-bit-per-channel image from an already-open stream. On
// success the stream is repositioned to just past the consumed image data.
STBIDEF __stbi_uint16* stbi_load_from_file_16(FILE* f, int* x, int* y, int* comp, int req_comp)
{
	__stbi_uint16* result;
	__stbi_context s;
	__stbi_start_file(&s, f);
	result = __stbi_load_and_postprocess_16bit(&s, x, y, comp, req_comp);
	if (result)
	{
		// need to 'unget' all the characters in the IO buffer
		fseek(f, -(int)(s.img_buffer_end - s.img_buffer), SEEK_CUR);
	}
	return result;
}

// Public: load a 16-bit-per-channel image from a file path.
STBIDEF stbi_us* stbi_load_16(char const* filename, int* x, int* y, int* comp, int req_comp)
{
	__stbi_uint16* result;
	FILE* f = __stbi_fopen(filename, "rb");

	if (!f) return (stbi_us*)__stbi_errpuc("can't fopen", "Unable to open file");
	result = stbi_load_from_file_16(f, x, y, comp, req_comp);
	fclose(f);
	return result;
}

#endif //!STBI_NO_STDIO

// Public: load a 16-bit-per-channel image from a memory buffer.
STBIDEF stbi_us* stbi_load_16_from_memory(stbi_uc const* buffer, int len, int* x, int* y, int* channels_in_file, int desired_channels)
{
	__stbi_context ctx;
	__stbi_start_mem(&ctx, buffer, len);
	return __stbi_load_and_postprocess_16bit(&ctx, x, y, channels_in_file, desired_channels);
}

// Public: load a 16-bit-per-channel image through user IO callbacks.
STBIDEF stbi_us* stbi_load_16_from_callbacks(stbi_io_callbacks const* clbk, void* user, int* x, int* y, int* channels_in_file, int desired_channels)
{
	__stbi_context ctx;
	__stbi_start_callbacks(&ctx, (stbi_io_callbacks*)clbk, user);
	return __stbi_load_and_postprocess_16bit(&ctx, x, y, channels_in_file, desired_channels);
}

// Public: load an 8-bit-per-channel image from a memory buffer.
STBIDEF stbi_uc* stbi_load_from_memory(stbi_uc const* buffer, int len, int* x, int* y, int* comp, int req_comp)
{
	__stbi_context ctx;
	__stbi_start_mem(&ctx, buffer, len);
	return __stbi_load_and_postprocess_8bit(&ctx, x, y, comp, req_comp);
}

// Public: load an 8-bit-per-channel image through user IO callbacks.
STBIDEF stbi_uc* stbi_load_from_callbacks(stbi_io_callbacks const* clbk, void* user, int* x, int* y, int* comp, int req_comp)
{
	__stbi_context ctx;
	__stbi_start_callbacks(&ctx, (stbi_io_callbacks*)clbk, user);
	return __stbi_load_and_postprocess_8bit(&ctx, x, y, comp, req_comp);
}

#ifndef STBI_NO_GIF
// Public: load all frames of a GIF from memory. On success *z receives the
// frame count and (optionally) *delays the per-frame delay array.
STBIDEF stbi_uc* stbi_load_gif_from_memory(stbi_uc const* buffer, int len, int** delays, int* x, int* y, int* z, int* comp, int req_comp)
{
	unsigned char* result;
	__stbi_context s;
	__stbi_start_mem(&s, buffer, len);

	result = (unsigned char*)__stbi_load_gif_main(&s, delays, x, y, z, comp, req_comp);
	// Fix: only flip when the decode succeeded. On failure `result` is NULL
	// and *x/*y/*z may be uninitialized, so flipping would be undefined behavior.
	if (result && __stbi_vertically_flip_on_load)
	{
		__stbi_vertical_flip_slices(result, *x, *y, *z, *comp);
	}

	return result;
}
#endif

#ifndef STBI_NO_LINEAR
// Float loading path: decode HDR input natively when the HDR decoder is
// enabled, otherwise decode as 8-bit and promote to float via __stbi_ldr_to_hdr.
static float* __stbi_loadf_main(__stbi_context* s, int* x, int* y, int* comp, int req_comp)
{
	unsigned char* value;
#ifndef STBI_NO_HDR
	if (__stbi_hdr_test(s))
	{
		__stbi_result_info ri;
		float* hdr_data = __stbi_hdr_load(s, x, y, comp, req_comp, &ri);
		if (hdr_data) __stbi_float_postprocess(hdr_data, x, y, comp, req_comp);
		return hdr_data;
	}
#endif
	value = __stbi_load_and_postprocess_8bit(s, x, y, comp, req_comp);
	if (value) return __stbi_ldr_to_hdr(value, *x, *y, req_comp ? req_comp : *comp);
	return __stbi_errpf("unknown image type", "Image not of any known type, or corrupt");
}

// Public: load a float (HDR/linear) image from a memory buffer.
STBIDEF float* stbi_loadf_from_memory(stbi_uc const* buffer, int len, int* x, int* y, int* comp, int req_comp)
{
	__stbi_context ctx;
	__stbi_start_mem(&ctx, buffer, len);
	return __stbi_loadf_main(&ctx, x, y, comp, req_comp);
}

// Public: load a float (HDR/linear) image through user IO callbacks.
STBIDEF float* stbi_loadf_from_callbacks(stbi_io_callbacks const* clbk, void* user, int* x, int* y, int* comp, int req_comp)
{
	__stbi_context ctx;
	__stbi_start_callbacks(&ctx, (stbi_io_callbacks*)clbk, user);
	return __stbi_loadf_main(&ctx, x, y, comp, req_comp);
}

#ifndef STBI_NO_STDIO
// Public: load a float (HDR/linear) image from a file path.
STBIDEF float* stbi_loadf(char const* filename, int* x, int* y, int* comp, int req_comp)
{
	FILE* f = __stbi_fopen(filename, "rb");
	float* result;

	if (!f) return __stbi_errpf("can't fopen", "Unable to open file");
	result = stbi_loadf_from_file(f, x, y, comp, req_comp);
	fclose(f);
	return result;
}

// Public: load a float (HDR/linear) image from an already-open stream.
STBIDEF float* stbi_loadf_from_file(FILE* f, int* x, int* y, int* comp, int req_comp)
{
	__stbi_context ctx;
	__stbi_start_file(&ctx, f);
	return __stbi_loadf_main(&ctx, x, y, comp, req_comp);
}
#endif // !STBI_NO_STDIO

#endif // !STBI_NO_LINEAR

// these is-hdr-or-not is defined independent of whether STBI_NO_LINEAR is
// defined, for API simplicity; if STBI_NO_LINEAR is defined, it always
// reports false!

// Public: nonzero if the buffer holds a Radiance HDR image.
// Always 0 when compiled with STBI_NO_HDR.
STBIDEF int stbi_is_hdr_from_memory(stbi_uc const* buffer, int len)
{
#ifndef STBI_NO_HDR
	__stbi_context s;
	__stbi_start_mem(&s, buffer, len);
	return __stbi_hdr_test(&s);
#else
	STBI_NOTUSED(buffer);
	STBI_NOTUSED(len);
	return 0;
#endif
}

#ifndef STBI_NO_STDIO
// Public: nonzero if the file at `filename` is a Radiance HDR image.
// Returns 0 if the file cannot be opened.
STBIDEF int stbi_is_hdr(char const* filename)
{
	int result = 0;
	FILE* f = __stbi_fopen(filename, "rb");

	if (f)
	{
		result = stbi_is_hdr_from_file(f);
		fclose(f);
	}
	return result;
}

// Public: nonzero if the open stream holds a Radiance HDR image.
// The stream position is restored before returning.
STBIDEF int stbi_is_hdr_from_file(FILE* f)
{
#ifndef STBI_NO_HDR
	long pos = ftell(f);
	int res;
	__stbi_context s;
	__stbi_start_file(&s, f);
	res = __stbi_hdr_test(&s);
	fseek(f, pos, SEEK_SET);
	return res;
#else
	STBI_NOTUSED(f);
	return 0;
#endif
}
#endif // !STBI_NO_STDIO

// Public: nonzero if the callback stream holds a Radiance HDR image.
// NOTE: this consumes bytes from the callbacks (there is no seek-back),
// so the caller cannot reuse the same stream position afterwards.
STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const* clbk, void* user)
{
#ifndef STBI_NO_HDR
	__stbi_context s;
	__stbi_start_callbacks(&s, (stbi_io_callbacks*)clbk, user);
	return __stbi_hdr_test(&s);
#else
	STBI_NOTUSED(clbk);
	STBI_NOTUSED(user);
	return 0;
#endif
}

#ifndef STBI_NO_LINEAR
// Parameters consumed by __stbi_ldr_to_hdr (defined later in the file).
static float __stbi_l2h_gamma = 2.2f, __stbi_l2h_scale = 1.0f;

// Public: set the gamma used when promoting 8-bit data to float.
STBIDEF void stbi_ldr_to_hdr_gamma(float gamma)
{
	__stbi_l2h_gamma = gamma;
}
// Public: set the scale used when promoting 8-bit data to float.
STBIDEF void stbi_ldr_to_hdr_scale(float scale)
{
	__stbi_l2h_scale = scale;
}
#endif

// Parameters consumed by __stbi_hdr_to_ldr; stored pre-inverted (note the
// `_i` suffix and the `1 / x` in the setters below).
static float __stbi_h2l_gamma_i = 1.0f / 2.2f, __stbi_h2l_scale_i = 1.0f;

// Public: set the gamma used when tone-mapping float data down to 8-bit.
// `gamma` must be nonzero (its reciprocal is stored).
STBIDEF void stbi_hdr_to_ldr_gamma(float gamma)
{
	__stbi_h2l_gamma_i = 1 / gamma;
}
// Public: set the scale used when tone-mapping float data down to 8-bit.
// `scale` must be nonzero (its reciprocal is stored).
STBIDEF void stbi_hdr_to_ldr_scale(float scale)
{
	__stbi_h2l_scale_i = 1 / scale;
}

//////////////////////////////////////////////////////////////////////////////
//
// Common code used by all image loaders
//

// Parse modes: full pixel decode, format-probe only, or header-only info scan.
enum
{
	STBI__SCAN_load = 0,
	STBI__SCAN_type,
	STBI__SCAN_header
};

// Pull the next chunk from the user read callback into buffer_start.
// On EOF (n == 0) we switch to memory-buffer mode and expose a single
// sentinel 0 byte so __stbi_get8 can keep returning 0 safely.
static void __stbi_refill_buffer(__stbi_context* s)
{
	int n = (s->io.read)(s->io_user_data, (char*)s->buffer_start, s->buflen);
	// account for the bytes consumed out of the previous buffer fill
	s->callback_already_read += (int)(s->img_buffer - s->img_buffer_original);
	if (n == 0)
	{
		// at end of file, treat same as if from memory, but need to handle case
		// where s->img_buffer isn't pointing to safe memory, e.g. 0-byte file
		s->read_from_callbacks = 0;
		s->img_buffer = s->buffer_start;
		s->img_buffer_end = s->buffer_start + 1;
		*s->img_buffer = 0;
	}
	else
	{
		s->img_buffer = s->buffer_start;
		s->img_buffer_end = s->buffer_start + n;
	}
}

// Read one byte; returns 0 once the stream is exhausted (never fails).
stbi_inline static stbi_uc __stbi_get8(__stbi_context* s)
{
	if (s->img_buffer < s->img_buffer_end) return *s->img_buffer++;
	if (s->read_from_callbacks)
	{
		// refill guarantees at least one readable byte (a 0 sentinel at EOF)
		__stbi_refill_buffer(s);
		return *s->img_buffer++;
	}
	return 0;
}

#if defined(STBI_NO_JPEG) && defined(STBI_NO_HDR) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM)
// nothing
#else
// Nonzero when no further bytes can be produced from this context.
stbi_inline static int __stbi_at_eof(__stbi_context* s)
{
	if (s->io.read)
	{
		if (!(s->io.eof)(s->io_user_data)) return 0;
		// if feof() is true, check if buffer = end
		// special case: we've only got the special 0 character at the end
		if (s->read_from_callbacks == 0) return 1;
	}

	return s->img_buffer >= s->img_buffer_end;
}
#endif

#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC)
// nothing
#else
// Advance the read cursor by n bytes. A negative n clamps the cursor to
// the end of the currently buffered data.
static void __stbi_skip(__stbi_context* s, int n)
{
	if (n == 0) return; // already there!
	if (n < 0)
	{
		s->img_buffer = s->img_buffer_end;
		return;
	}
	if (s->io.read)
	{
		int blen = (int)(s->img_buffer_end - s->img_buffer);
		if (blen < n)
		{
			// consume the buffered bytes, delegate the remainder to the skip callback
			s->img_buffer = s->img_buffer_end;
			(s->io.skip)(s->io_user_data, n - blen);
			return;
		}
	}
	s->img_buffer += n;
}
#endif

#if defined(STBI_NO_PNG) && defined(STBI_NO_TGA) && defined(STBI_NO_HDR) && defined(STBI_NO_PNM)
// nothing
#else
// Copy exactly n bytes into 'buffer'. Returns 1 on success, 0 if the
// stream held fewer than n bytes.
static int __stbi_getn(__stbi_context* s, stbi_uc* buffer, int n)
{
	int buffered = (int)(s->img_buffer_end - s->img_buffer);

	if (s->io.read && buffered < n)
	{
		// part of the request is buffered: copy it out, then read the
		// remainder from the callback directly into the caller's buffer
		int remaining = n - buffered;
		int got;
		memcpy(buffer, s->img_buffer, buffered);
		got = (s->io.read)(s->io_user_data, (char*)buffer + buffered, remaining);
		s->img_buffer = s->img_buffer_end;
		return got == remaining;
	}

	if (s->img_buffer + n > s->img_buffer_end)
		return 0;

	memcpy(buffer, s->img_buffer, n);
	s->img_buffer += n;
	return 1;
}
#endif

#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC)
// nothing
#else
// Read a big-endian 16-bit value.
static int __stbi_get16be(__stbi_context* s)
{
	int hi = __stbi_get8(s);
	int lo = __stbi_get8(s);
	return (hi << 8) + lo;
}
#endif

#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC)
// nothing
#else
// Read a big-endian 32-bit value.
static __stbi_uint32 __stbi_get32be(__stbi_context* s)
{
	__stbi_uint32 hi = __stbi_get16be(s);
	__stbi_uint32 lo = __stbi_get16be(s);
	return (hi << 16) + lo;
}
#endif

#if defined(STBI_NO_BMP) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF)
// nothing
#else
// Read a little-endian 16-bit value.
static int __stbi_get16le(__stbi_context* s)
{
	int lo = __stbi_get8(s);
	int hi = __stbi_get8(s);
	return lo + (hi << 8);
}
#endif

#ifndef STBI_NO_BMP
// Read a little-endian 32-bit value.
// The high half must be widened to unsigned BEFORE shifting: left-shifting
// a plain int by 16 when the value is >= 0x8000 overflows signed int,
// which is undefined behavior (the previous code did exactly that).
static __stbi_uint32 __stbi_get32le(__stbi_context* s)
{
	__stbi_uint32 z = __stbi_get16le(s);
	z += (__stbi_uint32)__stbi_get16le(s) << 16;
	return z;
}
#endif

#define STBI__BYTECAST(x) ((stbi_uc)((x)&255)) // truncate int to byte without warnings

#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM)
// nothing
#else
//////////////////////////////////////////////////////////////////////////////
//
//  generic converter from built-in img_n to req_comp
//    individual types do this automatically as much as possible (e.g. jpeg
//    does all cases internally since it needs to colorspace convert anyway,
//    and it never has alpha, so very few cases ). png can automatically
//    interleave an alpha=255 channel, but falls back to this for other cases
//
//  assume value buffer is malloced, so malloc a new one and free that one
//  only failure mode is malloc failing

// Integer luma approximation: weights 77/150/29 sum to 256
// (~0.30 R, ~0.59 G, ~0.11 B), so the >>8 normalizes back to 0..255.
static stbi_uc __stbi_compute_y(int r, int g, int b)
{
	int y = 77 * r + 150 * g + 29 * b;
	return (stbi_uc)(y >> 8);
}
#endif

#if defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM)
// nothing
#else
// Convert an 8-bit image from img_n channels to req_comp channels (both
// 1..4). Takes ownership of 'value': it is freed and a newly allocated
// buffer returned (or 'value' itself when no conversion is needed).
// Returns NULL via __stbi_errpuc on allocation failure or an unsupported
// channel combination.
static unsigned char* __stbi_convert_format(unsigned char* value, int img_n, int req_comp, unsigned int x, unsigned int y)
{
	int i, j;
	unsigned char* good;

	if (req_comp == img_n) return value;
	STBI_ASSERT(req_comp >= 1 && req_comp <= 4);

	// overflow-checked allocation of req_comp*x*y bytes
	good = (unsigned char*)__stbi_malloc_mad3(req_comp, x, y, 0);
	if (good == NULL)
	{
		STBI_FREE(value);
		return __stbi_errpuc("outofmem", "Out of memory");
	}

	for (j = 0; j < (int)y; ++j)
	{
		unsigned char* src = value + j * x * img_n;
		unsigned char* dest = good + j * x * req_comp;

// STBI__COMBO packs (src channels, dst channels) into one switch value;
// STBI__CASE expands to a per-scanline loop over x pixels.
#define STBI__COMBO(a, b) ((a)*8 + (b))
#define STBI__CASE(a, b)    \
	case STBI__COMBO(a, b): \
		for (i = x - 1; i >= 0; --i, src += a, dest += b)
		// convert source image with img_n components to one with req_comp components;
		// avoid switch per pixel, so use switch per scanline and massive macros
		switch (STBI__COMBO(img_n, req_comp))
		{
			STBI__CASE(1, 2)
			{
				dest[0] = src[0];
				dest[1] = 255;
			}
			break;
			STBI__CASE(1, 3)
			{
				dest[0] = dest[1] = dest[2] = src[0];
			}
			break;
			STBI__CASE(1, 4)
			{
				dest[0] = dest[1] = dest[2] = src[0];
				dest[3] = 255;
			}
			break;
			STBI__CASE(2, 1)
			{
				dest[0] = src[0];
			}
			break;
			STBI__CASE(2, 3)
			{
				dest[0] = dest[1] = dest[2] = src[0];
			}
			break;
			STBI__CASE(2, 4)
			{
				dest[0] = dest[1] = dest[2] = src[0];
				dest[3] = src[1];
			}
			break;
			STBI__CASE(3, 4)
			{
				dest[0] = src[0];
				dest[1] = src[1];
				dest[2] = src[2];
				dest[3] = 255;
			}
			break;
			STBI__CASE(3, 1)
			{
				dest[0] = __stbi_compute_y(src[0], src[1], src[2]);
			}
			break;
			STBI__CASE(3, 2)
			{
				dest[0] = __stbi_compute_y(src[0], src[1], src[2]);
				dest[1] = 255;
			}
			break;
			STBI__CASE(4, 1)
			{
				dest[0] = __stbi_compute_y(src[0], src[1], src[2]);
			}
			break;
			STBI__CASE(4, 2)
			{
				dest[0] = __stbi_compute_y(src[0], src[1], src[2]);
				dest[1] = src[3];
			}
			break;
			STBI__CASE(4, 3)
			{
				dest[0] = src[0];
				dest[1] = src[1];
				dest[2] = src[2];
			}
			break;
		default:
			STBI_ASSERT(0);
			STBI_FREE(value);
			STBI_FREE(good);
			return __stbi_errpuc("unsupported", "Unsupported format conversion");
		}
#undef STBI__CASE
	}

	STBI_FREE(value);
	return good;
}
#endif

#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD)
// nothing
#else
// 16-bit variant of __stbi_compute_y: same 77/150/29 fixed-point weights.
static __stbi_uint16 __stbi_compute_y_16(int r, int g, int b)
{
	int y = 77 * r + 150 * g + 29 * b;
	return (__stbi_uint16)(y >> 8);
}
#endif

#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD)
// nothing
#else
// Convert a 16-bit-per-channel image from img_n to req_comp channels (both
// 1..4). Takes ownership of 'value': it is freed and a newly allocated
// buffer returned (or 'value' itself when no conversion is needed).
// Returns NULL via __stbi_errpuc on allocation failure or an unsupported
// channel combination.
static __stbi_uint16* __stbi_convert_format16(__stbi_uint16* value, int img_n, int req_comp, unsigned int x, unsigned int y)
{
	int i, j;
	__stbi_uint16* good;

	if (req_comp == img_n) return value;
	STBI_ASSERT(req_comp >= 1 && req_comp <= 4);

	// Overflow-checked allocation of req_comp * x * y 16-bit samples.
	// The previous __stbi_malloc(req_comp * x * y * 2) could silently
	// overflow for large images; req_comp * 2 itself cannot overflow
	// (req_comp <= 4). This mirrors the 8-bit __stbi_convert_format,
	// which already allocates through __stbi_malloc_mad3.
	good = (__stbi_uint16*)__stbi_malloc_mad3(req_comp * 2, x, y, 0);
	if (good == NULL)
	{
		STBI_FREE(value);
		return (__stbi_uint16*)__stbi_errpuc("outofmem", "Out of memory");
	}

	for (j = 0; j < (int)y; ++j)
	{
		__stbi_uint16* src = value + j * x * img_n;
		__stbi_uint16* dest = good + j * x * req_comp;

// STBI__COMBO packs (src channels, dst channels) into one switch value;
// STBI__CASE expands to a per-scanline loop over x pixels.
#define STBI__COMBO(a, b) ((a)*8 + (b))
#define STBI__CASE(a, b)    \
	case STBI__COMBO(a, b): \
		for (i = x - 1; i >= 0; --i, src += a, dest += b)
		// convert source image with img_n components to one with req_comp components;
		// avoid switch per pixel, so use switch per scanline and massive macros
		switch (STBI__COMBO(img_n, req_comp))
		{
			STBI__CASE(1, 2)
			{
				dest[0] = src[0];
				dest[1] = 0xffff;
			}
			break;
			STBI__CASE(1, 3)
			{
				dest[0] = dest[1] = dest[2] = src[0];
			}
			break;
			STBI__CASE(1, 4)
			{
				dest[0] = dest[1] = dest[2] = src[0];
				dest[3] = 0xffff;
			}
			break;
			STBI__CASE(2, 1)
			{
				dest[0] = src[0];
			}
			break;
			STBI__CASE(2, 3)
			{
				dest[0] = dest[1] = dest[2] = src[0];
			}
			break;
			STBI__CASE(2, 4)
			{
				dest[0] = dest[1] = dest[2] = src[0];
				dest[3] = src[1];
			}
			break;
			STBI__CASE(3, 4)
			{
				dest[0] = src[0];
				dest[1] = src[1];
				dest[2] = src[2];
				dest[3] = 0xffff;
			}
			break;
			STBI__CASE(3, 1)
			{
				dest[0] = __stbi_compute_y_16(src[0], src[1], src[2]);
			}
			break;
			STBI__CASE(3, 2)
			{
				dest[0] = __stbi_compute_y_16(src[0], src[1], src[2]);
				dest[1] = 0xffff;
			}
			break;
			STBI__CASE(4, 1)
			{
				dest[0] = __stbi_compute_y_16(src[0], src[1], src[2]);
			}
			break;
			STBI__CASE(4, 2)
			{
				dest[0] = __stbi_compute_y_16(src[0], src[1], src[2]);
				dest[1] = src[3];
			}
			break;
			STBI__CASE(4, 3)
			{
				dest[0] = src[0];
				dest[1] = src[1];
				dest[2] = src[2];
			}
			break;
		default:
			STBI_ASSERT(0);
			STBI_FREE(value);
			STBI_FREE(good);
			return (__stbi_uint16*)__stbi_errpuc("unsupported", "Unsupported format conversion");
		}
#undef STBI__CASE
	}

	STBI_FREE(value);
	return good;
}
#endif

#ifndef STBI_NO_LINEAR
// Convert an 8-bit image to linear float, applying the configured gamma
// and scale (__stbi_l2h_*) to every channel except a trailing alpha, which
// is copied linearly. Takes ownership of 'value' (freed before returning).
// Returns NULL on NULL input or allocation failure.
static float* __stbi_ldr_to_hdr(stbi_uc* value, int x, int y, int comp)
{
	float* out;
	int pixel, chan, n_gamma;

	if (!value) return NULL;
	out = (float*)__stbi_malloc_mad4(x, y, comp, sizeof(float), 0);
	if (out == NULL)
	{
		STBI_FREE(value);
		return __stbi_errpf("outofmem", "Out of memory");
	}
	// odd channel counts have no alpha; even counts treat the last channel as alpha
	n_gamma = (comp & 1) ? comp : comp - 1;
	for (pixel = 0; pixel < x * y; ++pixel)
	{
		for (chan = 0; chan < n_gamma; ++chan)
		{
			out[pixel * comp + chan] = (float)(pow(value[pixel * comp + chan] / 255.0f, __stbi_l2h_gamma) * __stbi_l2h_scale);
		}
	}
	if (n_gamma < comp)
	{
		// alpha: plain 0..1 normalization, no gamma
		for (pixel = 0; pixel < x * y; ++pixel)
		{
			out[pixel * comp + n_gamma] = value[pixel * comp + n_gamma] / 255.0f;
		}
	}
	STBI_FREE(value);
	return out;
}
#endif

#ifndef STBI_NO_HDR
#define __stbi_float2int(x) ((int)(x)) // truncation is fine: +0.5f below already rounds
// Convert a linear float image to 8-bit, applying the inverse gamma/scale
// (__stbi_h2l_*) to all channels except a trailing alpha, which is scaled
// linearly. Takes ownership of 'value' (freed before returning).
// Returns NULL on NULL input or allocation failure.
static stbi_uc* __stbi_hdr_to_ldr(float* value, int x, int y, int comp)
{
	int i, k, n;
	stbi_uc* output;
	if (!value) return NULL;
	output = (stbi_uc*)__stbi_malloc_mad3(x, y, comp, 0);
	if (output == NULL)
	{
		STBI_FREE(value);
		return __stbi_errpuc("outofmem", "Out of memory");
	}
	// compute number of non-alpha components
	if (comp & 1)
		n = comp;
	else
		n = comp - 1;
	for (i = 0; i < x * y; ++i)
	{
		for (k = 0; k < n; ++k)
		{
			float z = (float)pow(value[i * comp + k] * __stbi_h2l_scale_i, __stbi_h2l_gamma_i) * 255 + 0.5f;
			if (z < 0) z = 0;
			if (z > 255) z = 255;
			output[i * comp + k] = (stbi_uc)__stbi_float2int(z);
		}
		// here k == n: handle the alpha channel (linear, no gamma) if present
		if (k < comp)
		{
			float z = value[i * comp + k] * 255 + 0.5f;
			if (z < 0) z = 0;
			if (z > 255) z = 255;
			output[i * comp + k] = (stbi_uc)__stbi_float2int(z);
		}
	}
	STBI_FREE(value);
	return output;
}
#endif

//////////////////////////////////////////////////////////////////////////////
//
//  "baseline" JPEG/JFIF decoder
//
//    simple implementation
//      - doesn't support delayed output of y-dimension
//      - simple interface (only one output format: 8-bit interleaved RGB)
//      - doesn't try to recover corrupt jpegs
//      - doesn't allow partial loading, loading multiple at once
//      - still fast on x86 (copying globals into locals doesn't help x86)
//      - allocates lots of intermediate memory (full size of all components)
//        - non-interleaved case requires this anyway
//        - allows good upsampling (see next)
//    high-quality
//      - upsampled channels are bilinearly interpolated, even across blocks
//      - quality integer IDCT derived from IJG's 'slow'
//    performance
//      - fast huffman; reasonable integer IDCT
//      - some SIMD kernels for common paths on targets with SSE2/NEON
//      - uses a lot of intermediate memory, could cache poorly

#ifndef STBI_NO_JPEG

// huffman decoding acceleration
#define FAST_BITS 9 // larger handles more cases; smaller stomps less cache

// Huffman decode tables for one JPEG table (built by __stbi_build_huffman).
typedef struct
{
	// direct lookup: top FAST_BITS of the bit buffer -> symbol index;
	// 255 means "not accelerated" (code longer than FAST_BITS)
	stbi_uc fast[1 << FAST_BITS];
	// weirdly, repacking this into AoS is a 10% speed loss, instead of a win
	__stbi_uint16 code[256]; // canonical code for each symbol index
	stbi_uc values[256]; // symbol value for each symbol index
	stbi_uc size[257]; // code length per symbol index, 0-terminated
	unsigned int maxcode[18]; // largest code+1 per length, preshifted to 16 bits
	int delta[17]; // old 'firstsymbol' - old 'firstcode'
} __stbi_huffman;

// Full decoder state for one JPEG stream.
typedef struct
{
	__stbi_context* s;
	__stbi_huffman huff_dc[4];
	__stbi_huffman huff_ac[4];
	__stbi_uint16 dequant[4][64];
	__stbi_int16 fast_ac[4][1 << FAST_BITS]; // packed fast-AC table, see __stbi_build_fast_ac

	// sizes for components, interleaved MCUs
	int img_h_max, img_v_max;
	int img_mcu_x, img_mcu_y;
	int img_mcu_w, img_mcu_h;

	// definition of jpeg image component
	struct
	{
		int id;
		int h, v;
		int tq;
		int hd, ha;
		int dc_pred; // running DC predictor, updated per decoded block

		int x, y, w2, h2;
		stbi_uc* value;
		void *raw_data, *raw_coeff;
		stbi_uc* linebuf;
		short* coeff; // progressive only
		int coeff_w, coeff_h; // number of 8x8 coefficient blocks
	} img_comp[4];

	__stbi_uint32 code_buffer; // jpeg entropy-coded buffer
	int code_bits; // number of valid bits
	unsigned char marker; // marker seen while filling entropy buffer
	int nomore; // flag if we saw a marker so must stop

	int progressive;
	int spec_start;
	int spec_end;
	int succ_high;
	int succ_low;
	int eob_run;
	int jfif;
	int app14_color_transform; // Adobe APP14 tag
	int rgb;

	int scan_n, order[4];
	int restart_interval, todo;

	// kernels
	void (*idct_block_kernel)(stbi_uc* out, int out_stride, short value[64]);
	void (*YCbCr_to_RGB_kernel)(stbi_uc* out, const stbi_uc* y, const stbi_uc* pcb, const stbi_uc* pcr, int count, int step);
	stbi_uc* (*resample_row_hv_2_kernel)(stbi_uc* out, stbi_uc* in_near, stbi_uc* in_far, int w, int hs);
} __stbi_jpeg;

// Build the canonical Huffman decode tables from the 16 per-length symbol
// counts of a DHT segment. Returns 0 (via __stbi_err) on corrupt lengths.
static int __stbi_build_huffman(__stbi_huffman* h, int* count)
{
	int i, j, k = 0;
	unsigned int code;
	// build size list for each symbol (from JPEG spec)
	for (i = 0; i < 16; ++i)
		for (j = 0; j < count[i]; ++j) h->size[k++] = (stbi_uc)(i + 1);
	h->size[k] = 0;

	// compute actual symbols (from jpeg spec)
	code = 0;
	k = 0;
	for (j = 1; j <= 16; ++j)
	{
		// compute delta to add to code to compute symbol id
		h->delta[j] = k - code;
		if (h->size[k] == j)
		{
			while (h->size[k] == j) h->code[k++] = (__stbi_uint16)(code++);
			if (code - 1 >= (1u << j)) return __stbi_err("bad code lengths", "Corrupt JPEG");
		}
		// compute largest code + 1 for this size, preshifted as needed later
		h->maxcode[j] = code << (16 - j);
		code <<= 1;
	}
	h->maxcode[j] = 0xffffffff; // sentinel so the decode loop always terminates

	// build non-spec acceleration table; 255 is flag for not-accelerated
	memset(h->fast, 255, 1 << FAST_BITS);
	for (i = 0; i < k; ++i)
	{
		int s = h->size[i];
		if (s <= FAST_BITS)
		{
			// every FAST_BITS-wide pattern that starts with this code maps to symbol i
			int c = h->code[i] << (FAST_BITS - s);
			int m = 1 << (FAST_BITS - s);
			for (j = 0; j < m; ++j)
			{
				h->fast[c + j] = (stbi_uc)i;
			}
		}
	}
	return 1;
}

// build a table that decodes both magnitude and value of small ACs in
// one go.
// fast_ac[i] packs (value << 8) | (run << 4) | total_bit_length for bit
// patterns whose Huffman code plus magnitude bits both fit in FAST_BITS;
// 0 means "take the slow path".
static void __stbi_build_fast_ac(__stbi_int16* fast_ac, __stbi_huffman* h)
{
	int i;
	for (i = 0; i < (1 << FAST_BITS); ++i)
	{
		stbi_uc fast = h->fast[i];
		fast_ac[i] = 0;
		if (fast < 255)
		{
			int rs = h->values[fast];
			int run = (rs >> 4) & 15;
			int magbits = rs & 15;
			int len = h->size[fast];

			if (magbits && len + magbits <= FAST_BITS)
			{
				// magnitude code followed by receive_extend code
				int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits);
				int m = 1 << (magbits - 1);
				if (k < m) k += (~0U << magbits) + 1; // JPEG "extend": sign-extend small values
				// if the result is small enough, we can fit it in fast_ac table
				if (k >= -128 && k <= 127) fast_ac[i] = (__stbi_int16)((k * 256) + (run * 16) + (len + magbits));
			}
		}
	}
}

// Top up the entropy bit buffer until it holds at least 25 bits or a
// marker is hit. 0xFF 0x00 is a stuffed data byte; 0xFF followed by a
// non-zero byte is a real marker, which is recorded and stops the stream.
static void __stbi_grow_buffer_unsafe(__stbi_jpeg* j)
{
	do
	{
		unsigned int b = j->nomore ? 0 : __stbi_get8(j->s);
		if (b == 0xff)
		{
			int c = __stbi_get8(j->s);
			while (c == 0xff) c = __stbi_get8(j->s); // consume fill bytes
			if (c != 0)
			{
				// real marker: remember it and stop feeding bits
				j->marker = (unsigned char)c;
				j->nomore = 1;
				return;
			}
		}
		j->code_buffer |= b << (24 - j->code_bits);
		j->code_bits += 8;
	} while (j->code_bits <= 24);
}

// low-bit masks: __stbi_bmask[n] == (1 << n) - 1
static const __stbi_uint32 __stbi_bmask[17] = { 0, 1, 3, 7, 15, 31, 63, 127, 255, 511, 1023, 2047, 4095, 8191, 16383, 32767, 65535 };

// decode a jpeg huffman value from the bitstream
// Returns the decoded symbol value, or -1 on a malformed or truncated code.
stbi_inline static int __stbi_jpeg_huff_decode(__stbi_jpeg* j, __stbi_huffman* h)
{
	unsigned int temp;
	int c, k;

	if (j->code_bits < 16) __stbi_grow_buffer_unsafe(j);

	// look at the top FAST_BITS and determine what symbol ID it is,
	// if the code is <= FAST_BITS
	c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS) - 1);
	k = h->fast[c];
	if (k < 255)
	{
		int s = h->size[k];
		if (s > j->code_bits) return -1;
		j->code_buffer <<= s;
		j->code_bits -= s;
		return h->values[k];
	}

	// naive test is to shift the code_buffer down so k bits are
	// valid, then test against maxcode. To speed this up, we've
	// preshifted maxcode left so that it has (16-k) 0s at the
	// end; in other words, regardless of the number of bits, it
	// wants to be compared against something shifted to have 16;
	// that way we don't need to shift inside the loop.
	temp = j->code_buffer >> 16;
	for (k = FAST_BITS + 1;; ++k)
		if (temp < h->maxcode[k]) break;
	if (k == 17)
	{
		// error! code not found
		j->code_bits -= 16;
		return -1;
	}

	if (k > j->code_bits) return -1;

	// convert the huffman code to the symbol id
	c = ((j->code_buffer >> (32 - k)) & __stbi_bmask[k]) + h->delta[k];
	STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & __stbi_bmask[h->size[c]]) == h->code[c]);

	// convert the id to a symbol
	j->code_bits -= k;
	j->code_buffer <<= k;
	return h->values[c];
}

// JPEG "extend" bias: bias[n] = (-1<<n) + 1, added when the sign bit is 0
static const int __stbi_jbias[16] = { 0, -1, -3, -7, -15, -31, -63, -127, -255, -511, -1023, -2047, -4095, -8191, -16383, -32767 };

// combined JPEG 'receive' and JPEG 'extend', since baseline
// always extends everything it receives.
// Reads n bits and sign-extends them per the JPEG EXTEND procedure.
stbi_inline static int __stbi_extend_receive(__stbi_jpeg* j, int n)
{
	unsigned int k;
	int sgn;

	// Validate n BEFORE using it as a rotate count: the previous code
	// performed stbi_lrot(code_buffer, n) first, which is undefined
	// behavior for out-of-range n (corrupt streams can produce such values).
	if (n < 0 || n >= (int)(sizeof(__stbi_bmask) / sizeof(*__stbi_bmask))) return 0;

	if (j->code_bits < n) __stbi_grow_buffer_unsafe(j);

	sgn = j->code_buffer >> 31; // 1 if the leading bit is set, else 0 (no signed shift)
	k = stbi_lrot(j->code_buffer, n);
	j->code_buffer = k & ~__stbi_bmask[n];
	k &= __stbi_bmask[n];
	j->code_bits -= n;
	// leading bit 0 means the value is negative: add the (negative) bias
	return k + (__stbi_jbias[n] & (sgn - 1));
}

// Read n unsigned bits from the entropy buffer (no sign extension).
stbi_inline static int __stbi_jpeg_get_bits(__stbi_jpeg* j, int n)
{
	unsigned int bits;
	if (j->code_bits < n) __stbi_grow_buffer_unsafe(j);
	bits = stbi_lrot(j->code_buffer, n);
	j->code_buffer = bits & ~__stbi_bmask[n];
	j->code_bits -= n;
	return bits & __stbi_bmask[n];
}

// Read a single bit; non-zero iff the bit was 1.
stbi_inline static int __stbi_jpeg_get_bit(__stbi_jpeg* j)
{
	unsigned int top;
	if (j->code_bits < 1) __stbi_grow_buffer_unsafe(j);
	top = j->code_buffer & 0x80000000;
	j->code_buffer <<= 1;
	--j->code_bits;
	return top;
}

// given a value that's at position X in the zigzag stream,
// where does it appear in the 8x8 matrix coded as row-major?
// (the 15 trailing 63s let corrupt input index up to 64+14 without
// reading out of bounds)
static const stbi_uc __stbi_jpeg_dezigzag[64 + 15] = { 0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5, 12, 19, 26, 33, 40, 48, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28, 35, 42, 49, 56, 57, 50, 43,
	36, 29, 22, 15, 23, 30, 37, 44, 51, 58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63,
	// let corrupt input sample past end
	63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63 };

// decode one 64-entry block--
// Baseline (sequential) decode of one 8x8 coefficient block into 'value'
// in natural (de-zigzagged) order, dequantizing as it goes. Returns 0
// via __stbi_err on corrupt Huffman data.
static int __stbi_jpeg_decode_block(__stbi_jpeg* j, short value[64], __stbi_huffman* hdc, __stbi_huffman* hac, __stbi_int16* fac, int b, __stbi_uint16* dequant)
{
	int diff, dc, k;
	int t;

	if (j->code_bits < 16) __stbi_grow_buffer_unsafe(j);
	t = __stbi_jpeg_huff_decode(j, hdc);
	if (t < 0) return __stbi_err("bad huffman code", "Corrupt JPEG");

	// 0 all the ac values now so we can do it 32-bits at a time
	memset(value, 0, 64 * sizeof(value[0]));

	// DC is coded as a delta from the previous block's DC of component b
	diff = t ? __stbi_extend_receive(j, t) : 0;
	dc = j->img_comp[b].dc_pred + diff;
	j->img_comp[b].dc_pred = dc;
	value[0] = (short)(dc * dequant[0]);

	// decode AC components, see JPEG spec
	k = 1;
	do
	{
		unsigned int zig;
		int c, r, s;
		if (j->code_bits < 16) __stbi_grow_buffer_unsafe(j);
		c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS) - 1);
		r = fac[c];
		if (r)
		{ // fast-AC path
			k += (r >> 4) & 15; // run
			s = r & 15; // combined length
			j->code_buffer <<= s;
			j->code_bits -= s;
			// decode into unzigzag'd location
			zig = __stbi_jpeg_dezigzag[k++];
			value[zig] = (short)((r >> 8) * dequant[zig]);
		}
		else
		{
			int rs = __stbi_jpeg_huff_decode(j, hac);
			if (rs < 0) return __stbi_err("bad huffman code", "Corrupt JPEG");
			s = rs & 15;
			r = rs >> 4;
			if (s == 0)
			{
				if (rs != 0xf0) break; // end block
				k += 16; // ZRL code: run of 16 zeros
			}
			else
			{
				k += r;
				// decode into unzigzag'd location
				zig = __stbi_jpeg_dezigzag[k++];
				value[zig] = (short)(__stbi_extend_receive(j, s) * dequant[zig]);
			}
		}
	} while (k < 64);
	return 1;
}

// Progressive-mode DC scan: either decode the initial DC coefficient
// (succ_high == 0) or refine it by one bit.
static int __stbi_jpeg_decode_block_prog_dc(__stbi_jpeg* j, short value[64], __stbi_huffman* hdc, int b)
{
	int diff, dc;
	int t;
	if (j->spec_end != 0) return __stbi_err("can't merge dc and ac", "Corrupt JPEG");

	if (j->code_bits < 16) __stbi_grow_buffer_unsafe(j);

	if (j->succ_high == 0)
	{
		// first scan for DC coefficient, must be first
		memset(value, 0, 64 * sizeof(value[0])); // 0 all the ac values now
		t = __stbi_jpeg_huff_decode(j, hdc);
		if (t == -1) return __stbi_err("can't merge dc and ac", "Corrupt JPEG");
		diff = t ? __stbi_extend_receive(j, t) : 0;

		dc = j->img_comp[b].dc_pred + diff;
		j->img_comp[b].dc_pred = dc;
		value[0] = (short)(dc << j->succ_low);
	}
	else
	{
		// refinement scan for DC coefficient
		if (__stbi_jpeg_get_bit(j)) value[0] += (short)(1 << j->succ_low);
	}
	return 1;
}

// @OPTIMIZE: store non-zigzagged during the decode passes,
// and only de-zigzag when dequantizing
// Progressive-mode AC scan: first pass (succ_high == 0) decodes new AC
// coefficients in the spectral band [spec_start, spec_end]; later passes
// refine existing coefficients by one bit each. eob_run counts blocks
// to skip as part of an end-of-band run.
static int __stbi_jpeg_decode_block_prog_ac(__stbi_jpeg* j, short value[64], __stbi_huffman* hac, __stbi_int16* fac)
{
	int k;
	if (j->spec_start == 0) return __stbi_err("can't merge dc and ac", "Corrupt JPEG");

	if (j->succ_high == 0)
	{
		int shift = j->succ_low;

		// inside an end-of-band run: this whole block stays zero
		if (j->eob_run)
		{
			--j->eob_run;
			return 1;
		}

		k = j->spec_start;
		do
		{
			unsigned int zig;
			int c, r, s;
			if (j->code_bits < 16) __stbi_grow_buffer_unsafe(j);
			c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS) - 1);
			r = fac[c];
			if (r)
			{ // fast-AC path
				k += (r >> 4) & 15; // run
				s = r & 15; // combined length
				j->code_buffer <<= s;
				j->code_bits -= s;
				zig = __stbi_jpeg_dezigzag[k++];
				value[zig] = (short)((r >> 8) << shift);
			}
			else
			{
				int rs = __stbi_jpeg_huff_decode(j, hac);
				if (rs < 0) return __stbi_err("bad huffman code", "Corrupt JPEG");
				s = rs & 15;
				r = rs >> 4;
				if (s == 0)
				{
					// r < 15 starts an end-of-band run of 2^r (+extra bits) blocks
					if (r < 15)
					{
						j->eob_run = (1 << r);
						if (r) j->eob_run += __stbi_jpeg_get_bits(j, r);
						--j->eob_run;
						break;
					}
					k += 16; // ZRL: run of 16 zeros
				}
				else
				{
					k += r;
					zig = __stbi_jpeg_dezigzag[k++];
					value[zig] = (short)(__stbi_extend_receive(j, s) << shift);
				}
			}
		} while (k <= j->spec_end);
	}
	else
	{
		// refinement scan for these AC coefficients

		short bit = (short)(1 << j->succ_low);

		if (j->eob_run)
		{
			// end-of-band run: no new coefficients, but existing non-zero
			// coefficients still receive one correction bit each
			--j->eob_run;
			for (k = j->spec_start; k <= j->spec_end; ++k)
			{
				short* p = &value[__stbi_jpeg_dezigzag[k]];
				if (*p != 0)
					if (__stbi_jpeg_get_bit(j))
						if ((*p & bit) == 0)
						{
							// move the magnitude away from zero by one refinement bit
							if (*p > 0)
								*p += bit;
							else
								*p -= bit;
						}
			}
		}
		else
		{
			k = j->spec_start;
			do
			{
				int r, s;
				int rs = __stbi_jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use the fast path here, advance-by-r is so slow, eh
				if (rs < 0) return __stbi_err("bad huffman code", "Corrupt JPEG");
				s = rs & 15;
				r = rs >> 4;
				if (s == 0)
				{
					if (r < 15)
					{
						j->eob_run = (1 << r) - 1;
						if (r) j->eob_run += __stbi_jpeg_get_bits(j, r);
						r = 64; // force end of block
					}
					else
					{
						// r=15 s=0 should write 16 0s, so we just do
						// a run of 15 0s and then write s (which is 0),
						// so we don't have to do anything special here
					}
				}
				else
				{
					if (s != 1) return __stbi_err("bad huffman code", "Corrupt JPEG");
					// sign bit
					if (__stbi_jpeg_get_bit(j))
						s = bit;
					else
						s = -bit;
				}

				// advance by r
				while (k <= j->spec_end)
				{
					short* p = &value[__stbi_jpeg_dezigzag[k++]];
					if (*p != 0)
					{
						// already-nonzero coefficients consume a correction bit
						// instead of counting against the zero run
						if (__stbi_jpeg_get_bit(j))
							if ((*p & bit) == 0)
							{
								if (*p > 0)
									*p += bit;
								else
									*p -= bit;
							}
					}
					else
					{
						if (r == 0)
						{
							// zero run exhausted: place the newly decoded coefficient
							*p = (short)s;
							break;
						}
						--r;
					}
				}
			} while (k <= j->spec_end);
		}
	}
	return 1;
}

// clamp an int into the 0..255 byte range
stbi_inline static stbi_uc __stbi_clamp(int x)
{
	if (x < 0) return 0;
	if (x > 255) return 255;
	return (stbi_uc)x;
}

#define __stbi_f2f(x) ((int)(((x)*4096 + 0.5))) // float constant -> fixed point scaled by 4096, rounded
#define __stbi_fsh(x) ((x)*4096) // integer -> same fixed-point scale

// derived from jidctint -- DCT_ISLOW
// One 8-point 1-D IDCT stage in fixed point (constants scaled by 4096 via
// __stbi_f2f). The macro DECLARES its temporaries (t0..t3, p1..p5, x0..x3),
// so it can be expanded at most once per scope; results are left in
// x0..x3 (even part) and t0..t3 (odd part) for the caller to combine.
#define STBI__IDCT_1D(s0, s1, s2, s3, s4, s5, s6, s7)       \
	int t0, t1, t2, t3, p1, p2, p3, p4, p5, x0, x1, x2, x3; \
	p2 = s2;                                                \
	p3 = s6;                                                \
	p1 = (p2 + p3) * __stbi_f2f(0.5411961f);                \
	t2 = p1 + p3 * __stbi_f2f(-1.847759065f);               \
	t3 = p1 + p2 * __stbi_f2f(0.765366865f);                \
	p2 = s0;                                                \
	p3 = s4;                                                \
	t0 = __stbi_fsh(p2 + p3);                               \
	t1 = __stbi_fsh(p2 - p3);                               \
	x0 = t0 + t3;                                           \
	x3 = t0 - t3;                                           \
	x1 = t1 + t2;                                           \
	x2 = t1 - t2;                                           \
	t0 = s7;                                                \
	t1 = s5;                                                \
	t2 = s3;                                                \
	t3 = s1;                                                \
	p3 = t0 + t2;                                           \
	p4 = t1 + t3;                                           \
	p1 = t0 + t3;                                           \
	p2 = t1 + t2;                                           \
	p5 = (p3 + p4) * __stbi_f2f(1.175875602f);              \
	t0 = t0 * __stbi_f2f(0.298631336f);                     \
	t1 = t1 * __stbi_f2f(2.053119869f);                     \
	t2 = t2 * __stbi_f2f(3.072711026f);                     \
	t3 = t3 * __stbi_f2f(1.501321110f);                     \
	p1 = p5 + p1 * __stbi_f2f(-0.899976223f);               \
	p2 = p5 + p2 * __stbi_f2f(-2.562915447f);               \
	p3 = p3 * __stbi_f2f(-1.961570560f);                    \
	p4 = p4 * __stbi_f2f(-0.390180644f);                    \
	t3 += p1 + p4;                                          \
	t2 += p2 + p3;                                          \
	t1 += p2 + p4;                                          \
	t0 += p1 + p3;

// Generic integer IDCT of one 8x8 block: column pass into 'val' (keeping
// 2 extra bits of precision), then row pass writing clamped 0..255 bytes
// to 'out' with the given stride.
static void __stbi_idct_block(stbi_uc* out, int out_stride, short value[64])
{
	// Plain C declarations: the previous `int i{}, val[64]{}` / `stbi_uc* o{};`
	// brace-init is C++-only syntax and does not compile as C. Zero-init is
	// unnecessary anyway: every element of val is written by the column pass
	// before the row pass reads it, and i/o are assigned before use.
	int i, val[64], *v = val;
	stbi_uc* o;
	short* d = value;

	// columns
	for (i = 0; i < 8; ++i, ++d, ++v)
	{
		// if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing
		if (d[8] == 0 && d[16] == 0 && d[24] == 0 && d[32] == 0 && d[40] == 0 && d[48] == 0 && d[56] == 0)
		{
			//    no shortcut                 0     seconds
			//    (1|2|3|4|5|6|7)==0          0     seconds
			//    all separate               -0.047 seconds
			//    1 && 2|3 && 4|5 && 6|7:    -0.047 seconds
			int dcterm = d[0] * 4;
			v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm;
		}
		else
		{
			STBI__IDCT_1D(d[0], d[8], d[16], d[24], d[32], d[40], d[48], d[56])
			// constants scaled things up by 1<<12; let's bring them back
			// down, but keep 2 extra bits of precision
			x0 += 512;
			x1 += 512;
			x2 += 512;
			x3 += 512;
			v[0] = (x0 + t3) >> 10;
			v[56] = (x0 - t3) >> 10;
			v[8] = (x1 + t2) >> 10;
			v[48] = (x1 - t2) >> 10;
			v[16] = (x2 + t1) >> 10;
			v[40] = (x2 - t1) >> 10;
			v[24] = (x3 + t0) >> 10;
			v[32] = (x3 - t0) >> 10;
		}
	}

	for (i = 0, v = val, o = out; i < 8; ++i, v += 8, o += out_stride)
	{
		// no fast case since the first 1D IDCT spread components out
		STBI__IDCT_1D(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7])
		// constants scaled things up by 1<<12, plus we had 1<<2 from first
		// loop, plus horizontal and vertical each scale by sqrt(8) so together
		// we've got an extra 1<<3, so 1<<17 total we need to remove.
		// so we want to round that, which means adding 0.5 * 1<<17,
		// aka 65536. Also, we'll end up with -128 to 127 that we want
		// to encode as 0..255 by adding 128, so we'll add that before the shift
		x0 += 65536 + (128 << 17);
		x1 += 65536 + (128 << 17);
		x2 += 65536 + (128 << 17);
		x3 += 65536 + (128 << 17);
		// tried computing the shifts into temps, or'ing the temps to see
		// if any were out of range, but that was slower
		o[0] = __stbi_clamp((x0 + t3) >> 17);
		o[7] = __stbi_clamp((x0 - t3) >> 17);
		o[1] = __stbi_clamp((x1 + t2) >> 17);
		o[6] = __stbi_clamp((x1 - t2) >> 17);
		o[2] = __stbi_clamp((x2 + t1) >> 17);
		o[5] = __stbi_clamp((x2 - t1) >> 17);
		o[3] = __stbi_clamp((x3 + t0) >> 17);
		o[4] = __stbi_clamp((x3 - t0) >> 17);
	}
}

#ifdef STBI_SSE2
// sse2 integer IDCT. not the fastest possible implementation but it
// produces bit-identical results to the generic C version so it's
// fully "transparent".
static void __stbi_idct_simd(stbi_uc* out, int out_stride, short value[64])
{
	// This is constructed to match our regular (generic) integer IDCT exactly.
	__m128i row0, row1, row2, row3, row4, row5, row6, row7;
	__m128i tmp;

	// dot product constant: even elems=x, odd elems=y
#define dct_const(x, y) _mm_setr_epi16((x), (y), (x), (y), (x), (y), (x), (y))

	// out(0) = c0[even]*x + c0[odd]*y   (c0, x, y 16-bit, out 32-bit)
	// out(1) = c1[even]*x + c1[odd]*y
#define dct_rot(out0, out1, x, y, c0, c1)          \
	__m128i c0##lo = _mm_unpacklo_epi16((x), (y)); \
	__m128i c0##hi = _mm_unpackhi_epi16((x), (y)); \
	__m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \
	__m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \
	__m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \
	__m128i out1##_h = _mm_madd_epi16(c0##hi, c1)

	// out = in << 12  (in 16-bit, out 32-bit)
#define dct_widen(out, in)                                                              \
	__m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \
	__m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4)

	// wide add
#define dct_wadd(out, a, b)                        \
	__m128i out##_l = _mm_add_epi32(a##_l, b##_l); \
	__m128i out##_h = _mm_add_epi32(a##_h, b##_h)

	// wide sub
#define dct_wsub(out, a, b)                        \
	__m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \
	__m128i out##_h = _mm_sub_epi32(a##_h, b##_h)

	// butterfly a/b, add bias, then shift by "s" and pack
#define dct_bfly32o(out0, out1, a, b, bias, s)                                      \
	{                                                                               \
		__m128i abiased_l = _mm_add_epi32(a##_l, bias);                             \
		__m128i abiased_h = _mm_add_epi32(a##_h, bias);                             \
		dct_wadd(sum, abiased, b);                                                  \
		dct_wsub(dif, abiased, b);                                                  \
		out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \
		out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \
	}

	// 8-bit interleave step (for transposes)
#define dct_interleave8(a, b)    \
	tmp = a;                     \
	a = _mm_unpacklo_epi8(a, b); \
	b = _mm_unpackhi_epi8(tmp, b)

	// 16-bit interleave step (for transposes)
#define dct_interleave16(a, b)    \
	tmp = a;                      \
	a = _mm_unpacklo_epi16(a, b); \
	b = _mm_unpackhi_epi16(tmp, b)

#define dct_pass(bias, shift)                            \
	{                                                    \
		/* even part */                                  \
		dct_rot(t2e, t3e, row2, row6, rot0_0, rot0_1);   \
		__m128i sum04 = _mm_add_epi16(row0, row4);       \
		__m128i dif04 = _mm_sub_epi16(row0, row4);       \
		dct_widen(t0e, sum04);                           \
		dct_widen(t1e, dif04);                           \
		dct_wadd(x0, t0e, t3e);                          \
		dct_wsub(x3, t0e, t3e);                          \
		dct_wadd(x1, t1e, t2e);                          \
		dct_wsub(x2, t1e, t2e);                          \
		/* odd part */                                   \
		dct_rot(y0o, y2o, row7, row3, rot2_0, rot2_1);   \
		dct_rot(y1o, y3o, row5, row1, rot3_0, rot3_1);   \
		__m128i sum17 = _mm_add_epi16(row1, row7);       \
		__m128i sum35 = _mm_add_epi16(row3, row5);       \
		dct_rot(y4o, y5o, sum17, sum35, rot1_0, rot1_1); \
		dct_wadd(x4, y0o, y4o);                          \
		dct_wadd(x5, y1o, y5o);                          \
		dct_wadd(x6, y2o, y5o);                          \
		dct_wadd(x7, y3o, y4o);                          \
		dct_bfly32o(row0, row7, x0, x7, bias, shift);    \
		dct_bfly32o(row1, row6, x1, x6, bias, shift);    \
		dct_bfly32o(row2, row5, x2, x5, bias, shift);    \
		dct_bfly32o(row3, row4, x3, x4, bias, shift);    \
	}

	__m128i rot0_0 = dct_const(__stbi_f2f(0.5411961f), __stbi_f2f(0.5411961f) + __stbi_f2f(-1.847759065f));
	__m128i rot0_1 = dct_const(__stbi_f2f(0.5411961f) + __stbi_f2f(0.765366865f), __stbi_f2f(0.5411961f));
	__m128i rot1_0 = dct_const(__stbi_f2f(1.175875602f) + __stbi_f2f(-0.899976223f), __stbi_f2f(1.175875602f));
	__m128i rot1_1 = dct_const(__stbi_f2f(1.175875602f), __stbi_f2f(1.175875602f) + __stbi_f2f(-2.562915447f));
	__m128i rot2_0 = dct_const(__stbi_f2f(-1.961570560f) + __stbi_f2f(0.298631336f), __stbi_f2f(-1.961570560f));
	__m128i rot2_1 = dct_const(__stbi_f2f(-1.961570560f), __stbi_f2f(-1.961570560f) + __stbi_f2f(3.072711026f));
	__m128i rot3_0 = dct_const(__stbi_f2f(-0.390180644f) + __stbi_f2f(2.053119869f), __stbi_f2f(-0.390180644f));
	__m128i rot3_1 = dct_const(__stbi_f2f(-0.390180644f), __stbi_f2f(-0.390180644f) + __stbi_f2f(1.501321110f));

	// rounding biases in column/row passes, see __stbi_idct_block for explanation.
	__m128i bias_0 = _mm_set1_epi32(512);
	__m128i bias_1 = _mm_set1_epi32(65536 + (128 << 17));

	// load
	row0 = _mm_load_si128((const __m128i*)(value + 0 * 8));
	row1 = _mm_load_si128((const __m128i*)(value + 1 * 8));
	row2 = _mm_load_si128((const __m128i*)(value + 2 * 8));
	row3 = _mm_load_si128((const __m128i*)(value + 3 * 8));
	row4 = _mm_load_si128((const __m128i*)(value + 4 * 8));
	row5 = _mm_load_si128((const __m128i*)(value + 5 * 8));
	row6 = _mm_load_si128((const __m128i*)(value + 6 * 8));
	row7 = _mm_load_si128((const __m128i*)(value + 7 * 8));

	// column pass
	dct_pass(bias_0, 10);

	{
		// 16bit 8x8 transpose pass 1
		dct_interleave16(row0, row4);
		dct_interleave16(row1, row5);
		dct_interleave16(row2, row6);
		dct_interleave16(row3, row7);

		// transpose pass 2
		dct_interleave16(row0, row2);
		dct_interleave16(row1, row3);
		dct_interleave16(row4, row6);
		dct_interleave16(row5, row7);

		// transpose pass 3
		dct_interleave16(row0, row1);
		dct_interleave16(row2, row3);
		dct_interleave16(row4, row5);
		dct_interleave16(row6, row7);
	}

	// row pass
	dct_pass(bias_1, 17);

	{
		// pack
		__m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7
		__m128i p1 = _mm_packus_epi16(row2, row3);
		__m128i p2 = _mm_packus_epi16(row4, row5);
		__m128i p3 = _mm_packus_epi16(row6, row7);

		// 8bit 8x8 transpose pass 1
		dct_interleave8(p0, p2); // a0e0a1e1...
		dct_interleave8(p1, p3); // c0g0c1g1...

		// transpose pass 2
		dct_interleave8(p0, p1); // a0c0e0g0...
		dct_interleave8(p2, p3); // b0d0f0h0...

		// transpose pass 3
		dct_interleave8(p0, p2); // a0b0c0d0...
		dct_interleave8(p1, p3); // a4b4c4d4...

		// store
		_mm_storel_epi64((__m128i*)out, p0);
		out += out_stride;
		_mm_storel_epi64((__m128i*)out, _mm_shuffle_epi32(p0, 0x4e));
		out += out_stride;
		_mm_storel_epi64((__m128i*)out, p2);
		out += out_stride;
		_mm_storel_epi64((__m128i*)out, _mm_shuffle_epi32(p2, 0x4e));
		out += out_stride;
		_mm_storel_epi64((__m128i*)out, p1);
		out += out_stride;
		_mm_storel_epi64((__m128i*)out, _mm_shuffle_epi32(p1, 0x4e));
		out += out_stride;
		_mm_storel_epi64((__m128i*)out, p3);
		out += out_stride;
		_mm_storel_epi64((__m128i*)out, _mm_shuffle_epi32(p3, 0x4e));
	}

#undef dct_const
#undef dct_rot
#undef dct_widen
#undef dct_wadd
#undef dct_wsub
#undef dct_bfly32o
#undef dct_interleave8
#undef dct_interleave16
#undef dct_pass
}

#endif // STBI_SSE2

#ifdef STBI_NEON

// NEON integer IDCT. should produce bit-identical
// results to the generic C version.
// Pipeline: column pass -> 16-bit 8x8 transpose -> row pass ->
// pack/clamp to 8-bit -> 8-bit 8x8 transpose -> store one row per scanline.
static void __stbi_idct_simd(stbi_uc* out, int out_stride, short value[64])
{
	int16x8_t row0, row1, row2, row3, row4, row5, row6, row7;

	// fixed-point rotation constants (scaled floats via __stbi_f2f),
	// same multipliers as the scalar IDCT
	int16x4_t rot0_0 = vdup_n_s16(__stbi_f2f(0.5411961f));
	int16x4_t rot0_1 = vdup_n_s16(__stbi_f2f(-1.847759065f));
	int16x4_t rot0_2 = vdup_n_s16(__stbi_f2f(0.765366865f));
	int16x4_t rot1_0 = vdup_n_s16(__stbi_f2f(1.175875602f));
	int16x4_t rot1_1 = vdup_n_s16(__stbi_f2f(-0.899976223f));
	int16x4_t rot1_2 = vdup_n_s16(__stbi_f2f(-2.562915447f));
	int16x4_t rot2_0 = vdup_n_s16(__stbi_f2f(-1.961570560f));
	int16x4_t rot2_1 = vdup_n_s16(__stbi_f2f(-0.390180644f));
	int16x4_t rot3_0 = vdup_n_s16(__stbi_f2f(0.298631336f));
	int16x4_t rot3_1 = vdup_n_s16(__stbi_f2f(2.053119869f));
	int16x4_t rot3_2 = vdup_n_s16(__stbi_f2f(3.072711026f));
	int16x4_t rot3_3 = vdup_n_s16(__stbi_f2f(1.501321110f));

	// out = in * coeff, widened to 32-bit (low/high 4-lane halves)
#define dct_long_mul(out, inq, coeff)                        \
	int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \
	int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff)

	// out = acc + in * coeff, widened to 32-bit
#define dct_long_mac(out, acc, inq, coeff)                            \
	int32x4_t out##_l = vmlal_s16(acc##_l, vget_low_s16(inq), coeff); \
	int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff)

	// out = in << 12  (16-bit in, 32-bit out)
#define dct_widen(out, inq)                                 \
	int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \
	int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12)

	// wide add
#define dct_wadd(out, a, b)                      \
	int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \
	int32x4_t out##_h = vaddq_s32(a##_h, b##_h)

	// wide sub
#define dct_wsub(out, a, b)                      \
	int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \
	int32x4_t out##_h = vsubq_s32(a##_h, b##_h)

	// butterfly a/b, then shift using "shiftop" by "s" and pack
#define dct_bfly32o(out0, out1, a, b, shiftop, s)                  \
	{                                                              \
		dct_wadd(sum, a, b);                                       \
		dct_wsub(dif, a, b);                                       \
		out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \
		out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \
	}

	// one full 1-D IDCT pass over all eight rows; "shiftop"/"shift"
	// choose how the 32-bit intermediates are descaled back to 16-bit
#define dct_pass(shiftop, shift)                         \
	{                                                    \
		/* even part */                                  \
		int16x8_t sum26 = vaddq_s16(row2, row6);         \
		dct_long_mul(p1e, sum26, rot0_0);                \
		dct_long_mac(t2e, p1e, row6, rot0_1);            \
		dct_long_mac(t3e, p1e, row2, rot0_2);            \
		int16x8_t sum04 = vaddq_s16(row0, row4);         \
		int16x8_t dif04 = vsubq_s16(row0, row4);         \
		dct_widen(t0e, sum04);                           \
		dct_widen(t1e, dif04);                           \
		dct_wadd(x0, t0e, t3e);                          \
		dct_wsub(x3, t0e, t3e);                          \
		dct_wadd(x1, t1e, t2e);                          \
		dct_wsub(x2, t1e, t2e);                          \
		/* odd part */                                   \
		int16x8_t sum15 = vaddq_s16(row1, row5);         \
		int16x8_t sum17 = vaddq_s16(row1, row7);         \
		int16x8_t sum35 = vaddq_s16(row3, row5);         \
		int16x8_t sum37 = vaddq_s16(row3, row7);         \
		int16x8_t sumodd = vaddq_s16(sum17, sum35);      \
		dct_long_mul(p5o, sumodd, rot1_0);               \
		dct_long_mac(p1o, p5o, sum17, rot1_1);           \
		dct_long_mac(p2o, p5o, sum35, rot1_2);           \
		dct_long_mul(p3o, sum37, rot2_0);                \
		dct_long_mul(p4o, sum15, rot2_1);                \
		dct_wadd(sump13o, p1o, p3o);                     \
		dct_wadd(sump24o, p2o, p4o);                     \
		dct_wadd(sump23o, p2o, p3o);                     \
		dct_wadd(sump14o, p1o, p4o);                     \
		dct_long_mac(x4, sump13o, row7, rot3_0);         \
		dct_long_mac(x5, sump24o, row5, rot3_1);         \
		dct_long_mac(x6, sump23o, row3, rot3_2);         \
		dct_long_mac(x7, sump14o, row1, rot3_3);         \
		dct_bfly32o(row0, row7, x0, x7, shiftop, shift); \
		dct_bfly32o(row1, row6, x1, x6, shiftop, shift); \
		dct_bfly32o(row2, row5, x2, x5, shiftop, shift); \
		dct_bfly32o(row3, row4, x3, x4, shiftop, shift); \
	}

	// load
	row0 = vld1q_s16(value + 0 * 8);
	row1 = vld1q_s16(value + 1 * 8);
	row2 = vld1q_s16(value + 2 * 8);
	row3 = vld1q_s16(value + 3 * 8);
	row4 = vld1q_s16(value + 4 * 8);
	row5 = vld1q_s16(value + 5 * 8);
	row6 = vld1q_s16(value + 6 * 8);
	row7 = vld1q_s16(value + 7 * 8);

	// add DC bias (folded into lane 0 of row0 only)
	row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0));

	// column pass
	dct_pass(vrshrn_n_s32, 10);

	// 16bit 8x8 transpose
	{
		// these three map to a single VTRN.16, VTRN.32, and VSWP, respectively.
		// whether compilers actually get this is another story, sadly.
#define dct_trn16(x, y)                  \
	{                                    \
		int16x8x2_t t = vtrnq_s16(x, y); \
		x = t.val[0];                    \
		y = t.val[1];                    \
	}
#define dct_trn32(x, y)                                                                \
	{                                                                                  \
		int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); \
		x = vreinterpretq_s16_s32(t.val[0]);                                           \
		y = vreinterpretq_s16_s32(t.val[1]);                                           \
	}
#define dct_trn64(x, y)                                         \
	{                                                           \
		int16x8_t x0 = x;                                       \
		int16x8_t y0 = y;                                       \
		x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0));   \
		y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); \
	}

		// pass 1
		dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6
		dct_trn16(row2, row3);
		dct_trn16(row4, row5);
		dct_trn16(row6, row7);

		// pass 2
		dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4
		dct_trn32(row1, row3);
		dct_trn32(row4, row6);
		dct_trn32(row5, row7);

		// pass 3
		dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0
		dct_trn64(row1, row5);
		dct_trn64(row2, row6);
		dct_trn64(row3, row7);

#undef dct_trn16
#undef dct_trn32
#undef dct_trn64
	}

	// row pass
	// vrshrn_n_s32 only supports shifts up to 16, we need
	// 17. so do a non-rounding shift of 16 first then follow
	// up with a rounding shift by 1.
	dct_pass(vshrn_n_s32, 16);

	{
		// pack and round (the deferred rounding shift by 1, with
		// unsigned saturation to 0..255)
		uint8x8_t p0 = vqrshrun_n_s16(row0, 1);
		uint8x8_t p1 = vqrshrun_n_s16(row1, 1);
		uint8x8_t p2 = vqrshrun_n_s16(row2, 1);
		uint8x8_t p3 = vqrshrun_n_s16(row3, 1);
		uint8x8_t p4 = vqrshrun_n_s16(row4, 1);
		uint8x8_t p5 = vqrshrun_n_s16(row5, 1);
		uint8x8_t p6 = vqrshrun_n_s16(row6, 1);
		uint8x8_t p7 = vqrshrun_n_s16(row7, 1);

		// again, these can translate into one instruction, but often don't.
#define dct_trn8_8(x, y)               \
	{                                  \
		uint8x8x2_t t = vtrn_u8(x, y); \
		x = t.val[0];                  \
		y = t.val[1];                  \
	}
#define dct_trn8_16(x, y)                                                          \
	{                                                                              \
		uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); \
		x = vreinterpret_u8_u16(t.val[0]);                                         \
		y = vreinterpret_u8_u16(t.val[1]);                                         \
	}
#define dct_trn8_32(x, y)                                                          \
	{                                                                              \
		uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); \
		x = vreinterpret_u8_u32(t.val[0]);                                         \
		y = vreinterpret_u8_u32(t.val[1]);                                         \
	}

		// sadly can't use interleaved stores here since we only write
		// 8 bytes to each scan line!

		// 8x8 8-bit transpose pass 1
		dct_trn8_8(p0, p1);
		dct_trn8_8(p2, p3);
		dct_trn8_8(p4, p5);
		dct_trn8_8(p6, p7);

		// pass 2
		dct_trn8_16(p0, p2);
		dct_trn8_16(p1, p3);
		dct_trn8_16(p4, p6);
		dct_trn8_16(p5, p7);

		// pass 3
		dct_trn8_32(p0, p4);
		dct_trn8_32(p1, p5);
		dct_trn8_32(p2, p6);
		dct_trn8_32(p3, p7);

		// store one 8-byte row per scanline
		vst1_u8(out, p0);
		out += out_stride;
		vst1_u8(out, p1);
		out += out_stride;
		vst1_u8(out, p2);
		out += out_stride;
		vst1_u8(out, p3);
		out += out_stride;
		vst1_u8(out, p4);
		out += out_stride;
		vst1_u8(out, p5);
		out += out_stride;
		vst1_u8(out, p6);
		out += out_stride;
		vst1_u8(out, p7);

#undef dct_trn8_8
#undef dct_trn8_16
#undef dct_trn8_32
	}

#undef dct_long_mul
#undef dct_long_mac
#undef dct_widen
#undef dct_wadd
#undef dct_wsub
#undef dct_bfly32o
#undef dct_pass
}

#endif // STBI_NEON

#define STBI__MARKER_none 0xff
// if there's a pending marker from the entropy stream, return that
// otherwise, fetch from the stream and get a marker. if there's no
// marker, return 0xff, which is never a valid marker value
static stbi_uc __stbi_get_marker(__stbi_jpeg* j)
{
	// A marker seen early by the entropy decoder is cached in j->marker;
	// hand that back first and clear the cache.
	if (j->marker != STBI__MARKER_none)
	{
		stbi_uc cached = j->marker;
		j->marker = STBI__MARKER_none;
		return cached;
	}
	stbi_uc b = __stbi_get8(j->s);
	if (b != 0xff) return STBI__MARKER_none; // not a marker prefix
	// skip any run of 0xff fill bytes; the first non-0xff byte is the code
	do
	{
		b = __stbi_get8(j->s);
	} while (b == 0xff);
	return b;
}

// in each scan, we'll have scan_n components, and the order
// of the components is specified by order[]
#define STBI__RESTART(x) ((x) >= 0xd0 && (x) <= 0xd7)

// after a restart interval, __stbi_jpeg_reset the entropy decoder and
// the dc prediction
static void __stbi_jpeg_reset(__stbi_jpeg* j)
{
	// Clear all entropy-decoder state for the start of a scan or after a
	// restart marker.
	j->code_bits = 0;
	j->code_buffer = 0;
	j->nomore = 0;
	j->marker = STBI__MARKER_none;
	j->eob_run = 0;
	// DC prediction restarts for every component
	for (int c = 0; c < 4; ++c) j->img_comp[c].dc_pred = 0;
	// no restart interval => effectively unlimited MCUs (0x7fffffff);
	// that's plenty safe, since we don't even allow 1<<30 pixels
	j->todo = j->restart_interval ? j->restart_interval : 0x7fffffff;
}

// Decode one scan's worth of entropy-coded data.
// Baseline (non-progressive): each 8x8 block is huffman-decoded and run
// through the IDCT kernel straight into img_comp[].value.
// Progressive: blocks accumulate into the img_comp[].coeff arrays; the
// IDCT is deferred to __stbi_jpeg_finish.
// In both modes, restart markers reset entropy state every
// z->restart_interval MCUs. Returns 1 on success, 0 on decode error.
static int __stbi_parse_entropy_coded_data(__stbi_jpeg* z)
{
	__stbi_jpeg_reset(z);
	if (!z->progressive)
	{
		if (z->scan_n == 1)
		{
			int i, j;
			STBI_SIMD_ALIGN(short, value[64]);
			int n = z->order[0];
			// non-interleaved value, we just need to process one block at a time,
			// in trivial scanline order
			// number of blocks to do just depends on how many actual "pixels" this
			// component has, independent of interleaved MCU blocking and such
			int w = (z->img_comp[n].x + 7) >> 3;
			int h = (z->img_comp[n].y + 7) >> 3;
			for (j = 0; j < h; ++j)
			{
				for (i = 0; i < w; ++i)
				{
					int ha = z->img_comp[n].ha;
					if (!__stbi_jpeg_decode_block(z, value, z->huff_dc + z->img_comp[n].hd, z->huff_ac + ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0;
					z->idct_block_kernel(z->img_comp[n].value + z->img_comp[n].w2 * j * 8 + i * 8, z->img_comp[n].w2, value);
					// every value block is an MCU, so countdown the restart interval
					if (--z->todo <= 0)
					{
						if (z->code_bits < 24) __stbi_grow_buffer_unsafe(z);
						// if it's NOT a restart, then just bail, so we get corrupt value
						// rather than no value
						if (!STBI__RESTART(z->marker)) return 1;
						__stbi_jpeg_reset(z);
					}
				}
			}
			return 1;
		}
		else
		{ // interleaved
			int i, j, k, x, y;
			STBI_SIMD_ALIGN(short, value[64]);
			for (j = 0; j < z->img_mcu_y; ++j)
			{
				for (i = 0; i < z->img_mcu_x; ++i)
				{
					// scan an interleaved mcu... process scan_n components in order
					for (k = 0; k < z->scan_n; ++k)
					{
						int n = z->order[k];
						// scan out an mcu's worth of this component; that's just determined
						// by the basic H and V specified for the component
						for (y = 0; y < z->img_comp[n].v; ++y)
						{
							for (x = 0; x < z->img_comp[n].h; ++x)
							{
								// pixel position of this block inside the component plane
								int x2 = (i * z->img_comp[n].h + x) * 8;
								int y2 = (j * z->img_comp[n].v + y) * 8;
								int ha = z->img_comp[n].ha;
								if (!__stbi_jpeg_decode_block(z, value, z->huff_dc + z->img_comp[n].hd, z->huff_ac + ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0;
								z->idct_block_kernel(z->img_comp[n].value + z->img_comp[n].w2 * y2 + x2, z->img_comp[n].w2, value);
							}
						}
					}
					// after all interleaved components, that's an interleaved MCU,
					// so now count down the restart interval
					if (--z->todo <= 0)
					{
						if (z->code_bits < 24) __stbi_grow_buffer_unsafe(z);
						if (!STBI__RESTART(z->marker)) return 1;
						__stbi_jpeg_reset(z);
					}
				}
			}
			return 1;
		}
	}
	else
	{
		if (z->scan_n == 1)
		{
			int i, j;
			int n = z->order[0];
			// non-interleaved value, we just need to process one block at a time,
			// in trivial scanline order
			// number of blocks to do just depends on how many actual "pixels" this
			// component has, independent of interleaved MCU blocking and such
			int w = (z->img_comp[n].x + 7) >> 3;
			int h = (z->img_comp[n].y + 7) >> 3;
			for (j = 0; j < h; ++j)
			{
				for (i = 0; i < w; ++i)
				{
					// progressive scans refine the persistent coefficient buffer
					short* value = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w);
					if (z->spec_start == 0)
					{
						// DC-only scan
						if (!__stbi_jpeg_decode_block_prog_dc(z, value, &z->huff_dc[z->img_comp[n].hd], n)) return 0;
					}
					else
					{
						// AC refinement scan
						int ha = z->img_comp[n].ha;
						if (!__stbi_jpeg_decode_block_prog_ac(z, value, &z->huff_ac[ha], z->fast_ac[ha])) return 0;
					}
					// every value block is an MCU, so countdown the restart interval
					if (--z->todo <= 0)
					{
						if (z->code_bits < 24) __stbi_grow_buffer_unsafe(z);
						if (!STBI__RESTART(z->marker)) return 1;
						__stbi_jpeg_reset(z);
					}
				}
			}
			return 1;
		}
		else
		{ // interleaved
			int i, j, k, x, y;
			for (j = 0; j < z->img_mcu_y; ++j)
			{
				for (i = 0; i < z->img_mcu_x; ++i)
				{
					// scan an interleaved mcu... process scan_n components in order
					for (k = 0; k < z->scan_n; ++k)
					{
						int n = z->order[k];
						// scan out an mcu's worth of this component; that's just determined
						// by the basic H and V specified for the component
						for (y = 0; y < z->img_comp[n].v; ++y)
						{
							for (x = 0; x < z->img_comp[n].h; ++x)
							{
								// block position (in block units, not pixels)
								int x2 = (i * z->img_comp[n].h + x);
								int y2 = (j * z->img_comp[n].v + y);
								short* value = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w);
								// interleaved progressive scans are DC-only by spec
								if (!__stbi_jpeg_decode_block_prog_dc(z, value, &z->huff_dc[z->img_comp[n].hd], n)) return 0;
							}
						}
					}
					// after all interleaved components, that's an interleaved MCU,
					// so now count down the restart interval
					if (--z->todo <= 0)
					{
						if (z->code_bits < 24) __stbi_grow_buffer_unsafe(z);
						if (!STBI__RESTART(z->marker)) return 1;
						__stbi_jpeg_reset(z);
					}
				}
			}
			return 1;
		}
	}
}

// Scale all 64 coefficients of one block by its quantization table.
static void __stbi_jpeg_dequantize(short* value, __stbi_uint16* dequant)
{
	int k;
	for (k = 63; k >= 0; --k)
	{
		value[k] *= dequant[k];
	}
}

// For progressive JPEGs the scans only accumulated coefficients; run the
// deferred dequantize + IDCT over every 8x8 block now. No-op for baseline.
static void __stbi_jpeg_finish(__stbi_jpeg* z)
{
	if (!z->progressive) return;

	int comp, by, bx;
	for (comp = 0; comp < z->s->img_n; ++comp)
	{
		// block counts for this component (round pixel dims up to 8)
		int bw = (z->img_comp[comp].x + 7) >> 3;
		int bh = (z->img_comp[comp].y + 7) >> 3;
		for (by = 0; by < bh; ++by)
		{
			for (bx = 0; bx < bw; ++bx)
			{
				short* value = z->img_comp[comp].coeff + 64 * (bx + by * z->img_comp[comp].coeff_w);
				__stbi_jpeg_dequantize(value, z->dequant[z->img_comp[comp].tq]);
				z->idct_block_kernel(z->img_comp[comp].value + z->img_comp[comp].w2 * by * 8 + bx * 8, z->img_comp[comp].w2, value);
			}
		}
	}
}

// Handle one marker segment: DRI, DQT, DHT, COM, or APPn (including the
// JFIF APP0 and Adobe APP14 segments we care about). SOI/EOI/SOF/SOS are
// dispatched by the callers, not here. Returns 1 on success, 0 on error.
static int __stbi_process_marker(__stbi_jpeg* z, int m)
{
	int L; // remaining payload length of the current segment
	switch (m)
	{
	case STBI__MARKER_none: // no marker found
		return __stbi_err("expected marker", "Corrupt JPEG");

	case 0xDD: // DRI - specify restart interval
		if (__stbi_get16be(z->s) != 4) return __stbi_err("bad DRI len", "Corrupt JPEG");
		z->restart_interval = __stbi_get16be(z->s);
		return 1;

	case 0xDB: // DQT - define quantization table
		L = __stbi_get16be(z->s) - 2;
		while (L > 0)
		{
			int q = __stbi_get8(z->s);
			int p = q >> 4, sixteen = (p != 0); // precision: 0 = 8-bit entries, 1 = 16-bit
			int t = q & 15, i;                  // table id 0..3
			if (p != 0 && p != 1) return __stbi_err("bad DQT type", "Corrupt JPEG");
			if (t > 3) return __stbi_err("bad DQT table", "Corrupt JPEG");

			// entries arrive in zigzag order; store in natural order
			for (i = 0; i < 64; ++i) z->dequant[t][__stbi_jpeg_dezigzag[i]] = (__stbi_uint16)(sixteen ? __stbi_get16be(z->s) : __stbi_get8(z->s));
			L -= (sixteen ? 129 : 65); // 1 header byte + 64 entries of 2 or 1 bytes
		}
		return L == 0;

	case 0xC4: // DHT - define huffman table
		L = __stbi_get16be(z->s) - 2;
		while (L > 0)
		{
			stbi_uc* v{};
			int sizes[16]{}, i{}, n = 0;
			int q = __stbi_get8(z->s);
			int tc = q >> 4;  // table class: 0 = DC, 1 = AC
			int th = q & 15;  // table id 0..3
			if (tc > 1 || th > 3) return __stbi_err("bad DHT header", "Corrupt JPEG");
			// 16 counts of codes per bit-length; n = total number of codes
			for (i = 0; i < 16; ++i)
			{
				sizes[i] = __stbi_get8(z->s);
				n += sizes[i];
			}
			L -= 17; // 1 header byte + 16 counts
			if (tc == 0)
			{
				if (!__stbi_build_huffman(z->huff_dc + th, sizes)) return 0;
				v = z->huff_dc[th].values;
			}
			else
			{
				if (!__stbi_build_huffman(z->huff_ac + th, sizes)) return 0;
				v = z->huff_ac[th].values;
			}
			for (i = 0; i < n; ++i) v[i] = __stbi_get8(z->s);
			if (tc != 0) __stbi_build_fast_ac(z->fast_ac[th], z->huff_ac + th);
			L -= n;
		}
		return L == 0;
	}

	// check for comment block or APP blocks
	if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE)
	{
		L = __stbi_get16be(z->s);
		if (L < 2)
		{
			if (m == 0xFE)
				return __stbi_err("bad COM len", "Corrupt JPEG");
			else
				return __stbi_err("bad APP len", "Corrupt JPEG");
		}
		L -= 2;

		if (m == 0xE0 && L >= 5)
		{ // JFIF APP0 segment
			static const unsigned char tag[5] = { 'J', 'F', 'I', 'F', '\0' };
			int ok = 1;
			int i;
			for (i = 0; i < 5; ++i)
				if (__stbi_get8(z->s) != tag[i]) ok = 0;
			L -= 5;
			if (ok) z->jfif = 1;
		}
		else if (m == 0xEE && L >= 12)
		{ // Adobe APP14 segment
			static const unsigned char tag[6] = { 'A', 'd', 'o', 'b', 'e', '\0' };
			int ok = 1;
			int i;
			for (i = 0; i < 6; ++i)
				if (__stbi_get8(z->s) != tag[i]) ok = 0;
			L -= 6;
			if (ok)
			{
				__stbi_get8(z->s); // version
				__stbi_get16be(z->s); // flags0
				__stbi_get16be(z->s); // flags1
				z->app14_color_transform = __stbi_get8(z->s); // color transform
				L -= 6;
			}
		}

		// skip whatever payload we didn't consume
		__stbi_skip(z->s, L);
		return 1;
	}

	return __stbi_err("unknown marker", "Corrupt JPEG");
}

// after we see SOS: parse the scan header — component count, per-component
// huffman table selectors, and (for progressive) spectral selection and
// successive approximation fields. Returns 1 on success, 0 on error.
static int __stbi_process_scan_header(__stbi_jpeg* z)
{
	int i;
	int Ls = __stbi_get16be(z->s);
	z->scan_n = __stbi_get8(z->s);
	if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int)z->s->img_n) return __stbi_err("bad SOS component count", "Corrupt JPEG");
	if (Ls != 6 + 2 * z->scan_n) return __stbi_err("bad SOS len", "Corrupt JPEG");
	for (i = 0; i < z->scan_n; ++i)
	{
		int id = __stbi_get8(z->s), which;
		int q = __stbi_get8(z->s);
		// map the scan's component id back to the frame component index
		for (which = 0; which < z->s->img_n; ++which)
			if (z->img_comp[which].id == id) break;
		if (which == z->s->img_n) return 0; // no match
		z->img_comp[which].hd = q >> 4;   // DC huffman table index
		if (z->img_comp[which].hd > 3) return __stbi_err("bad DC huff", "Corrupt JPEG");
		z->img_comp[which].ha = q & 15;   // AC huffman table index
		if (z->img_comp[which].ha > 3) return __stbi_err("bad AC huff", "Corrupt JPEG");
		z->order[i] = which;
	}

	{
		int aa;
		z->spec_start = __stbi_get8(z->s);
		z->spec_end = __stbi_get8(z->s); // should be 63, but might be 0
		aa = __stbi_get8(z->s);
		z->succ_high = (aa >> 4);
		z->succ_low = (aa & 15);
		if (z->progressive)
		{
			if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13) return __stbi_err("bad SOS", "Corrupt JPEG");
		}
		else
		{
			// baseline: full-spectrum scan with no successive approximation
			if (z->spec_start != 0) return __stbi_err("bad SOS", "Corrupt JPEG");
			if (z->succ_high != 0 || z->succ_low != 0) return __stbi_err("bad SOS", "Corrupt JPEG");
			z->spec_end = 63;
		}
	}

	return 1;
}

// Free the per-component buffers of the first ncomp components and pass
// "why" straight through, so callers can write
//   return __stbi_free_jpeg_components(z, n, __stbi_err(...));
static int __stbi_free_jpeg_components(__stbi_jpeg* z, int ncomp, int why)
{
	int c;
	for (c = 0; c < ncomp; ++c)
	{
		if (z->img_comp[c].raw_data)
		{
			STBI_FREE(z->img_comp[c].raw_data);
			z->img_comp[c].raw_data = NULL;
			z->img_comp[c].value = NULL; // aligned view into raw_data; now dangling
		}
		if (z->img_comp[c].raw_coeff)
		{
			STBI_FREE(z->img_comp[c].raw_coeff);
			z->img_comp[c].raw_coeff = NULL;
			z->img_comp[c].coeff = NULL; // aligned view into raw_coeff; now dangling
		}
		if (z->img_comp[c].linebuf)
		{
			STBI_FREE(z->img_comp[c].linebuf);
			z->img_comp[c].linebuf = NULL;
		}
	}
	return why;
}

// Parse the SOF segment: image dimensions, component count, per-component
// sampling factors and quant-table selectors; then (when scan ==
// STBI__SCAN_load) compute MCU geometry and allocate the per-component
// decode buffers. Returns 1 on success, 0 on error.
static int __stbi_process_frame_header(__stbi_jpeg* z, int scan)
{
	__stbi_context* s = z->s;
	int Lf, p, i, q, h_max = 1, v_max = 1, c;
	Lf = __stbi_get16be(s);
	if (Lf < 11) return __stbi_err("bad SOF len", "Corrupt JPEG"); // JPEG
	p = __stbi_get8(s);
	if (p != 8) return __stbi_err("only 8-bit", "JPEG format not supported: 8-bit only"); // JPEG baseline
	s->img_y = __stbi_get16be(s);
	if (s->img_y == 0) return __stbi_err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG
	s->img_x = __stbi_get16be(s);
	if (s->img_x == 0) return __stbi_err("0 width", "Corrupt JPEG"); // JPEG requires
	if (s->img_y > STBI_MAX_DIMENSIONS) return __stbi_err("too large", "Very large image (corrupt?)");
	if (s->img_x > STBI_MAX_DIMENSIONS) return __stbi_err("too large", "Very large image (corrupt?)");
	c = __stbi_get8(s);
	if (c != 3 && c != 1 && c != 4) return __stbi_err("bad component count", "Corrupt JPEG");
	s->img_n = c;
	for (i = 0; i < c; ++i)
	{
		z->img_comp[i].value = NULL;
		z->img_comp[i].linebuf = NULL;
	}

	if (Lf != 8 + 3 * s->img_n) return __stbi_err("bad SOF len", "Corrupt JPEG");

	z->rgb = 0;
	for (i = 0; i < s->img_n; ++i)
	{
		static const unsigned char rgb[3] = { 'R', 'G', 'B' };
		z->img_comp[i].id = __stbi_get8(s);
		// some encoders label components 'R','G','B' instead of YCbCr ids
		if (s->img_n == 3 && z->img_comp[i].id == rgb[i]) ++z->rgb;
		q = __stbi_get8(s);
		z->img_comp[i].h = (q >> 4); // horizontal sampling factor
		if (!z->img_comp[i].h || z->img_comp[i].h > 4) return __stbi_err("bad H", "Corrupt JPEG");
		z->img_comp[i].v = q & 15;   // vertical sampling factor
		if (!z->img_comp[i].v || z->img_comp[i].v > 4) return __stbi_err("bad V", "Corrupt JPEG");
		z->img_comp[i].tq = __stbi_get8(s); // quant table index
		if (z->img_comp[i].tq > 3) return __stbi_err("bad TQ", "Corrupt JPEG");
	}

	// header-only scans stop here; allocation below is for a full decode
	if (scan != STBI__SCAN_load) return 1;

	if (!__stbi_mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) return __stbi_err("too large", "Image too large to decode");

	for (i = 0; i < s->img_n; ++i)
	{
		if (z->img_comp[i].h > h_max) h_max = z->img_comp[i].h;
		if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v;
	}

	// compute interleaved mcu info
	z->img_h_max = h_max;
	z->img_v_max = v_max;
	z->img_mcu_w = h_max * 8;
	z->img_mcu_h = v_max * 8;
	// these sizes can't be more than 17 bits
	z->img_mcu_x = (s->img_x + z->img_mcu_w - 1) / z->img_mcu_w;
	z->img_mcu_y = (s->img_y + z->img_mcu_h - 1) / z->img_mcu_h;

	for (i = 0; i < s->img_n; ++i)
	{
		// number of effective pixels (e.g. for non-interleaved MCU)
		z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max - 1) / h_max;
		z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max - 1) / v_max;
		// to simplify generation, we'll allocate enough memory to decode
		// the bogus oversized value from using interleaved MCUs and their
		// big blocks (e.g. a 16x16 iMCU on an image of width 33); we won't
		// discard the extra value until colorspace conversion
		//
		// img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier)
		// so these muls can't overflow with 32-bit ints (which we require)
		z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8;
		z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8;
		z->img_comp[i].coeff = 0;
		z->img_comp[i].raw_coeff = 0;
		z->img_comp[i].linebuf = NULL;
		z->img_comp[i].raw_data = __stbi_malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15);
		if (z->img_comp[i].raw_data == NULL) return __stbi_free_jpeg_components(z, i + 1, __stbi_err("outofmem", "Out of memory"));
		// align blocks for idct using mmx/sse
		z->img_comp[i].value = (stbi_uc*)(((size_t)z->img_comp[i].raw_data + 15) & ~15);
		if (z->progressive)
		{
			// w2, h2 are multiples of 8 (see above)
			z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8;
			z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8;
			z->img_comp[i].raw_coeff = __stbi_malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15);
			if (z->img_comp[i].raw_coeff == NULL) return __stbi_free_jpeg_components(z, i + 1, __stbi_err("outofmem", "Out of memory"));
			z->img_comp[i].coeff = (short*)(((size_t)z->img_comp[i].raw_coeff + 15) & ~15);
		}
	}

	return 1;
}

// use comparisons since in some cases we handle more than one case (e.g. SOF)
#define __stbi_DNL(x) ((x) == 0xdc)
#define __stbi_SOI(x) ((x) == 0xd8)
#define __stbi_EOI(x) ((x) == 0xd9)
#define __stbi_SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2)
#define __stbi_SOS(x) ((x) == 0xda)

#define __stbi_SOF_progressive(x) ((x) == 0xc2)

// Read markers from SOI up to (and including) the start-of-frame header.
// scan == STBI__SCAN_type only verifies the SOI signature.
// Returns 1 on success, 0 on error.
static int __stbi_decode_jpeg_header(__stbi_jpeg* z, int scan)
{
	int m;
	z->jfif = 0;
	z->app14_color_transform = -1; // unknown until an Adobe APP14 block says otherwise (0,1,2)
	z->marker = STBI__MARKER_none; // start with an empty marker cache
	m = __stbi_get_marker(z);
	if (!__stbi_SOI(m)) return __stbi_err("no SOI", "Corrupt JPEG");
	if (scan == STBI__SCAN_type) return 1;
	m = __stbi_get_marker(z);
	while (!__stbi_SOF(m))
	{
		if (!__stbi_process_marker(z, m)) return 0;
		// some files have extra padding after their blocks; keep scanning
		// until a real marker shows up (or we hit end of stream)
		for (;;)
		{
			m = __stbi_get_marker(z);
			if (m != STBI__MARKER_none) break;
			if (__stbi_at_eof(z->s)) return __stbi_err("no SOF", "Corrupt JPEG");
		}
	}
	z->progressive = __stbi_SOF_progressive(m);
	if (!__stbi_process_frame_header(z, scan)) return 0;
	return 1;
}

// decode image to YCbCr format: parse the frame header, then loop over
// marker segments (SOS scans, DNL, everything else via
// __stbi_process_marker) until EOI. Returns 1 on success, 0 on error.
static int __stbi_decode_jpeg_image(__stbi_jpeg* j)
{
	int m;
	// clear allocation pointers so cleanup is safe even on early failure
	for (m = 0; m < 4; m++)
	{
		j->img_comp[m].raw_data = NULL;
		j->img_comp[m].raw_coeff = NULL;
	}
	j->restart_interval = 0;
	if (!__stbi_decode_jpeg_header(j, STBI__SCAN_load)) return 0;
	m = __stbi_get_marker(j);
	while (!__stbi_EOI(m))
	{
		if (__stbi_SOS(m))
		{
			if (!__stbi_process_scan_header(j)) return 0;
			if (!__stbi_parse_entropy_coded_data(j)) return 0;
			if (j->marker == STBI__MARKER_none)
			{
				// handle 0s at the end of image value from IP Kamera 9060
				while (!__stbi_at_eof(j->s))
				{
					int x = __stbi_get8(j->s);
					if (x == 255)
					{
						// cache the marker byte for the next __stbi_get_marker call
						j->marker = __stbi_get8(j->s);
						break;
					}
				}
				// if we reach eof without hitting a marker, __stbi_get_marker() below will fail and we'll eventually return 0
			}
		}
		else if (__stbi_DNL(m))
		{
			// DNL: late height declaration; must match the SOF height
			int Ld = __stbi_get16be(j->s);
			__stbi_uint32 NL = __stbi_get16be(j->s);
			if (Ld != 4) return __stbi_err("bad DNL len", "Corrupt JPEG");
			if (NL != j->s->img_y) return __stbi_err("bad DNL height", "Corrupt JPEG");
		}
		else
		{
			if (!__stbi_process_marker(j, m)) return 0;
		}
		m = __stbi_get_marker(j);
	}
	// progressive coefficients still need their deferred IDCT
	if (j->progressive) __stbi_jpeg_finish(j);
	return 1;
}

// static jfif-centered resampling (across block boundaries)

// shared signature of all row-upsampling kernels: produce one output row
// from the two nearest source rows (in0 = nearer, in1 = farther), given w
// input pixels and horizontal expansion factor hs
typedef stbi_uc* (*resample_row_func)(stbi_uc* out, stbi_uc* in0, stbi_uc* in1, int w, int hs);

// truncating divide by 4; callers add the rounding bias (+2) themselves
#define __stbi_div4(x) ((stbi_uc)((x) >> 2))

static stbi_uc* resample_row_1(stbi_uc* out, stbi_uc* in_near, stbi_uc* in_far, int w, int hs)
{
	// 1:1 case — no upsampling needed, so skip the scratch buffer entirely
	// and hand the caller the input row itself
	STBI_NOTUSED(hs);
	STBI_NOTUSED(w);
	STBI_NOTUSED(in_far);
	STBI_NOTUSED(out);
	return in_near;
}

static stbi_uc* __stbi_resample_row_v_2(stbi_uc* out, stbi_uc* in_near, stbi_uc* in_far, int w, int hs)
{
	// vertical 2x upsample: each output pixel is a 3:1 weighted blend of the
	// nearer and farther source rows, rounded (+2) then divided by 4
	int col;
	STBI_NOTUSED(hs);
	for (col = 0; col < w; ++col)
	{
		out[col] = __stbi_div4(3 * in_near[col] + in_far[col] + 2);
	}
	return out;
}

// horizontal 2x upsample (JFIF-centered): each input pixel yields two
// outputs blended 3:1 toward its left/right neighbors; edges replicate.
// in_far is unused because the vertical position does not change.
static stbi_uc* __stbi_resample_row_h_2(stbi_uc* out, stbi_uc* in_near, stbi_uc* in_far, int w, int hs)
{
	// need to Generate two samples horizontally for every one in input
	int i;
	stbi_uc* input = in_near;

	if (w == 1)
	{
		// if only one sample, can't do any interpolation
		out[0] = out[1] = input[0];
		return out;
	}

	// left edge: replicate, then blend toward the right neighbor
	out[0] = input[0];
	out[1] = __stbi_div4(input[0] * 3 + input[1] + 2);
	for (i = 1; i < w - 1; ++i)
	{
		// n carries the shared 3*center + rounding term for both outputs
		int n = 3 * input[i] + 2;
		out[i * 2 + 0] = __stbi_div4(n + input[i - 1]);
		out[i * 2 + 1] = __stbi_div4(n + input[i + 1]);
	}
	// right edge mirrors the left-edge handling (i == w-1 here)
	out[i * 2 + 0] = __stbi_div4(input[w - 2] * 3 + input[w - 1] + 2);
	out[i * 2 + 1] = input[w - 1];

	STBI_NOTUSED(in_far);
	STBI_NOTUSED(hs);

	return out;
}

// truncating divide by 16; callers add the rounding bias (+8) themselves
#define __stbi_div16(x) ((stbi_uc)((x) >> 4))

// 2x2 upsample: vertical 3:1 blend (t0/t1) feeding a horizontal 3:1 blend.
// Must stay bit-exact with __stbi_resample_row_hv_2_simd below.
static stbi_uc* __stbi_resample_row_hv_2(stbi_uc* out, stbi_uc* in_near, stbi_uc* in_far, int w, int hs)
{
	// need to Generate 2x2 samples for every one in input
	int i, t0, t1;
	if (w == 1)
	{
		out[0] = out[1] = __stbi_div4(3 * in_near[0] + in_far[0] + 2);
		return out;
	}

	// t1 holds the vertically-filtered value of the current column,
	// t0 the previous column's; the left edge replicates
	t1 = 3 * in_near[0] + in_far[0];
	out[0] = __stbi_div4(t1 + 2);
	for (i = 1; i < w; ++i)
	{
		t0 = t1;
		t1 = 3 * in_near[i] + in_far[i];
		out[i * 2 - 1] = __stbi_div16(3 * t0 + t1 + 8);
		out[i * 2] = __stbi_div16(3 * t1 + t0 + 8);
	}
	// right edge replicates the last vertically-filtered value
	out[w * 2 - 1] = __stbi_div4(t1 + 2);

	STBI_NOTUSED(hs);

	return out;
}

#if defined(STBI_SSE2) || defined(STBI_NEON)
// SIMD version of __stbi_resample_row_hv_2: processes 8 input pixels per
// iteration, then finishes the tail (and the boundary pixels) in scalar
// code.  Produces bit-identical output to the scalar kernel.
static stbi_uc* __stbi_resample_row_hv_2_simd(stbi_uc* out, stbi_uc* in_near, stbi_uc* in_far, int w, int hs)
{
	// need to Generate 2x2 samples for every one in input
	int i = 0, t0, t1;

	if (w == 1)
	{
		out[0] = out[1] = __stbi_div4(3 * in_near[0] + in_far[0] + 2);
		return out;
	}

	t1 = 3 * in_near[0] + in_far[0];
	// process groups of 8 pixels for as long as we can.
	// note we can't handle the last pixel in a row in this loop
	// because we need to handle the filter boundary conditions.
	// (the loop reads in_near[i+8]/in_far[i+8], so the bound also keeps
	// that one-past-the-group read inside the row)
	for (; i < ((w - 1) & ~7); i += 8)
	{
#if defined(STBI_SSE2)
		// load and perform the vertical filtering pass
		// this uses 3*x + y = 4*x + (y - x)
		__m128i zero = _mm_setzero_si128();
		__m128i farb = _mm_loadl_epi64((__m128i*)(in_far + i));
		__m128i nearb = _mm_loadl_epi64((__m128i*)(in_near + i));
		__m128i farw = _mm_unpacklo_epi8(farb, zero);
		__m128i nearw = _mm_unpacklo_epi8(nearb, zero);
		__m128i diff = _mm_sub_epi16(farw, nearw);
		__m128i nears = _mm_slli_epi16(nearw, 2);
		__m128i curr = _mm_add_epi16(nears, diff); // current row

		// horizontal filter works the same based on shifted vers of current
		// row. "prev" is current row shifted right by 1 pixel; we need to
		// insert the previous pixel value (from t1).
		// "next" is current row shifted left by 1 pixel, with first pixel
		// of next block of 8 pixels added in.
		__m128i prv0 = _mm_slli_si128(curr, 2);
		__m128i nxt0 = _mm_srli_si128(curr, 2);
		__m128i prev = _mm_insert_epi16(prv0, t1, 0);
		__m128i next = _mm_insert_epi16(nxt0, 3 * in_near[i + 8] + in_far[i + 8], 7);

		// horizontal filter, polyphase implementation since it's convenient:
		// even pixels = 3*cur + prev = cur*4 + (prev - cur)
		// odd  pixels = 3*cur + next = cur*4 + (next - cur)
		// note the shared term.
		__m128i bias = _mm_set1_epi16(8);
		__m128i curs = _mm_slli_epi16(curr, 2);
		__m128i prvd = _mm_sub_epi16(prev, curr);
		__m128i nxtd = _mm_sub_epi16(next, curr);
		__m128i curb = _mm_add_epi16(curs, bias);
		__m128i even = _mm_add_epi16(prvd, curb);
		__m128i odd = _mm_add_epi16(nxtd, curb);

		// interleave even and odd pixels, then undo scaling.
		__m128i int0 = _mm_unpacklo_epi16(even, odd);
		__m128i int1 = _mm_unpackhi_epi16(even, odd);
		__m128i de0 = _mm_srli_epi16(int0, 4);
		__m128i de1 = _mm_srli_epi16(int1, 4);

		// pack and write output
		__m128i outv = _mm_packus_epi16(de0, de1);
		_mm_storeu_si128((__m128i*)(out + i * 2), outv);
#elif defined(STBI_NEON)
		// load and perform the vertical filtering pass
		// this uses 3*x + y = 4*x + (y - x)
		uint8x8_t farb = vld1_u8(in_far + i);
		uint8x8_t nearb = vld1_u8(in_near + i);
		int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb));
		int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2));
		int16x8_t curr = vaddq_s16(nears, diff); // current row

		// horizontal filter works the same based on shifted vers of current
		// row. "prev" is current row shifted right by 1 pixel; we need to
		// insert the previous pixel value (from t1).
		// "next" is current row shifted left by 1 pixel, with first pixel
		// of next block of 8 pixels added in.
		int16x8_t prv0 = vextq_s16(curr, curr, 7);
		int16x8_t nxt0 = vextq_s16(curr, curr, 1);
		int16x8_t prev = vsetq_lane_s16(t1, prv0, 0);
		int16x8_t next = vsetq_lane_s16(3 * in_near[i + 8] + in_far[i + 8], nxt0, 7);

		// horizontal filter, polyphase implementation since it's convenient:
		// even pixels = 3*cur + prev = cur*4 + (prev - cur)
		// odd  pixels = 3*cur + next = cur*4 + (next - cur)
		// note the shared term.
		int16x8_t curs = vshlq_n_s16(curr, 2);
		int16x8_t prvd = vsubq_s16(prev, curr);
		int16x8_t nxtd = vsubq_s16(next, curr);
		int16x8_t even = vaddq_s16(curs, prvd);
		int16x8_t odd = vaddq_s16(curs, nxtd);

		// undo scaling and round, then store with even/odd phases interleaved
		uint8x8x2_t o;
		o.val[0] = vqrshrun_n_s16(even, 4);
		o.val[1] = vqrshrun_n_s16(odd, 4);
		vst2_u8(out + i * 2, o);
#endif

		// "previous" value for next iter
		t1 = 3 * in_near[i + 7] + in_far[i + 7];
	}

	// scalar epilogue: finish remaining columns exactly like the scalar kernel
	t0 = t1;
	t1 = 3 * in_near[i] + in_far[i];
	out[i * 2] = __stbi_div16(3 * t1 + t0 + 8);

	for (++i; i < w; ++i)
	{
		t0 = t1;
		t1 = 3 * in_near[i] + in_far[i];
		out[i * 2 - 1] = __stbi_div16(3 * t0 + t1 + 8);
		out[i * 2] = __stbi_div16(3 * t1 + t0 + 8);
	}
	out[w * 2 - 1] = __stbi_div4(t1 + 2);

	STBI_NOTUSED(hs);

	return out;
}
#endif

static stbi_uc* __stbi_resample_row_generic(stbi_uc* out, stbi_uc* in_near, stbi_uc* in_far, int w, int hs)
{
	// nearest-neighbor fallback for unusual sampling factors: replicate
	// every input pixel hs times across the output row
	int col, rep;
	STBI_NOTUSED(in_far);
	for (col = 0; col < w; ++col)
	{
		for (rep = 0; rep < hs; ++rep)
		{
			out[col * hs + rep] = in_near[col];
		}
	}
	return out;
}

// this is a reduced-precision calculation of YCbCr-to-RGB introduced
// to make sure the code produces the same results in both SIMD and scalar
// (constants are quantized to 12 fractional bits, then shifted up to 20)
#define __stbi_float2fixed(x) (((int)((x)*4096.0f + 0.5f)) << 8)
// Scalar JPEG YCbCr->RGB: writes R,G,B and alpha=255 per pixel, advancing
// `out` by `step` bytes each pixel (step==3 skips writing past the pixel,
// since out[3] of one pixel is out[0] of the next only when step==4... the
// alpha store at out[3] is simply overwritten/ignored for step==3 callers).
static void __stbi_YCbCr_to_RGB_row(stbi_uc* out, const stbi_uc* y, const stbi_uc* pcb, const stbi_uc* pcr, int count, int step)
{
	int i;
	for (i = 0; i < count; ++i)
	{
		int y_fixed = (y[i] << 20) + (1 << 19); // rounding
		int r, g, b;
		int cr = pcr[i] - 128;
		int cb = pcb[i] - 128;
		r = y_fixed + cr * __stbi_float2fixed(1.40200f);
		// the & 0xffff0000 truncates the cb term's low bits to mimic the
		// SIMD path's 16-bit intermediate precision
		g = y_fixed + (cr * -__stbi_float2fixed(0.71414f)) + ((cb * -__stbi_float2fixed(0.34414f)) & 0xffff0000);
		b = y_fixed + cb * __stbi_float2fixed(1.77200f);
		r >>= 20;
		g >>= 20;
		b >>= 20;
		// clamp each channel to [0,255]; the unsigned compare catches both
		// negative values and overflow in one test
		if ((unsigned)r > 255)
		{
			if (r < 0)
				r = 0;
			else
				r = 255;
		}
		if ((unsigned)g > 255)
		{
			if (g < 0)
				g = 0;
			else
				g = 255;
		}
		if ((unsigned)b > 255)
		{
			if (b < 0)
				b = 0;
			else
				b = 255;
		}
		out[0] = (stbi_uc)r;
		out[1] = (stbi_uc)g;
		out[2] = (stbi_uc)b;
		out[3] = 255;
		out += step;
	}
}

#if defined(STBI_SSE2) || defined(STBI_NEON)
// SIMD YCbCr->RGB: accelerates step==4 (RGBA) in groups of 8 pixels, then
// falls through to the scalar loop for the tail and for step==3.
// Designed to produce the same values as __stbi_YCbCr_to_RGB_row.
static void __stbi_YCbCr_to_RGB_simd(stbi_uc* out, stbi_uc const* y, stbi_uc const* pcb, stbi_uc const* pcr, int count, int step)
{
	int i = 0;

#ifdef STBI_SSE2
	// step == 3 is pretty ugly on the final interleave, and i'm not convinced
	// it's useful in practice (you wouldn't use it for textures, for example).
	// so just accelerate step == 4 case.
	if (step == 4)
	{
		// this is a fairly straightforward implementation and not super-optimized.
		__m128i signflip = _mm_set1_epi8(-0x80);
		__m128i cr_const0 = _mm_set1_epi16((short)(1.40200f * 4096.0f + 0.5f));
		__m128i cr_const1 = _mm_set1_epi16(-(short)(0.71414f * 4096.0f + 0.5f));
		__m128i cb_const0 = _mm_set1_epi16(-(short)(0.34414f * 4096.0f + 0.5f));
		__m128i cb_const1 = _mm_set1_epi16((short)(1.77200f * 4096.0f + 0.5f));
		__m128i y_bias = _mm_set1_epi8((char)(unsigned char)128);
		__m128i xw = _mm_set1_epi16(255); // alpha channel

		for (; i + 7 < count; i += 8)
		{
			// load
			__m128i y_bytes = _mm_loadl_epi64((__m128i*)(y + i));
			__m128i cr_bytes = _mm_loadl_epi64((__m128i*)(pcr + i));
			__m128i cb_bytes = _mm_loadl_epi64((__m128i*)(pcb + i));
			__m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128
			__m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128

			// unpack to short (and left-shift cr, cb by 8)
			__m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes);
			__m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased);
			__m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased);

			// color transform
			__m128i yws = _mm_srli_epi16(yw, 4);
			__m128i cr0 = _mm_mulhi_epi16(cr_const0, crw);
			__m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw);
			__m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1);
			__m128i cr1 = _mm_mulhi_epi16(crw, cr_const1);
			__m128i rws = _mm_add_epi16(cr0, yws);
			__m128i gwt = _mm_add_epi16(cb0, yws);
			__m128i bws = _mm_add_epi16(yws, cb1);
			__m128i gws = _mm_add_epi16(gwt, cr1);

			// descale
			__m128i rw = _mm_srai_epi16(rws, 4);
			__m128i bw = _mm_srai_epi16(bws, 4);
			__m128i gw = _mm_srai_epi16(gws, 4);

			// back to byte, set up for transpose
			__m128i brb = _mm_packus_epi16(rw, bw);
			__m128i gxb = _mm_packus_epi16(gw, xw);

			// transpose to interleave channels
			__m128i t0 = _mm_unpacklo_epi8(brb, gxb);
			__m128i t1 = _mm_unpackhi_epi8(brb, gxb);
			__m128i o0 = _mm_unpacklo_epi16(t0, t1);
			__m128i o1 = _mm_unpackhi_epi16(t0, t1);

			// store
			_mm_storeu_si128((__m128i*)(out + 0), o0);
			_mm_storeu_si128((__m128i*)(out + 16), o1);
			out += 32;
		}
	}
#endif

#ifdef STBI_NEON
	// in this version, step=3 support would be easy to add. but is there demand?
	if (step == 4)
	{
		// this is a fairly straightforward implementation and not super-optimized.
		uint8x8_t signflip = vdup_n_u8(0x80);
		int16x8_t cr_const0 = vdupq_n_s16((short)(1.40200f * 4096.0f + 0.5f));
		int16x8_t cr_const1 = vdupq_n_s16(-(short)(0.71414f * 4096.0f + 0.5f));
		int16x8_t cb_const0 = vdupq_n_s16(-(short)(0.34414f * 4096.0f + 0.5f));
		int16x8_t cb_const1 = vdupq_n_s16((short)(1.77200f * 4096.0f + 0.5f));

		for (; i + 7 < count; i += 8)
		{
			// load
			uint8x8_t y_bytes = vld1_u8(y + i);
			uint8x8_t cr_bytes = vld1_u8(pcr + i);
			uint8x8_t cb_bytes = vld1_u8(pcb + i);
			int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip));
			int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip));

			// expand to s16
			int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4));
			int16x8_t crw = vshll_n_s8(cr_biased, 7);
			int16x8_t cbw = vshll_n_s8(cb_biased, 7);

			// color transform
			int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0);
			int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0);
			int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1);
			int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1);
			int16x8_t rws = vaddq_s16(yws, cr0);
			int16x8_t gws = vaddq_s16(vaddq_s16(yws, cb0), cr1);
			int16x8_t bws = vaddq_s16(yws, cb1);

			// undo scaling, round, convert to byte
			uint8x8x4_t o;
			o.val[0] = vqrshrun_n_s16(rws, 4);
			o.val[1] = vqrshrun_n_s16(gws, 4);
			o.val[2] = vqrshrun_n_s16(bws, 4);
			o.val[3] = vdup_n_u8(255);

			// store, interleaving r/g/b/a
			vst4_u8(out, o);
			out += 8 * 4;
		}
	}
#endif

	// scalar tail: identical math to __stbi_YCbCr_to_RGB_row
	for (; i < count; ++i)
	{
		int y_fixed = (y[i] << 20) + (1 << 19); // rounding
		int r, g, b;
		int cr = pcr[i] - 128;
		int cb = pcb[i] - 128;
		r = y_fixed + cr * __stbi_float2fixed(1.40200f);
		g = y_fixed + cr * -__stbi_float2fixed(0.71414f) + ((cb * -__stbi_float2fixed(0.34414f)) & 0xffff0000);
		b = y_fixed + cb * __stbi_float2fixed(1.77200f);
		r >>= 20;
		g >>= 20;
		b >>= 20;
		// clamp to [0,255]; unsigned compare catches negatives and overflow
		if ((unsigned)r > 255)
		{
			if (r < 0)
				r = 0;
			else
				r = 255;
		}
		if ((unsigned)g > 255)
		{
			if (g < 0)
				g = 0;
			else
				g = 255;
		}
		if ((unsigned)b > 255)
		{
			if (b < 0)
				b = 0;
			else
				b = 255;
		}
		out[0] = (stbi_uc)r;
		out[1] = (stbi_uc)g;
		out[2] = (stbi_uc)b;
		out[3] = 255;
		out += step;
	}
}
#endif

// set up the kernels
// Install the scalar implementations first, then override with SIMD
// versions where available.  NOTE(review): assignment order matters — if
// both STBI_SSE2 and STBI_NEON were somehow defined, the NEON assignments
// would win because they run last; presumably the two are mutually
// exclusive in practice.
static void __stbi_setup_jpeg(__stbi_jpeg* j)
{
	j->idct_block_kernel = __stbi_idct_block;
	j->YCbCr_to_RGB_kernel = __stbi_YCbCr_to_RGB_row;
	j->resample_row_hv_2_kernel = __stbi_resample_row_hv_2;

#ifdef STBI_SSE2
	// SSE2 is a runtime check (CPUID); NEON below is compile-time only
	if (__stbi_sse2_available())
	{
		j->idct_block_kernel = __stbi_idct_simd;
		j->YCbCr_to_RGB_kernel = __stbi_YCbCr_to_RGB_simd;
		j->resample_row_hv_2_kernel = __stbi_resample_row_hv_2_simd;
	}
#endif

#ifdef STBI_NEON
	j->idct_block_kernel = __stbi_idct_simd;
	j->YCbCr_to_RGB_kernel = __stbi_YCbCr_to_RGB_simd;
	j->resample_row_hv_2_kernel = __stbi_resample_row_hv_2_simd;
#endif
}

// clean up the temporary component buffers
// (frees raw_data/raw_coeff/linebuf for each component; the 0 means the
// decoder struct and context themselves are left alone)
static void __stbi_cleanup_jpeg(__stbi_jpeg* j)
{
	__stbi_free_jpeg_components(j, j->s->img_n, 0);
}

// per-component upsampling state used by load_jpeg_image
typedef struct
{
	resample_row_func resample; // kernel chosen from the hs/vs factors
	stbi_uc *line0, *line1; // the two source rows currently being blended
	int hs, vs; // expansion factor in each axis
	int w_lores; // horizontal pixels pre-expansion
	int ystep; // how far through vertical expansion we are
	int ypos; // which pre-expansion row we're on
} __stbi_resample;

// fast 0..255 * 0..255 => 0..255 rounded multiplication
static stbi_uc __stbi_blinn_8x8(stbi_uc x, stbi_uc y)
{
	// Blinn's trick: for t in this range, (t + (t >> 8)) >> 8 is an exact
	// rounded division by 255
	unsigned int product = x * y + 128;
	unsigned int scaled = (product + (product >> 8)) >> 8;
	return (stbi_uc)scaled;
}

// Decode a whole JPEG, upsample the chroma planes, and color-convert into
// an interleaved 8-bit buffer with `n` channels (req_comp, or 3/1 chosen
// from the source).  Returns a malloc'd buffer the caller must free, or
// NULL on failure.  *comp reports the source's natural component count
// (3 or 1), not the output channel count.
static stbi_uc* load_jpeg_image(__stbi_jpeg* z, int* out_x, int* out_y, int* comp, int req_comp)
{
	int n, decode_n, is_rgb;
	z->s->img_n = 0; // make __stbi_cleanup_jpeg safe
	// validate req_comp
	if (req_comp < 0 || req_comp > 4) return __stbi_errpuc("bad req_comp", "Internal error");

	// load a jpeg image from whichever source, but leave in YCbCr format
	if (!__stbi_decode_jpeg_image(z))
	{
		__stbi_cleanup_jpeg(z);
		return NULL;
	}

	// determine actual number of components to Generate
	n = req_comp ? req_comp : z->s->img_n >= 3 ? 3 : 1;

	// Adobe APP14 transform 0 (or a non-JFIF file whose components are
	// literally 'R','G','B') means the planes are already RGB, not YCbCr
	is_rgb = z->s->img_n == 3 && (z->rgb == 3 || (z->app14_color_transform == 0 && !z->jfif));

	// for grayscale output from a YCbCr image, only the luma plane matters
	if (z->s->img_n == 3 && n < 3 && !is_rgb)
		decode_n = 1;
	else
		decode_n = z->s->img_n;

	// resample and color-convert
	{
		int k;
		unsigned int i, j;
		stbi_uc* output;
		stbi_uc* coutput[4] = { NULL, NULL, NULL, NULL };

		__stbi_resample res_comp[4]{};

		for (k = 0; k < decode_n; ++k)
		{
			__stbi_resample* r = &res_comp[k];

			// allocate line buffer big enough for upsampling off the edges
			// with upsample factor of 4
			z->img_comp[k].linebuf = (stbi_uc*)__stbi_malloc(z->s->img_x + 3);
			if (!z->img_comp[k].linebuf)
			{
				__stbi_cleanup_jpeg(z);
				return __stbi_errpuc("outofmem", "Out of memory");
			}

			// expansion factors relative to the largest sampling factor
			r->hs = z->img_h_max / z->img_comp[k].h;
			r->vs = z->img_v_max / z->img_comp[k].v;
			r->ystep = r->vs >> 1;
			r->w_lores = (z->s->img_x + r->hs - 1) / r->hs;
			r->ypos = 0;
			r->line0 = r->line1 = z->img_comp[k].value;

			// pick the fastest kernel that matches the expansion factors
			if (r->hs == 1 && r->vs == 1)
				r->resample = resample_row_1;
			else if (r->hs == 1 && r->vs == 2)
				r->resample = __stbi_resample_row_v_2;
			else if (r->hs == 2 && r->vs == 1)
				r->resample = __stbi_resample_row_h_2;
			else if (r->hs == 2 && r->vs == 2)
				r->resample = z->resample_row_hv_2_kernel;
			else
				r->resample = __stbi_resample_row_generic;
		}

		// last allocation: nothing after this point can fail
		output = (stbi_uc*)__stbi_malloc_mad3(n, z->s->img_x, z->s->img_y, 1);
		if (!output)
		{
			__stbi_cleanup_jpeg(z);
			return __stbi_errpuc("outofmem", "Out of memory");
		}

		// now go ahead and resample
		for (j = 0; j < z->s->img_y; ++j)
		{
			stbi_uc* out = output + n * z->s->img_x * j;
			for (k = 0; k < decode_n; ++k)
			{
				__stbi_resample* r = &res_comp[k];
				// y_bot selects which of line0/line1 is "nearer" this output row
				int y_bot = r->ystep >= (r->vs >> 1);
				coutput[k] = r->resample(z->img_comp[k].linebuf, y_bot ? r->line1 : r->line0, y_bot ? r->line0 : r->line1, r->w_lores, r->hs);
				if (++r->ystep >= r->vs)
				{
					// advance to the next pre-expansion source row
					r->ystep = 0;
					r->line0 = r->line1;
					if (++r->ypos < z->img_comp[k].y) r->line1 += z->img_comp[k].w2;
				}
			}
			if (n >= 3)
			{
				stbi_uc* y = coutput[0];
				if (z->s->img_n == 3)
				{
					if (is_rgb)
					{
						// planes are already RGB: just interleave
						for (i = 0; i < z->s->img_x; ++i)
						{
							out[0] = y[i];
							out[1] = coutput[1][i];
							out[2] = coutput[2][i];
							out[3] = 255;
							out += n;
						}
					}
					else
					{
						z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n);
					}
				}
				else if (z->s->img_n == 4)
				{
					if (z->app14_color_transform == 0)
					{ // CMYK
						for (i = 0; i < z->s->img_x; ++i)
						{
							stbi_uc m = coutput[3][i];
							out[0] = __stbi_blinn_8x8(coutput[0][i], m);
							out[1] = __stbi_blinn_8x8(coutput[1][i], m);
							out[2] = __stbi_blinn_8x8(coutput[2][i], m);
							out[3] = 255;
							out += n;
						}
					}
					else if (z->app14_color_transform == 2)
					{ // YCCK: YCbCr->RGB first, then invert and apply K in place
						z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n);
						for (i = 0; i < z->s->img_x; ++i)
						{
							stbi_uc m = coutput[3][i];
							out[0] = __stbi_blinn_8x8(255 - out[0], m);
							out[1] = __stbi_blinn_8x8(255 - out[1], m);
							out[2] = __stbi_blinn_8x8(255 - out[2], m);
							out += n;
						}
					}
					else
					{ // YCbCr + alpha?  Ignore the fourth channel for now
						z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n);
					}
				}
				else
					// grayscale source expanded to RGB(A)
					for (i = 0; i < z->s->img_x; ++i)
					{
						out[0] = out[1] = out[2] = y[i];
						out[3] = 255; // not used if n==3
						out += n;
					}
			}
			else
			{
				// n is 1 or 2: produce luma (plus optional opaque alpha)
				if (is_rgb)
				{
					if (n == 1)
						for (i = 0; i < z->s->img_x; ++i) *out++ = __stbi_compute_y(coutput[0][i], coutput[1][i], coutput[2][i]);
					else
					{
						for (i = 0; i < z->s->img_x; ++i, out += 2)
						{
							out[0] = __stbi_compute_y(coutput[0][i], coutput[1][i], coutput[2][i]);
							out[1] = 255;
						}
					}
				}
				else if (z->s->img_n == 4 && z->app14_color_transform == 0)
				{
					// CMYK -> RGB -> luma
					for (i = 0; i < z->s->img_x; ++i)
					{
						stbi_uc m = coutput[3][i];
						stbi_uc r = __stbi_blinn_8x8(coutput[0][i], m);
						stbi_uc g = __stbi_blinn_8x8(coutput[1][i], m);
						stbi_uc b = __stbi_blinn_8x8(coutput[2][i], m);
						out[0] = __stbi_compute_y(r, g, b);
						out[1] = 255;
						out += n;
					}
				}
				else if (z->s->img_n == 4 && z->app14_color_transform == 2)
				{
					// YCCK: luma channel only, inverted and multiplied by K
					for (i = 0; i < z->s->img_x; ++i)
					{
						out[0] = __stbi_blinn_8x8(255 - coutput[0][i], coutput[3][i]);
						out[1] = 255;
						out += n;
					}
				}
				else
				{
					// plain grayscale copy
					stbi_uc* y = coutput[0];
					if (n == 1)
						for (i = 0; i < z->s->img_x; ++i) out[i] = y[i];
					else
						for (i = 0; i < z->s->img_x; ++i)
						{
							*out++ = y[i];
							*out++ = 255;
						}
				}
			}
		}
		// frees component buffers only; z->s (the context) stays valid below
		__stbi_cleanup_jpeg(z);
		*out_x = z->s->img_x;
		*out_y = z->s->img_y;
		if (comp) *comp = z->s->img_n >= 3 ? 3 : 1; // report original components, not output
		return output;
	}
}

// Top-level JPEG loader: heap-allocates the (large) decoder state, runs the
// full decode, and frees the state before returning.  Returns the pixel
// buffer, or NULL on failure (error recorded via __stbi_errpuc).
static void* __stbi_jpeg_load(__stbi_context* s, int* x, int* y, int* comp, int req_comp, __stbi_result_info* ri)
{
	unsigned char* result;
	__stbi_jpeg* j = (__stbi_jpeg*)__stbi_malloc(sizeof(__stbi_jpeg));
	STBI_NOTUSED(ri);
	// fix: the allocation was dereferenced without a NULL check
	if (!j) return __stbi_errpuc("outofmem", "Out of memory");
	j->s = s;
	__stbi_setup_jpeg(j);
	result = load_jpeg_image(j, x, y, comp, req_comp);
	STBI_FREE(j);
	return result;
}

// Probe whether the stream looks like a JPEG: parse up to SOI only, then
// rewind so a real decode (or another format's test) can start fresh.
// Returns nonzero if the header parsed, 0 otherwise.
static int __stbi_jpeg_test(__stbi_context* s)
{
	int r;
	__stbi_jpeg* j = (__stbi_jpeg*)__stbi_malloc(sizeof(__stbi_jpeg));
	// fix: the allocation was dereferenced without a NULL check
	if (!j) return __stbi_err("outofmem", "Out of memory");
	j->s = s;
	__stbi_setup_jpeg(j);
	r = __stbi_decode_jpeg_header(j, STBI__SCAN_type);
	__stbi_rewind(s);
	STBI_FREE(j);
	return r;
}

// Parse only the headers and report dimensions plus the component count we
// would output (3 for color, 1 for grayscale).  Rewinds the stream on
// failure so another decoder may try it.  Returns 1 on success, 0 on failure.
static int __stbi_jpeg_info_raw(__stbi_jpeg* j, int* x, int* y, int* comp)
{
	if (__stbi_decode_jpeg_header(j, STBI__SCAN_header))
	{
		if (x) *x = j->s->img_x;
		if (y) *y = j->s->img_y;
		if (comp) *comp = (j->s->img_n >= 3) ? 3 : 1;
		return 1;
	}
	__stbi_rewind(j->s);
	return 0;
}

// Public info entry point: allocates temporary decoder state and delegates
// to __stbi_jpeg_info_raw.  Returns 1 on success, 0 on failure.
static int __stbi_jpeg_info(__stbi_context* s, int* x, int* y, int* comp)
{
	int result;
	__stbi_jpeg* j = (__stbi_jpeg*)(__stbi_malloc(sizeof(__stbi_jpeg)));
	// fix: the allocation was dereferenced without a NULL check
	if (!j) return __stbi_err("outofmem", "Out of memory");
	j->s = s;
	result = __stbi_jpeg_info_raw(j, x, y, comp);
	STBI_FREE(j);
	return result;
}
#endif

// public domain zlib decode    v0.2  Sean Barrett 2006-11-18
//    simple implementation
//      - all input __zen_must be provided in an upfront buffer
//      - all output is written to a single output buffer (can malloc/realloc)
//    performance
//      - fast huffman

#ifndef STBI_NO_ZLIB

// fast-way is faster to check than jpeg huffman, but slow way is slower
#define STBI__ZFAST_BITS 9 // accelerate all cases in default tables
#define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1)

// zlib-style huffman encoding
// (jpegs packs from left, zlib from right, so can't share code)
typedef struct
{
	__stbi_uint16 fast[1 << STBI__ZFAST_BITS]; // direct lookup: (length << 9) | symbol; 0 means not in table
	__stbi_uint16 firstcode[16]; // first canonical code of each bit length
	int maxcode[17]; // per-length exclusive bound, preshifted to 16 bits; [16] is a sentinel
	__stbi_uint16 firstsymbol[16]; // symbol index corresponding to firstcode[len]
	stbi_uc size[288]; // code length for each symbol slot
	__stbi_uint16 value[288]; // decoded symbol for each slot
} __stbi_zhuffman;

// reverse the low 16 bits of n by swapping progressively larger groups:
// adjacent bits, then pairs, nibbles, and finally bytes.  Any bits above
// bit 15 are discarded by the masks.
stbi_inline static int __stbi_bitreverse16(int n)
{
	n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1);
	n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2);
	n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4);
	n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8);
	return n;
}

// reverse the low `bits` bits of v (bits <= 16)
stbi_inline static int __stbi_bit_reverse(int v, int bits)
{
	STBI_ASSERT(bits <= 16);
	// to bit reverse n bits, reverse 16 and shift
	// e.g. 11 bits, bit reverse and shift away 5
	return __stbi_bitreverse16(v) >> (16 - bits);
}

// Build a canonical Huffman decoding table from a list of per-symbol code
// lengths (DEFLATE style): fills firstcode/firstsymbol/maxcode for the
// slow path and a 9-bit direct-lookup `fast` table for short codes.
// Returns 1 on success, 0 on malformed length data.
static int __stbi_zbuild_huffman(__stbi_zhuffman* z, const stbi_uc* sizelist, int num)
{
	int i = 0, k = 0;
	int code{}, next_code[16]{}, sizes[17]{};

	// DEFLATE spec for generating codes
	// (memset is redundant with the {} value-init above, but harmless)
	memset(sizes, 0, sizeof(sizes));
	memset(z->fast, 0, sizeof(z->fast));
	// histogram of code lengths; length 0 means "symbol unused"
	for (i = 0; i < num; ++i) ++sizes[sizelist[i]];
	sizes[0] = 0;
	for (i = 1; i < 16; ++i)
		if (sizes[i] > (1 << i)) return __stbi_err("bad sizes", "Corrupt PNG");
	code = 0;
	for (i = 1; i < 16; ++i)
	{
		// canonical codes: each length starts where the previous length's
		// codes ended, shifted left one bit
		next_code[i] = code;
		z->firstcode[i] = (__stbi_uint16)code;
		z->firstsymbol[i] = (__stbi_uint16)k;
		code = (code + sizes[i]);
		if (sizes[i])
			if (code - 1 >= (1 << i)) return __stbi_err("bad codelengths", "Corrupt PNG");
		z->maxcode[i] = code << (16 - i); // preshift for inner loop
		code <<= 1;
		k += sizes[i];
	}
	z->maxcode[16] = 0x10000; // sentinel
	for (i = 0; i < num; ++i)
	{
		int s = sizelist[i];
		if (s)
		{
			int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s];
			__stbi_uint16 fastv = (__stbi_uint16)((s << 9) | i);
			z->size[c] = (stbi_uc)s;
			z->value[c] = (__stbi_uint16)i;
			if (s <= STBI__ZFAST_BITS)
			{
				// fill every fast-table slot whose low s bits match this
				// code (bit-reversed, since zlib packs LSB-first)
				int j = __stbi_bit_reverse(next_code[s], s);
				while (j < (1 << STBI__ZFAST_BITS))
				{
					z->fast[j] = fastv;
					j += (1 << s);
				}
			}
			++next_code[s];
		}
	}
	return 1;
}

// zlib-from-memory implementation for PNG reading
//    because PNG allows splitting the zlib stream arbitrarily,
//    and it's annoying structurally to have PNG call ZLIB call PNG,
//    we require PNG read all the IDATs and combine them into a single
//    memory buffer

// decoder state for one in-memory zlib stream
typedef struct
{
	stbi_uc *zbuffer, *zbuffer_end; // input cursor and end-of-input
	int num_bits; // number of valid bits currently in code_buffer
	__stbi_uint32 code_buffer; // bit reservoir, filled LSB-first per DEFLATE

	char* zout; // current write position in the output buffer
	char* zout_start; // start of the output buffer (realloc'd as needed)
	char* zout_end; // one past the end of the output buffer
	int z_expandable; // nonzero if the output buffer may be grown

	__stbi_zhuffman z_length, z_distance; // current literal/length and distance tables
} __stbi_zbuf;

stbi_inline static int __stbi_zeof(__stbi_zbuf* z)
{
	// true once the read cursor has consumed the entire input buffer
	return z->zbuffer >= z->zbuffer_end ? 1 : 0;
}

stbi_inline static stbi_uc __stbi_zget8(__stbi_zbuf* z)
{
	// read one input byte; past the end this yields 0 (callers that care
	// detect EOF separately via __stbi_zeof)
	if (__stbi_zeof(z)) return 0;
	return *z->zbuffer++;
}

// top up the bit reservoir to at least 25 valid bits, reading input bytes
// LSB-first as DEFLATE requires
static void __stbi_fill_bits(__stbi_zbuf* z)
{
	do
	{
		// a set bit at or above num_bits means the reservoir was corrupted
		// by an earlier over-read; force EOF so subsequent decodes fail
		if (z->code_buffer >= (1U << z->num_bits))
		{
			z->zbuffer = z->zbuffer_end; /* treat this as EOF so we fail. */
			return;
		}
		z->code_buffer |= (unsigned int)__stbi_zget8(z) << z->num_bits;
		z->num_bits += 8;
	} while (z->num_bits <= 24);
}

// consume and return the next n bits from the stream, LSB-first.
// assumes n is small enough (callers here pass at most 7) that a single
// __stbi_fill_bits call always satisfies the request — TODO confirm
stbi_inline static unsigned int __stbi_zreceive(__stbi_zbuf* z, int n)
{
	unsigned int k;
	if (z->num_bits < n) __stbi_fill_bits(z);
	k = z->code_buffer & ((1 << n) - 1);
	z->code_buffer >>= n;
	z->num_bits -= n;
	return k;
}

// decode one symbol whose code is longer than STBI__ZFAST_BITS.
// Returns the symbol, or -1 for an invalid/corrupt code.
static int __stbi_zhuffman_decode_slowpath(__stbi_zbuf* a, __stbi_zhuffman* z)
{
	int b, s, k;
	// not resolved by fast table, so compute it the slow way
	// use jpeg approach, which requires MSbits at top
	k = __stbi_bit_reverse(a->code_buffer, 16);
	// maxcode[] is preshifted to 16 bits, so a plain compare finds the length
	for (s = STBI__ZFAST_BITS + 1;; ++s)
		if (k < z->maxcode[s]) break;
	if (s >= 16) return -1; // invalid code!
	// code size is s, so:
	b = (k >> (16 - s)) - z->firstcode[s] + z->firstsymbol[s];
	// note: a negative b also trips this test via unsigned conversion
	if (b >= sizeof(z->size)) return -1; // some value was corrupt somewhere!
	if (z->size[b] != s) return -1; // was originally an assert, but report failure instead.
	a->code_buffer >>= s;
	a->num_bits -= s;
	return z->value[b];
}

// decode one Huffman symbol: try the 9-bit fast table first, fall back to
// the canonical slow path.  Returns the symbol, or -1 on error/EOF.
stbi_inline static int __stbi_zhuffman_decode(__stbi_zbuf* a, __stbi_zhuffman* z)
{
	int b, s;
	if (a->num_bits < 16)
	{
		if (__stbi_zeof(a))
		{
			return -1; /* report error for unexpected end of value. */
		}
		__stbi_fill_bits(a);
	}
	// fast-table entry packs (length << 9) | symbol; 0 means "not present"
	b = z->fast[a->code_buffer & STBI__ZFAST_MASK];
	if (b)
	{
		s = b >> 9;
		a->code_buffer >>= s;
		a->num_bits -= s;
		return b & 511;
	}
	return __stbi_zhuffman_decode_slowpath(a, z);
}

// Grow the output buffer so at least n more bytes fit past `zout`.
// Updates zout/zout_start/zout_end on success; returns 0 on failure
// (non-expandable buffer, size overflow, or realloc failure).
static int __stbi_zexpand(__stbi_zbuf* z, char* zout, int n) // need to make room for n bytes
{
	char* q;
	unsigned int cur, limit, old_limit;
	z->zout = zout;
	if (!z->z_expandable) return __stbi_err("output buffer limit", "Corrupt PNG");
	cur = (unsigned int)(z->zout - z->zout_start);
	limit = old_limit = (unsigned)(z->zout_end - z->zout_start);
	if (UINT_MAX - cur < (unsigned)n) return __stbi_err("outofmem", "Out of memory");
	// double until the request fits, guarding against unsigned overflow
	while (cur + n > limit)
	{
		if (limit > UINT_MAX / 2) return __stbi_err("outofmem", "Out of memory");
		limit *= 2;
	}
	// fix: old_limit had been commented out of the declaration/assignment
	// while still being passed to STBI_REALLOC_SIZED below, which breaks
	// any STBI_REALLOC_SIZED implementation that reads its old-size argument
	q = (char*)STBI_REALLOC_SIZED(z->zout_start, old_limit, limit);
	STBI_NOTUSED(old_limit);
	if (q == NULL) return __stbi_err("outofmem", "Out of memory");
	z->zout_start = q;
	z->zout = q + cur;
	z->zout_end = q + limit;
	return 1;
}

// DEFLATE (RFC 1951) length/distance code tables: base value plus number
// of extra bits for each length code (257..285) and distance code (0..29)
static const int __stbi_zlength_base[31] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0 };

static const int __stbi_zlength_extra[31] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0 };

static const int __stbi_zdist_base[32] = { 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0 };

static const int __stbi_zdist_extra[32] = { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 };

// Decode one Huffman-compressed DEFLATE block: literals are copied to the
// output, length/distance pairs are expanded from earlier output, and
// symbol 256 ends the block.  Returns 1 on success, 0 on corrupt data.
static int __stbi_parse_huffman_block(__stbi_zbuf* a)
{
	// local copy of the write cursor; flushed back to a->zout on exit and
	// whenever the buffer is expanded
	char* zout = a->zout;
	for (;;)
	{
		int z = __stbi_zhuffman_decode(a, &a->z_length);
		if (z < 256)
		{
			// literal byte (or decode error)
			if (z < 0) return __stbi_err("bad huffman code", "Corrupt PNG"); // error in huffman codes
			if (zout >= a->zout_end)
			{
				if (!__stbi_zexpand(a, zout, 1)) return 0;
				zout = a->zout;
			}
			*zout++ = (char)z;
		}
		else
		{
			stbi_uc* p;
			int len, dist;
			if (z == 256)
			{
				// end-of-block symbol
				a->zout = zout;
				return 1;
			}
			// length/distance pair: 257..285 map into the base/extra tables
			z -= 257;
			len = __stbi_zlength_base[z];
			if (__stbi_zlength_extra[z]) len += __stbi_zreceive(a, __stbi_zlength_extra[z]);
			z = __stbi_zhuffman_decode(a, &a->z_distance);
			if (z < 0) return __stbi_err("bad huffman code", "Corrupt PNG");
			dist = __stbi_zdist_base[z];
			if (__stbi_zdist_extra[z]) dist += __stbi_zreceive(a, __stbi_zdist_extra[z]);
			// the back-reference must not reach before the start of output
			if (zout - a->zout_start < dist) return __stbi_err("bad dist", "Corrupt PNG");
			if (zout + len > a->zout_end)
			{
				if (!__stbi_zexpand(a, zout, len)) return 0;
				zout = a->zout;
			}
			p = (stbi_uc*)(zout - dist);
			if (dist == 1)
			{ // run of one byte; common in images.
				stbi_uc v = *p;
				if (len)
				{
					do *zout++ = v;
					while (--len);
				}
			}
			else
			{
				// byte-by-byte copy is required: source and destination may
				// overlap when dist < len (deliberate RLE-style expansion)
				if (len)
				{
					do *zout++ = *p++;
					while (--len);
				}
			}
		}
	}
}

// Read a dynamic-Huffman block header (DEFLATE BTYPE 10): first decode the
// code-length alphabet, then use it to decode the literal/length and
// distance code lengths, and build both decoding tables.
static int __stbi_compute_huffman_codes(__stbi_zbuf* a)
{
	// order in which code-length code lengths are transmitted (RFC 1951)
	static const stbi_uc length_dezigzag[19] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
	__stbi_zhuffman z_codelength;
	stbi_uc lencodes[286 + 32 + 137]{}; //padding for maximum single op
	stbi_uc codelength_sizes[19];
	int i, n;

	int hlit = __stbi_zreceive(a, 5) + 257;
	int hdist = __stbi_zreceive(a, 5) + 1;
	int hclen = __stbi_zreceive(a, 4) + 4;
	int ntot = hlit + hdist;

	memset(codelength_sizes, 0, sizeof(codelength_sizes));
	for (i = 0; i < hclen; ++i)
	{
		int s = __stbi_zreceive(a, 3);
		codelength_sizes[length_dezigzag[i]] = (stbi_uc)s;
	}
	if (!__stbi_zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0;

	// decode hlit+hdist code lengths; 16/17/18 are repeat/zero-run codes
	n = 0;
	while (n < ntot)
	{
		int c = __stbi_zhuffman_decode(a, &z_codelength);
		if (c < 0 || c >= 19) return __stbi_err("bad codelengths", "Corrupt PNG");
		if (c < 16)
			lencodes[n++] = (stbi_uc)c;
		else
		{
			stbi_uc fill = 0;
			if (c == 16)
			{
				// repeat previous length 3-6 times
				c = __stbi_zreceive(a, 2) + 3;
				if (n == 0) return __stbi_err("bad codelengths", "Corrupt PNG");
				fill = lencodes[n - 1];
			}
			else if (c == 17)
			{
				// run of 3-10 zero lengths
				c = __stbi_zreceive(a, 3) + 3;
			}
			else if (c == 18)
			{
				// run of 11-138 zero lengths
				c = __stbi_zreceive(a, 7) + 11;
			}
			else
			{
				return __stbi_err("bad codelengths", "Corrupt PNG");
			}
			if (ntot - n < c) return __stbi_err("bad codelengths", "Corrupt PNG");
			memset(lencodes + n, fill, c);
			n += c;
		}
	}
	if (n != ntot) return __stbi_err("bad codelengths", "Corrupt PNG");
	// first hlit lengths are the literal/length table, the rest distances
	if (!__stbi_zbuild_huffman(&a->z_length, lencodes, hlit)) return 0;
	if (!__stbi_zbuild_huffman(&a->z_distance, lencodes + hlit, hdist)) return 0;
	return 1;
}

// Handle a stored (BTYPE 00) DEFLATE block: discard bit padding, read the
// 4-byte LEN/NLEN header (partly from the bit reservoir, partly from the
// raw stream), validate it, and memcpy the payload to the output.
static int __stbi_parse_uncompressed_block(__stbi_zbuf* a)
{
	stbi_uc header[4]{};
	int len{}, nlen{}, k{};
	if (a->num_bits & 7) __stbi_zreceive(a, a->num_bits & 7); // discard
	// drain the bit-packed value into header
	k = 0;
	while (a->num_bits > 0)
	{
		header[k++] = (stbi_uc)(a->code_buffer & 255); // suppress MSVC run-time check
		a->code_buffer >>= 8;
		a->num_bits -= 8;
	}
	if (a->num_bits < 0) return __stbi_err("zlib corrupt", "Corrupt PNG");
	// now fill header the normal way
	while (k < 4) header[k++] = __stbi_zget8(a);
	// LEN and its one's complement NLEN, both little-endian
	len = header[1] * 256 + header[0];
	nlen = header[3] * 256 + header[2];
	if (nlen != (len ^ 0xffff)) return __stbi_err("zlib corrupt", "Corrupt PNG");
	if (a->zbuffer + len > a->zbuffer_end) return __stbi_err("read past buffer", "Corrupt PNG");
	if (a->zout + len > a->zout_end)
		if (!__stbi_zexpand(a, a->zout, len)) return 0;
	memcpy(a->zout, a->zbuffer, len);
	a->zbuffer += len;
	a->zout += len;
	return 1;
}

// Validate the 2-byte zlib (RFC 1950) header: CMF then FLG.
// Returns 1 if the header is acceptable for PNG, 0 otherwise.
static int __stbi_parse_zlib_header(__stbi_zbuf* a)
{
	int cmf = __stbi_zget8(a);
	int flg = __stbi_zget8(a);
	int method = cmf & 15;
	/* the window size field (cmf >> 4) is ignored: we fully buffer output */
	if (__stbi_zeof(a)) return __stbi_err("bad zlib header", "Corrupt PNG"); // zlib spec
	if ((cmf * 256 + flg) % 31 != 0) return __stbi_err("bad zlib header", "Corrupt PNG"); // FCHECK, zlib spec
	if (flg & 32) return __stbi_err("no preset dict", "Corrupt PNG"); // preset dictionary not allowed in png
	if (method != 8) return __stbi_err("bad compression", "Corrupt PNG"); // DEFLATE required for png
	return 1;
}

// Fixed Huffman code lengths mandated by DEFLATE (RFC 1951 section 3.2.6):
// literals 0..143 use 8 bits, 144..255 use 9, 256..279 use 7, 280..287 use 8.
static const stbi_uc __stbi_zdefault_length[288] = {
	// 0..143: 8 bits
	8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
	8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
	// 144..255: 9 bits
	9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
	9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
	9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
	9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
	9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
	9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
	9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
	// 256..279: 7 bits
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	// 280..287: 8 bits
	8, 8, 8, 8, 8, 8, 8, 8
};
// All 32 fixed distance codes use 5 bits.
static const stbi_uc __stbi_zdefault_distance[32] = {
	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
};
/*
 Init algorithm:
 {
 int i;   // use <= to match clearly with spec
 for (i=0; i <= 143; ++i)     __stbi_zdefault_length[i]   = 8;
 for (   ; i <= 255; ++i)     __stbi_zdefault_length[i]   = 9;
 for (   ; i <= 279; ++i)     __stbi_zdefault_length[i]   = 7;
 for (   ; i <= 287; ++i)     __stbi_zdefault_length[i]   = 8;

 for (i=0; i <=  31; ++i)     __stbi_zdefault_distance[i] = 5;
 }
 */

// Decode the full sequence of DEFLATE blocks in the stream, optionally
// validating a zlib header first. Returns 1 on success, 0 on corrupt input.
static int __stbi_parse_zlib(__stbi_zbuf* a, int parse_header)
{
	int is_last, block_type;
	if (parse_header && !__stbi_parse_zlib_header(a)) return 0;
	a->num_bits = 0;
	a->code_buffer = 0;
	do
	{
		is_last = __stbi_zreceive(a, 1);
		block_type = __stbi_zreceive(a, 2);
		switch (block_type)
		{
		case 0: // stored (uncompressed) block
			if (!__stbi_parse_uncompressed_block(a)) return 0;
			break;
		case 1: // fixed Huffman tables defined by the DEFLATE spec
			if (!__stbi_zbuild_huffman(&a->z_length, __stbi_zdefault_length, 288)) return 0;
			if (!__stbi_zbuild_huffman(&a->z_distance, __stbi_zdefault_distance, 32)) return 0;
			if (!__stbi_parse_huffman_block(a)) return 0;
			break;
		case 2: // dynamic Huffman tables encoded in the stream
			if (!__stbi_compute_huffman_codes(a)) return 0;
			if (!__stbi_parse_huffman_block(a)) return 0;
			break;
		default: // type 3 is reserved/invalid
			return 0;
		}
	} while (!is_last);
	return 1;
}

// Bind an output buffer to the inflater state and run the decoder.
// exp marks whether the output buffer may be realloc-grown on demand.
static int __stbi_do_zlib(__stbi_zbuf* a, char* obuf, int olen, int exp, int parse_header)
{
	a->zout_start = obuf;
	a->zout = obuf;
	a->zout_end = obuf + olen;
	a->z_expandable = exp;
	return __stbi_parse_zlib(a, parse_header);
}

// Inflate a zlib stream into a freshly malloc'd buffer that starts at
// initial_size bytes and grows as needed. On success returns the buffer
// (caller frees) and stores the decoded length through outlen if non-NULL;
// returns NULL on failure.
STBIDEF char* stbi_zlib_decode_malloc_guesssize(const char* buffer, int len, int initial_size, int* outlen)
{
	__stbi_zbuf a{};
	char* out = (char*)__stbi_malloc(initial_size);
	if (out == NULL) return NULL;
	a.zbuffer = (stbi_uc*)buffer;
	a.zbuffer_end = (stbi_uc*)buffer + len;
	if (!__stbi_do_zlib(&a, out, initial_size, 1, 1))
	{
		STBI_FREE(a.zout_start); // zout_start tracks the (possibly regrown) buffer
		return NULL;
	}
	if (outlen) *outlen = (int)(a.zout - a.zout_start);
	return a.zout_start;
}

// Convenience wrapper: inflate with a default 16 KiB initial output guess.
STBIDEF char* stbi_zlib_decode_malloc(char const* buffer, int len, int* outlen)
{
	return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen);
}

// Same as stbi_zlib_decode_malloc_guesssize, but lets the caller choose
// whether a zlib header should be parsed (iphone CgBI PNGs omit it).
STBIDEF char* stbi_zlib_decode_malloc_guesssize_headerflag(const char* buffer, int len, int initial_size, int* outlen, int parse_header)
{
	__stbi_zbuf a{};
	char* out = (char*)__stbi_malloc(initial_size);
	if (out == NULL) return NULL;
	a.zbuffer = (stbi_uc*)buffer;
	a.zbuffer_end = (stbi_uc*)buffer + len;
	if (!__stbi_do_zlib(&a, out, initial_size, 1, parse_header))
	{
		STBI_FREE(a.zout_start); // zout_start tracks the (possibly regrown) buffer
		return NULL;
	}
	if (outlen) *outlen = (int)(a.zout - a.zout_start);
	return a.zout_start;
}

// Inflate a zlib stream into a caller-provided, fixed-size buffer.
// Returns the number of bytes produced, or -1 on failure/overflow.
STBIDEF int stbi_zlib_decode_buffer(char* obuffer, int olen, char const* ibuffer, int ilen)
{
	__stbi_zbuf a{};
	a.zbuffer = (stbi_uc*)ibuffer;
	a.zbuffer_end = (stbi_uc*)ibuffer + ilen;
	if (!__stbi_do_zlib(&a, obuffer, olen, 0, 1)) return -1;
	return (int)(a.zout - a.zout_start);
}

// Inflate a raw DEFLATE stream (no zlib header) into a freshly malloc'd
// buffer; caller frees. Stores the decoded length through outlen if non-NULL.
//
// Delegates to the general guess-size decoder with parse_header disabled and
// the same 16 KiB initial guess, eliminating the duplicated allocate/decode/
// free logic this function previously carried inline.
STBIDEF char* stbi_zlib_decode_noheader_malloc(char const* buffer, int len, int* outlen)
{
	return stbi_zlib_decode_malloc_guesssize_headerflag(buffer, len, 16384, outlen, 0);
}

// Inflate a raw DEFLATE stream (no zlib header) into a caller-provided,
// fixed-size buffer. Returns bytes produced, or -1 on failure.
STBIDEF int stbi_zlib_decode_noheader_buffer(char* obuffer, int olen, const char* ibuffer, int ilen)
{
	__stbi_zbuf a{};
	a.zbuffer = (stbi_uc*)ibuffer;
	a.zbuffer_end = (stbi_uc*)ibuffer + ilen;
	if (!__stbi_do_zlib(&a, obuffer, olen, 0, 0)) return -1;
	return (int)(a.zout - a.zout_start);
}
#endif

// public domain "baseline" PNG decoder   v0.10  Sean Barrett 2006-11-18
//    simple implementation
//      - only 8-bit samples
//      - no CRC checking
//      - allocates lots of intermediate memory
//        - avoids problem of streaming value between subsystems
//        - avoids explicit window management
//    performance
//      - uses stb_zlib, a PD zlib implementation with fast huffman decoding

#ifndef STBI_NO_PNG
// One PNG chunk header as read from the stream (both fields big-endian).
typedef struct
{
	__stbi_uint32 length; // payload length in bytes (excludes the trailing CRC)
	__stbi_uint32 type; // 4-character chunk tag, compare with STBI__PNG_TYPE
} __stbi_pngchunk;

// Read the next 8-byte chunk prefix: big-endian length, then the type tag.
static __stbi_pngchunk __stbi_get_chunk_header(__stbi_context* s)
{
	__stbi_pngchunk chunk{};
	chunk.length = __stbi_get32be(s);
	chunk.type = __stbi_get32be(s);
	return chunk;
}

// Consume and verify the 8-byte PNG file signature. Returns 1 on match,
// 0 (with an error set) otherwise.
static int __stbi_check_png_header(__stbi_context* s)
{
	static const stbi_uc png_sig[8] = { 137, 80, 78, 71, 13, 10, 26, 10 };
	for (int i = 0; i < 8; ++i)
	{
		if (__stbi_get8(s) != png_sig[i]) return __stbi_err("bad png sig", "Not a PNG");
	}
	return 1;
}

// Per-decode PNG state: the input context plus the three buffers a decode
// pass owns (compressed IDAT bytes, inflated scanlines, final pixel output).
typedef struct
{
	__stbi_context* s; // input stream / image header state
	stbi_uc *idata, *expanded, *out; // concatenated IDAT data, inflated data, decoded image
	int depth; // bits per component (1/2/4/8/16)
} __stbi_png;

// PNG scanline filter types (spec values 0..4), plus two synthetic variants
// applied on the first scanline, where the "prior row" is defined as all zeros.
enum
{
	STBI__F_none = 0,
	STBI__F_sub = 1,
	STBI__F_up = 2,
	STBI__F_avg = 3,
	STBI__F_paeth = 4,
	// synthetic filters used for first scanline to avoid needing a dummy row of 0s
	STBI__F_avg_first,
	STBI__F_paeth_first
};

// Maps each spec filter to its first-row equivalent (e.g. "up" of zeros is "none").
static stbi_uc first_row_filter[5] = { STBI__F_none, STBI__F_sub, STBI__F_none, STBI__F_avg_first, STBI__F_paeth_first };

// PNG Paeth predictor: of a (left), b (above), c (upper-left), pick whichever
// is closest to the linear estimate a + b - c, breaking ties in favor of a,
// then b, as the PNG specification requires.
static int __stbi_paeth(int a, int b, int c)
{
	int estimate = a + b - c;
	int da = abs(estimate - a);
	int db = abs(estimate - b);
	int dc = abs(estimate - c);
	if (da <= db && da <= dc) return a;
	return (db <= dc) ? b : c;
}

// Indexed by bit depth: multiplier that expands a maximal 1/2/4/8-bit sample
// to 255 (e.g. a 2-bit value 0..3 times 0x55 spans 0..255). Unused depths are 0.
static const stbi_uc __stbi_depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0, 0, 0, 0x01 };

// Generate the png value from post-deflated value
// Unfilter one (sub)image worth of inflated scanline data into a->out.
//   raw/raw_len : inflated bytes; each scanline is prefixed by a filter byte
//   out_n       : components per output pixel (img_n, or img_n+1 to force alpha)
//   x, y        : dimensions of this image (may be one interlace pass)
//   depth       : bits per component (1/2/4/8/16); color: PNG color type
// Returns 1 on success, 0 with an error set on corrupt data or OOM.
static int __stbi_create_png_image_raw(__stbi_png* a, stbi_uc* raw, __stbi_uint32 raw_len, int out_n, __stbi_uint32 x, __stbi_uint32 y, int depth, int color)
{
	int bytes = (depth == 16 ? 2 : 1);
	__stbi_context* s = a->s;
	__stbi_uint32 i, j, stride = x * out_n * bytes;
	__stbi_uint32 img_len, img_width_bytes;
	int k;
	int img_n = s->img_n; // copy it into a local for later

	int output_bytes = out_n * bytes;
	int filter_bytes = img_n * bytes; // distance to the "left" sample during unfiltering
	int width = x;

	STBI_ASSERT(out_n == s->img_n || out_n == s->img_n + 1);
	a->out = (stbi_uc*)__stbi_malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into
	if (!a->out) return __stbi_err("outofmem", "Out of memory");

	if (!__stbi_mad3sizes_valid(img_n, x, depth, 7)) return __stbi_err("too large", "Corrupt PNG");
	img_width_bytes = (((img_n * x * depth) + 7) >> 3);
	img_len = (img_width_bytes + 1) * y;

	// we used to check for exact match between raw_len and img_len on non-interlaced PNGs,
	// but issue #276 reported a PNG in the wild that had extra value at the end (all zeros),
	// so just check for raw_len < img_len always.
	if (raw_len < img_len) return __stbi_err("not enough pixels", "Corrupt PNG");

	for (j = 0; j < y; ++j)
	{
		stbi_uc* cur = a->out + stride * j;
		stbi_uc* prior;
		int filter = *raw++; // per-scanline filter type byte

		if (filter > 4) return __stbi_err("invalid filter", "Corrupt PNG");

		if (depth < 8)
		{
			if (img_width_bytes > x) return __stbi_err("invalid width", "Corrupt PNG");
			cur += x * out_n - img_width_bytes; // store output to the rightmost img_len bytes, so we can decode in place
			filter_bytes = 1;
			width = img_width_bytes;
		}
		prior = cur - stride; // bugfix: need to compute this after 'cur +=' computation above

		// if first row, use special filter that doesn't sample previous row
		if (j == 0) filter = first_row_filter[filter];

		// handle first byte explicitly
		for (k = 0; k < filter_bytes; ++k)
		{
			switch (filter)
			{
			case STBI__F_none: cur[k] = raw[k]; break;
			case STBI__F_sub: cur[k] = raw[k]; break;
			case STBI__F_up: cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break;
			case STBI__F_avg: cur[k] = STBI__BYTECAST(raw[k] + (prior[k] >> 1)); break;
			case STBI__F_paeth: cur[k] = STBI__BYTECAST(raw[k] + __stbi_paeth(0, prior[k], 0)); break;
			case STBI__F_avg_first: cur[k] = raw[k]; break;
			case STBI__F_paeth_first: cur[k] = raw[k]; break;
			}
		}

		// advance past the first pixel, forcing alpha to opaque if we add a channel
		if (depth == 8)
		{
			if (img_n != out_n) cur[img_n] = 255; // first pixel
			raw += img_n;
			cur += out_n;
			prior += out_n;
		}
		else if (depth == 16)
		{
			if (img_n != out_n)
			{
				cur[filter_bytes] = 255; // first pixel top byte
				cur[filter_bytes + 1] = 255; // first pixel bottom byte
			}
			raw += filter_bytes;
			cur += output_bytes;
			prior += output_bytes;
		}
		else
		{
			raw += 1;
			cur += 1;
			prior += 1;
		}

		// this is a little gross, so that we don't switch per-pixel or per-component
		if (depth < 8 || img_n == out_n)
		{
			int nk = (width - 1) * filter_bytes;
#define STBI__CASE(f) \
	case f:           \
		for (k = 0; k < nk; ++k)
			switch (filter)
			{
				// "none" filter turns into a memcpy here; make that explicit.
			case STBI__F_none:
				memcpy(cur, raw, nk);
				break;
				STBI__CASE(STBI__F_sub)
				{
					cur[k] = STBI__BYTECAST(raw[k] + cur[k - filter_bytes]);
				}
				break;
				STBI__CASE(STBI__F_up)
				{
					cur[k] = STBI__BYTECAST(raw[k] + prior[k]);
				}
				break;
				STBI__CASE(STBI__F_avg)
				{
					cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k - filter_bytes]) >> 1));
				}
				break;
				STBI__CASE(STBI__F_paeth)
				{
					cur[k] = STBI__BYTECAST(raw[k] + __stbi_paeth(cur[k - filter_bytes], prior[k], prior[k - filter_bytes]));
				}
				break;
				STBI__CASE(STBI__F_avg_first)
				{
					cur[k] = STBI__BYTECAST(raw[k] + (cur[k - filter_bytes] >> 1));
				}
				break;
				STBI__CASE(STBI__F_paeth_first)
				{
					cur[k] = STBI__BYTECAST(raw[k] + __stbi_paeth(cur[k - filter_bytes], 0, 0));
				}
				break;
			}
#undef STBI__CASE
			raw += nk;
		}
		else
		{
			// img_n + 1 == out_n: interleave a forced 255 alpha while unfiltering
			STBI_ASSERT(img_n + 1 == out_n);
#define STBI__CASE(f)                                                                                                          \
	case f:                                                                                                                    \
		for (i = x - 1; i >= 1; --i, cur[filter_bytes] = 255, raw += filter_bytes, cur += output_bytes, prior += output_bytes) \
			for (k = 0; k < filter_bytes; ++k)
			switch (filter)
			{
				STBI__CASE(STBI__F_none)
				{
					cur[k] = raw[k];
				}
				break;
				STBI__CASE(STBI__F_sub)
				{
					cur[k] = STBI__BYTECAST(raw[k] + cur[k - output_bytes]);
				}
				break;
				STBI__CASE(STBI__F_up)
				{
					cur[k] = STBI__BYTECAST(raw[k] + prior[k]);
				}
				break;
				STBI__CASE(STBI__F_avg)
				{
					cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k - output_bytes]) >> 1));
				}
				break;
				STBI__CASE(STBI__F_paeth)
				{
					cur[k] = STBI__BYTECAST(raw[k] + __stbi_paeth(cur[k - output_bytes], prior[k], prior[k - output_bytes]));
				}
				break;
				STBI__CASE(STBI__F_avg_first)
				{
					cur[k] = STBI__BYTECAST(raw[k] + (cur[k - output_bytes] >> 1));
				}
				break;
				STBI__CASE(STBI__F_paeth_first)
				{
					cur[k] = STBI__BYTECAST(raw[k] + __stbi_paeth(cur[k - output_bytes], 0, 0));
				}
				break;
			}
#undef STBI__CASE

			// the loop above sets the high byte of the pixels' alpha, but for
			// 16 bit png files we also need the low byte set. we'll do that here.
			if (depth == 16)
			{
				cur = a->out + stride * j; // start at the beginning of the row again
				for (i = 0; i < x; ++i, cur += output_bytes)
				{
					cur[filter_bytes + 1] = 255;
				}
			}
		}
	}

	// we make a separate pass to expand bits to pixels; for performance,
	// this could run two scanlines behind the above code, so it won't
	// intefere with filtering but will still be in the cache.
	if (depth < 8)
	{
		for (j = 0; j < y; ++j)
		{
			stbi_uc* cur = a->out + stride * j;
			stbi_uc* in = a->out + stride * j + x * out_n - img_width_bytes;
			// unpack 1/2/4-bit into a 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit
			// png guarante byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing value that will be skipped in the later loop
			stbi_uc scale = (color == 0) ? __stbi_depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range

			// note that the final byte might overshoot and write more value than desired.
			// we can allocate enough value that this never writes out of memory, but it
			// could also overwrite the next scanline. can it overwrite non-empty value
			// on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel.
			// so we need to explicitly clamp the final ones

			if (depth == 4)
			{
				for (k = x * img_n; k >= 2; k -= 2, ++in)
				{
					*cur++ = scale * ((*in >> 4));
					*cur++ = scale * ((*in) & 0x0f);
				}
				if (k > 0) *cur++ = scale * ((*in >> 4));
			}
			else if (depth == 2)
			{
				for (k = x * img_n; k >= 4; k -= 4, ++in)
				{
					*cur++ = scale * ((*in >> 6));
					*cur++ = scale * ((*in >> 4) & 0x03);
					*cur++ = scale * ((*in >> 2) & 0x03);
					*cur++ = scale * ((*in) & 0x03);
				}
				if (k > 0) *cur++ = scale * ((*in >> 6));
				if (k > 1) *cur++ = scale * ((*in >> 4) & 0x03);
				if (k > 2) *cur++ = scale * ((*in >> 2) & 0x03);
			}
			else if (depth == 1)
			{
				for (k = x * img_n; k >= 8; k -= 8, ++in)
				{
					*cur++ = scale * ((*in >> 7));
					*cur++ = scale * ((*in >> 6) & 0x01);
					*cur++ = scale * ((*in >> 5) & 0x01);
					*cur++ = scale * ((*in >> 4) & 0x01);
					*cur++ = scale * ((*in >> 3) & 0x01);
					*cur++ = scale * ((*in >> 2) & 0x01);
					*cur++ = scale * ((*in >> 1) & 0x01);
					*cur++ = scale * ((*in) & 0x01);
				}
				if (k > 0) *cur++ = scale * ((*in >> 7));
				if (k > 1) *cur++ = scale * ((*in >> 6) & 0x01);
				if (k > 2) *cur++ = scale * ((*in >> 5) & 0x01);
				if (k > 3) *cur++ = scale * ((*in >> 4) & 0x01);
				if (k > 4) *cur++ = scale * ((*in >> 3) & 0x01);
				if (k > 5) *cur++ = scale * ((*in >> 2) & 0x01);
				if (k > 6) *cur++ = scale * ((*in >> 1) & 0x01);
			}
			if (img_n != out_n)
			{
				int q;
				// insert alpha = 255; walk right-to-left so the widening is in place
				cur = a->out + stride * j;
				if (img_n == 1)
				{
					for (q = x - 1; q >= 0; --q)
					{
						cur[q * 2 + 1] = 255;
						cur[q * 2 + 0] = cur[q];
					}
				}
				else
				{
					STBI_ASSERT(img_n == 3);
					for (q = x - 1; q >= 0; --q)
					{
						cur[q * 4 + 3] = 255;
						cur[q * 4 + 2] = cur[q * 3 + 2];
						cur[q * 4 + 1] = cur[q * 3 + 1];
						cur[q * 4 + 0] = cur[q * 3 + 0];
					}
				}
			}
		}
	}
	else if (depth == 16)
	{
		// force the image value from big-endian to platform-native.
		// this is done in a separate pass due to the decoding relying
		// on the value being untouched, but could probably be done
		// per-line during decode if care is taken.
		stbi_uc* cur = a->out;
		__stbi_uint16* cur16 = (__stbi_uint16*)cur;

		for (i = 0; i < x * y * out_n; ++i, cur16++, cur += 2)
		{
			*cur16 = (cur[0] << 8) | cur[1];
		}
	}

	return 1;
}

// Decode the full PNG image: directly for non-interlaced files, or by running
// the raw decoder once per Adam7 pass and scattering each pass's pixels into
// their final positions for interlaced files.
// Fix: the de-interlacing buffer allocation was never checked for NULL, so an
// out-of-memory condition led straight to writes through a null pointer; fail
// with "outofmem" instead (matching __stbi_create_png_image_raw).
static int __stbi_create_png_image(__stbi_png* a, stbi_uc* image_data, __stbi_uint32 image_data_len, int out_n, int depth, int color, int interlaced)
{
	int bytes = (depth == 16 ? 2 : 1);
	int out_bytes = out_n * bytes;
	stbi_uc* final;
	int p;
	if (!interlaced) return __stbi_create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color);

	// de-interlacing
	final = (stbi_uc*)__stbi_malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0);
	if (!final) return __stbi_err("outofmem", "Out of memory");
	for (p = 0; p < 7; ++p)
	{
		// Adam7 pass geometry: origin and spacing of each pass within the image
		int xorig[] = { 0, 4, 0, 2, 0, 1, 0 };
		int yorig[] = { 0, 0, 4, 0, 2, 0, 1 };
		int xspc[] = { 8, 8, 4, 4, 2, 2, 1 };
		int yspc[] = { 8, 8, 8, 4, 4, 2, 2 };
		int i, j, x, y;
		// pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1
		x = (a->s->img_x - xorig[p] + xspc[p] - 1) / xspc[p];
		y = (a->s->img_y - yorig[p] + yspc[p] - 1) / yspc[p];
		if (x && y)
		{
			__stbi_uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y;
			if (!__stbi_create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color))
			{
				STBI_FREE(final);
				return 0;
			}
			// scatter this pass's pixels to their interleaved positions
			for (j = 0; j < y; ++j)
			{
				for (i = 0; i < x; ++i)
				{
					int out_y = j * yspc[p] + yorig[p];
					int out_x = i * xspc[p] + xorig[p];
					memcpy(final + out_y * a->s->img_x * out_bytes + out_x * out_bytes, a->out + (j * x + i) * out_bytes, out_bytes);
				}
			}
			STBI_FREE(a->out);
			image_data += img_len;
			image_data_len -= img_len;
		}
	}
	a->out = final;

	return 1;
}

// Apply an 8-bit tRNS color key: pixels whose color exactly matches tc get
// alpha 0. The decoder has already written 255 into every alpha slot.
static int __stbi_compute_transparency(__stbi_png* z, stbi_uc tc[3], int out_n)
{
	__stbi_context* s = z->s;
	__stbi_uint32 pixel_count = s->img_x * s->img_y;
	stbi_uc* p = z->out;

	STBI_ASSERT(out_n == 2 || out_n == 4);

	if (out_n == 2)
	{
		// gray+alpha: single-component key
		for (__stbi_uint32 i = 0; i < pixel_count; ++i, p += 2)
			p[1] = (p[0] == tc[0] ? 0 : 255);
	}
	else
	{
		// rgba: all three components must match the key
		for (__stbi_uint32 i = 0; i < pixel_count; ++i, p += 4)
			if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) p[3] = 0;
	}
	return 1;
}

// 16-bit variant of __stbi_compute_transparency: match against 16-bit key
// values and clear alpha (already initialized to 65535) on match.
static int __stbi_compute_transparency16(__stbi_png* z, __stbi_uint16 tc[3], int out_n)
{
	__stbi_context* s = z->s;
	__stbi_uint32 pixel_count = s->img_x * s->img_y;
	__stbi_uint16* p = (__stbi_uint16*)z->out;

	STBI_ASSERT(out_n == 2 || out_n == 4);

	if (out_n == 2)
	{
		// gray+alpha: single-component key
		for (__stbi_uint32 i = 0; i < pixel_count; ++i, p += 2)
			p[1] = (p[0] == tc[0] ? 0 : 65535);
	}
	else
	{
		// rgba: all three components must match the key
		for (__stbi_uint32 i = 0; i < pixel_count; ++i, p += 4)
			if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) p[3] = 0;
	}
	return 1;
}

// Replace the 1-byte-per-pixel index image in a->out with an expanded
// RGB (pal_img_n == 3) or RGBA (pal_img_n == 4) image looked up from the
// palette. The palette is stored as 4 bytes per entry regardless of pal_img_n.
static int __stbi_expand_png_palette(__stbi_png* a, stbi_uc* palette, int len, int pal_img_n)
{
	__stbi_uint32 pixel_count = a->s->img_x * a->s->img_y;
	stbi_uc* orig = a->out;

	stbi_uc* expanded = (stbi_uc*)__stbi_malloc_mad2(pixel_count, pal_img_n, 0);
	if (expanded == NULL) return __stbi_err("outofmem", "Out of memory");

	// between here and the free(a->out) below, an early exit would leak
	stbi_uc* dst = expanded;
	if (pal_img_n == 3)
	{
		for (__stbi_uint32 i = 0; i < pixel_count; ++i, dst += 3)
		{
			int n = orig[i] * 4; // palette entries are RGBA-packed
			dst[0] = palette[n];
			dst[1] = palette[n + 1];
			dst[2] = palette[n + 2];
		}
	}
	else
	{
		for (__stbi_uint32 i = 0; i < pixel_count; ++i, dst += 4)
		{
			int n = orig[i] * 4;
			dst[0] = palette[n];
			dst[1] = palette[n + 1];
			dst[2] = palette[n + 2];
			dst[3] = palette[n + 3];
		}
	}
	STBI_FREE(a->out);
	a->out = expanded;

	STBI_NOTUSED(len);

	return 1;
}

// Global decode options toggled by the public setters below (not threadsafe).
static int __stbi_unpremultiply_on_load = 0; // divide RGB by alpha when loading iphone PNGs
static int __stbi_de_iphone_flag = 0; // convert iphone BGR(A) PNGs back to RGB(A)

// Public toggle: when non-zero, iphone PNGs with premultiplied alpha are
// converted back to straight (un-premultiplied) alpha during load.
STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply)
{
	__stbi_unpremultiply_on_load = flag_true_if_should_unpremultiply;
}

// Public toggle: when non-zero, iphone CgBI PNGs (stored BGR) are converted
// to standard RGB order during load.
STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert)
{
	__stbi_de_iphone_flag = flag_true_if_should_convert;
}

// Undo Apple's CgBI pixel format in place: swap the B and R channels, and
// optionally un-premultiply alpha (rounding to nearest) for 4-channel output.
static void __stbi_de_iphone(__stbi_png* z)
{
	__stbi_context* s = z->s;
	__stbi_uint32 pixel_count = s->img_x * s->img_y;
	stbi_uc* p = z->out;

	if (s->img_out_n == 3)
	{
		// bgr -> rgb
		for (__stbi_uint32 i = 0; i < pixel_count; ++i, p += 3)
		{
			stbi_uc t = p[0];
			p[0] = p[2];
			p[2] = t;
		}
		return;
	}

	STBI_ASSERT(s->img_out_n == 4);
	if (__stbi_unpremultiply_on_load)
	{
		// bgra -> rgba while dividing color by alpha (half added for rounding)
		for (__stbi_uint32 i = 0; i < pixel_count; ++i, p += 4)
		{
			stbi_uc a = p[3];
			stbi_uc t = p[0];
			if (a)
			{
				stbi_uc half = a / 2;
				p[0] = (p[2] * 255 + half) / a;
				p[1] = (p[1] * 255 + half) / a;
				p[2] = (t * 255 + half) / a;
			}
			else
			{
				p[0] = p[2];
				p[2] = t;
			}
		}
	}
	else
	{
		// bgra -> rgba, alpha left untouched
		for (__stbi_uint32 i = 0; i < pixel_count; ++i, p += 4)
		{
			stbi_uc t = p[0];
			p[0] = p[2];
			p[2] = t;
		}
	}
}

// Pack a 4-character chunk tag (e.g. 'I','H','D','R') into a big-endian 32-bit value.
#define STBI__PNG_TYPE(a, b, c, d) (((unsigned)(a) << 24) + ((unsigned)(b) << 16) + ((unsigned)(c) << 8) + (unsigned)(d))

// Walk the PNG chunk stream, accumulating header/palette/transparency state,
// then inflate the IDAT data and build the final image at IEND.
//   scan     : STBI__SCAN_type (signature only), _header (dimensions/components),
//              or _load (full decode)
//   req_comp : requested output components, 0 = keep source
// Returns 1 on success, 0 with an error set.
//
// Fix: the IDAT growth path referenced idata_limit_old while its declaration
// (and the STBI_NOTUSED marker) had been commented out, so the function did
// not compile; the old limit is needed by STBI_REALLOC_SIZED, so restore it.
static int __stbi_parse_png_file(__stbi_png* z, int scan, int req_comp)
{
	stbi_uc palette[1024]{}, pal_img_n = 0;
	stbi_uc has_trans = 0, tc[3] = { 0 };
	__stbi_uint16 tc16[3]{};
	__stbi_uint32 ioff = 0, idata_limit = 0, i, pal_len = 0;
	int first = 1, k{}, interlace = 0, color = 0, is_iphone = 0;
	__stbi_context* s = z->s;

	z->expanded = NULL;
	z->idata = NULL;
	z->out = NULL;

	if (!__stbi_check_png_header(s)) return 0;

	if (scan == STBI__SCAN_type) return 1;

	for (;;)
	{
		__stbi_pngchunk c = __stbi_get_chunk_header(s);
		switch (c.type)
		{
		case STBI__PNG_TYPE('C', 'g', 'B', 'I'):
			// Apple extension: marks a headerless-deflate, BGR-order PNG
			is_iphone = 1;
			__stbi_skip(s, c.length);
			break;
		case STBI__PNG_TYPE('I', 'H', 'D', 'R'): {
			int comp, filter;
			if (!first) return __stbi_err("multiple IHDR", "Corrupt PNG");
			first = 0;
			if (c.length != 13) return __stbi_err("bad IHDR len", "Corrupt PNG");
			s->img_x = __stbi_get32be(s);
			s->img_y = __stbi_get32be(s);
			if (s->img_y > STBI_MAX_DIMENSIONS) return __stbi_err("too large", "Very large image (corrupt?)");
			if (s->img_x > STBI_MAX_DIMENSIONS) return __stbi_err("too large", "Very large image (corrupt?)");
			z->depth = __stbi_get8(s);
			if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 16) return __stbi_err("1/2/4/8/16-bit only", "PNG not supported: 1/2/4/8/16-bit only");
			color = __stbi_get8(s);
			if (color > 6) return __stbi_err("bad ctype", "Corrupt PNG");
			if (color == 3 && z->depth == 16) return __stbi_err("bad ctype", "Corrupt PNG");
			if (color == 3)
				pal_img_n = 3;
			else if (color & 1)
				return __stbi_err("bad ctype", "Corrupt PNG");
			comp = __stbi_get8(s);
			if (comp) return __stbi_err("bad comp method", "Corrupt PNG");
			filter = __stbi_get8(s);
			if (filter) return __stbi_err("bad filter method", "Corrupt PNG");
			interlace = __stbi_get8(s);
			if (interlace > 1) return __stbi_err("bad interlace method", "Corrupt PNG");
			if (!s->img_x || !s->img_y) return __stbi_err("0-pixel image", "Corrupt PNG");
			if (!pal_img_n)
			{
				s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0);
				if ((1 << 30) / s->img_x / s->img_n < s->img_y) return __stbi_err("too large", "Image too large to decode");
				if (scan == STBI__SCAN_header) return 1;
			}
			else
			{
				// if paletted, then pal_n is our final components, and
				// img_n is # components to decompress/filter.
				s->img_n = 1;
				if ((1 << 30) / s->img_x / 4 < s->img_y) return __stbi_err("too large", "Corrupt PNG");
				// if SCAN_header, have to scan to see if we have a tRNS
			}
			break;
		}

		case STBI__PNG_TYPE('P', 'L', 'T', 'E'): {
			if (first) return __stbi_err("first not IHDR", "Corrupt PNG");
			if (c.length > 256 * 3) return __stbi_err("invalid PLTE", "Corrupt PNG");
			pal_len = c.length / 3;
			if (pal_len * 3 != c.length) return __stbi_err("invalid PLTE", "Corrupt PNG");
			// store 4 bytes per entry; alpha may be overwritten later by tRNS
			for (i = 0; i < pal_len; ++i)
			{
				palette[i * 4 + 0] = __stbi_get8(s);
				palette[i * 4 + 1] = __stbi_get8(s);
				palette[i * 4 + 2] = __stbi_get8(s);
				palette[i * 4 + 3] = 255;
			}
			break;
		}

		case STBI__PNG_TYPE('t', 'R', 'N', 'S'): {
			if (first) return __stbi_err("first not IHDR", "Corrupt PNG");
			if (z->idata) return __stbi_err("tRNS after IDAT", "Corrupt PNG");
			if (pal_img_n)
			{
				if (scan == STBI__SCAN_header)
				{
					s->img_n = 4;
					return 1;
				}
				if (pal_len == 0) return __stbi_err("tRNS before PLTE", "Corrupt PNG");
				if (c.length > pal_len) return __stbi_err("bad tRNS len", "Corrupt PNG");
				pal_img_n = 4;
				for (i = 0; i < c.length; ++i) palette[i * 4 + 3] = __stbi_get8(s);
			}
			else
			{
				if (!(s->img_n & 1)) return __stbi_err("tRNS with alpha", "Corrupt PNG");
				if (c.length != (__stbi_uint32)s->img_n * 2) return __stbi_err("bad tRNS len", "Corrupt PNG");
				has_trans = 1;
				if (z->depth == 16)
				{
					for (k = 0; k < s->img_n; ++k) tc16[k] = (__stbi_uint16)__stbi_get16be(s); // copy the values as-is
				}
				else
				{
					for (k = 0; k < s->img_n; ++k) tc[k] = (stbi_uc)(__stbi_get16be(s) & 255) * __stbi_depth_scale_table[z->depth]; // non 8-bit images will be larger
				}
			}
			break;
		}

		case STBI__PNG_TYPE('I', 'D', 'A', 'T'): {
			if (first) return __stbi_err("first not IHDR", "Corrupt PNG");
			if (pal_img_n && !pal_len) return __stbi_err("no PLTE", "Corrupt PNG");
			if (scan == STBI__SCAN_header)
			{
				s->img_n = pal_img_n;
				return 1;
			}
			if ((int)(ioff + c.length) < (int)ioff) return 0; // overflow guard
			if (ioff + c.length > idata_limit)
			{
				// grow the concatenated-IDAT buffer geometrically
				__stbi_uint32 idata_limit_old = idata_limit;
				stbi_uc* p;
				if (idata_limit == 0) idata_limit = c.length > 4096 ? c.length : 4096;
				while (ioff + c.length > idata_limit) idata_limit *= 2;
				STBI_NOTUSED(idata_limit_old);
				p = (stbi_uc*)STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit);
				if (p == NULL) return __stbi_err("outofmem", "Out of memory");
				z->idata = p;
			}
			if (!__stbi_getn(s, z->idata + ioff, c.length)) return __stbi_err("outofdata", "Corrupt PNG");
			ioff += c.length;
			break;
		}

		case STBI__PNG_TYPE('I', 'E', 'N', 'D'): {
			__stbi_uint32 raw_len, bpl;
			if (first) return __stbi_err("first not IHDR", "Corrupt PNG");
			if (scan != STBI__SCAN_load) return 1;
			if (z->idata == NULL) return __stbi_err("no IDAT", "Corrupt PNG");
			// initial guess for decoded value size to avoid unnecessary reallocs
			bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component
			raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter figureMode per row */;
			z->expanded = (stbi_uc*)stbi_zlib_decode_malloc_guesssize_headerflag((char*)z->idata, ioff, raw_len, (int*)&raw_len, (is_iphone ? 0 : 1));
			if (z->expanded == NULL) return 0; // zlib should set error
			STBI_FREE(z->idata);
			z->idata = NULL;
			if ((req_comp == s->img_n + 1 && req_comp != 3 && !pal_img_n) || has_trans)
				s->img_out_n = s->img_n + 1;
			else
				s->img_out_n = s->img_n;
			if (!__stbi_create_png_image(z, z->expanded, raw_len, s->img_out_n, z->depth, color, interlace)) return 0;
			if (has_trans)
			{
				if (z->depth == 16)
				{
					if (!__stbi_compute_transparency16(z, tc16, s->img_out_n)) return 0;
				}
				else
				{
					if (!__stbi_compute_transparency(z, tc, s->img_out_n)) return 0;
				}
			}
			if (is_iphone && __stbi_de_iphone_flag && s->img_out_n > 2) __stbi_de_iphone(z);
			if (pal_img_n)
			{
				// pal_img_n == 3 or 4
				s->img_n = pal_img_n; // record the actual colors we had
				s->img_out_n = pal_img_n;
				if (req_comp >= 3) s->img_out_n = req_comp;
				if (!__stbi_expand_png_palette(z, palette, pal_len, s->img_out_n)) return 0;
			}
			else if (has_trans)
			{
				// non-paletted image with tRNS -> source image has (constant) alpha
				++s->img_n;
			}
			STBI_FREE(z->expanded);
			z->expanded = NULL;
			// end of PNG chunk, read and skip CRC
			__stbi_get32be(s);
			return 1;
		}

		default:
			// if critical, fail
			if (first) return __stbi_err("first not IHDR", "Corrupt PNG");
			if ((c.type & (1 << 29)) == 0)
			{
#ifndef STBI_NO_FAILURE_STRINGS
				// not threadsafe
				static char invalid_chunk[] = "XXXX PNG chunk not known";
				invalid_chunk[0] = STBI__BYTECAST(c.type >> 24);
				invalid_chunk[1] = STBI__BYTECAST(c.type >> 16);
				invalid_chunk[2] = STBI__BYTECAST(c.type >> 8);
				invalid_chunk[3] = STBI__BYTECAST(c.type >> 0);
#endif
				return __stbi_err(invalid_chunk, "PNG not supported: unknown PNG chunk type");
			}
			__stbi_skip(s, c.length);
			break;
		}
		// end of PNG chunk, read and skip CRC
		__stbi_get32be(s);
	}
}

// Run the PNG parser and hand the decoded pixels to the caller.
// On success returns the pixel buffer (ownership transfers to the caller,
// converted to req_comp components if requested) and fills x/y/n; on failure
// returns NULL. All intermediate buffers are freed on every path.
static void* __stbi_do_png(__stbi_png* p, int* x, int* y, int* n, int req_comp, __stbi_result_info* ri)
{
	void* result = NULL;
	if (req_comp < 0 || req_comp > 4) return __stbi_errpuc("bad req_comp", "Internal error");
	if (__stbi_parse_png_file(p, STBI__SCAN_load, req_comp))
	{
		if (p->depth <= 8)
			ri->bits_per_channel = 8;
		else if (p->depth == 16)
			ri->bits_per_channel = 16;
		else
			return __stbi_errpuc("bad bits_per_channel", "PNG not supported: unsupported color depth");
		// detach the buffer from p so the cleanup below can't free it
		result = p->out;
		p->out = NULL;
		if (req_comp && req_comp != p->s->img_out_n)
		{
			if (ri->bits_per_channel == 8)
				result = __stbi_convert_format((unsigned char*)result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y);
			else
				result = __stbi_convert_format16((__stbi_uint16*)result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y);
			p->s->img_out_n = req_comp;
			if (result == NULL) return result;
		}
		*x = p->s->img_x;
		*y = p->s->img_y;
		if (n) *n = p->s->img_n;
	}
	// release whatever the parser left behind, success or failure
	STBI_FREE(p->out);
	p->out = NULL;
	STBI_FREE(p->expanded);
	p->expanded = NULL;
	STBI_FREE(p->idata);
	p->idata = NULL;

	return result;
}

// Format-dispatch entry point: decode a PNG from the given context.
static void* __stbi_png_load(__stbi_context* s, int* x, int* y, int* comp, int req_comp, __stbi_result_info* ri)
{
	__stbi_png png{};
	png.s = s;
	return __stbi_do_png(&png, x, y, comp, req_comp, ri);
}

// Non-destructive sniff: check the PNG signature, then rewind the stream.
static int __stbi_png_test(__stbi_context* s)
{
	int ok = __stbi_check_png_header(s);
	__stbi_rewind(s);
	return ok;
}

// Header-only parse: report dimensions and component count without decoding
// pixels. Rewinds the stream and returns 0 on failure.
static int __stbi_png_info_raw(__stbi_png* p, int* x, int* y, int* comp)
{
	if (__stbi_parse_png_file(p, STBI__SCAN_header, 0))
	{
		if (x) *x = p->s->img_x;
		if (y) *y = p->s->img_y;
		if (comp) *comp = p->s->img_n;
		return 1;
	}
	__stbi_rewind(p->s);
	return 0;
}

// Public-facing info query: wraps __stbi_png_info_raw with fresh decoder state.
static int __stbi_png_info(__stbi_context* s, int* x, int* y, int* comp)
{
	__stbi_png png{};
	png.s = s;
	return __stbi_png_info_raw(&png, x, y, comp);
}

// Report whether the PNG stores 16 bits per component; rewinds on "no".
static int __stbi_png_is16(__stbi_context* s)
{
	__stbi_png png{};
	png.s = s;
	if (!__stbi_png_info_raw(&png, NULL, NULL, NULL)) return 0;
	if (png.depth == 16) return 1;
	__stbi_rewind(png.s);
	return 0;
}
#endif

// Microsoft/Windows BMP image

#ifndef STBI_NO_BMP
// Sniff a BMP: "BM" magic, then skip ahead to the DIB header size field and
// accept only header sizes this decoder knows how to parse.
static int __stbi_bmp_test_raw(__stbi_context* s)
{
	if (__stbi_get8(s) != 'B') return 0;
	if (__stbi_get8(s) != 'M') return 0;
	__stbi_get32le(s); // filesize
	__stbi_get16le(s); // reserved
	__stbi_get16le(s); // reserved
	__stbi_get32le(s); // pixel data offset
	int sz = __stbi_get32le(s);
	return sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124;
}

// Non-destructive BMP sniff: run the raw test, then rewind the stream.
static int __stbi_bmp_test(__stbi_context* s)
{
	int ok = __stbi_bmp_test_raw(s);
	__stbi_rewind(s);
	return ok;
}

// returns 0..31 for the highest set bit
// Index (0..31) of the highest set bit of z; -1 when z == 0.
static int __stbi_high_bit(unsigned int z)
{
	int pos = 0;
	if (z == 0) return -1;
	// binary search: successively halve the window holding the top bit
	if (z >= 0x10000)
	{
		pos += 16;
		z >>= 16;
	}
	if (z >= 0x00100)
	{
		pos += 8;
		z >>= 8;
	}
	if (z >= 0x00010)
	{
		pos += 4;
		z >>= 4;
	}
	if (z >= 0x00004)
	{
		pos += 2;
		z >>= 2;
	}
	if (z >= 0x00002)
	{
		pos += 1; // final halving needs no shift
	}
	return pos;
}

// Population count (number of set bits) via parallel bit summation.
static int __stbi_bitcount(unsigned int a)
{
	a = (a & 0x55555555) + ((a >> 1) & 0x55555555); // 2-bit sums, each <= 2
	a = (a & 0x33333333) + ((a >> 2) & 0x33333333); // 4-bit sums, each <= 4
	a = (a + (a >> 4)) & 0x0f0f0f0f; // byte sums, each <= 8
	a += a >> 8; // fold byte pairs
	a += a >> 16; // fold halfwords
	return a & 0xff; // low byte now holds the total (max 32)
}

// extract an arbitrarily-aligned N-bit value (N=bits)
// from v, and then make it 8-bits long and fractionally
// extend it to full full range.
// extract an arbitrarily-aligned N-bit value (N=bits)
// from v, and then make it 8-bits long and fractionally
// extend it to full full range.
//   v     - masked source value (only the field's bits may be set)
//   shift - right shift that moves the field's top bit to bit #7
//           (negative means left shift by -shift)
//   bits  - width of the field; must be in 0..8
static int __stbi_shiftsigned(unsigned int v, int shift, int bits)
{
	// multiplying an n-bit value by mul_table[n] replicates it across
	// 8 bits; shift_table[n] drops the excess so the result is 0..255
	static const unsigned int mul_table[9] = {
		0,
		0xff /*0b11111111*/,
		0x55 /*0b01010101*/,
		0x49 /*0b01001001*/,
		0x11 /*0b00010001*/,
		0x21 /*0b00100001*/,
		0x41 /*0b01000001*/,
		0x81 /*0b10000001*/,
		0x01 /*0b00000001*/,
	};
	static const unsigned int shift_table[9] = {
		0,
		0,
		0,
		1,
		0,
		2,
		4,
		6,
		0,
	};
	// validate 'bits' BEFORE using it: the original code performed
	// v >>= (8 - bits) and indexed the tables first, which is undefined
	// behavior / an out-of-bounds read when bits is outside 0..8
	STBI_ASSERT(bits >= 0 && bits <= 8);
	if (shift < 0)
		v <<= -shift;
	else
		v >>= shift;
	STBI_ASSERT(v < 256);
	v >>= (8 - bits);
	return (int)((unsigned)v * mul_table[bits]) >> shift_table[bits];
}

// Parsed BMP header state, filled by __stbi_bmp_parse_header and consumed
// by __stbi_bmp_load / the info query.
typedef struct
{
	int bpp, offset, hsz; // bits per pixel; file offset of pixel data; DIB header size
	unsigned int mr, mg, mb, ma, all_a; // R/G/B/A channel bit-masks; all_a accumulates alpha bits actually seen
	int extra_read; // bytes consumed before the DIB header (14-byte file header, plus 12 if BI_BITFIELDS masks were read)
} __stbi_bmp_data;

// Parse the 14-byte BMP file header plus the DIB header into 'info'.
// Supported DIB header sizes: 12 (old core header, 16-bit dimensions),
// 40/56 (info header variants), 108 (V4), 124 (V5). RLE-compressed files
// are rejected. Returns (void*)1 on success; NULL on error with the error
// string already set. On success the stream is positioned just past the
// parsed header fields.
static void* __stbi_bmp_parse_header(__stbi_context* s, __stbi_bmp_data* info)
{
	int hsz{};
	if (__stbi_get8(s) != 'B' || __stbi_get8(s) != 'M') return __stbi_errpuc("not BMP", "Corrupt BMP");
	__stbi_get32le(s); // discard filesize
	__stbi_get16le(s); // discard reserved
	__stbi_get16le(s); // discard reserved
	info->offset = __stbi_get32le(s);
	info->hsz = hsz = __stbi_get32le(s);
	info->mr = info->mg = info->mb = info->ma = 0;
	info->extra_read = 14;

	// offset is stored as int; a negative value means the 32-bit field
	// overflowed, i.e. a corrupt file
	if (info->offset < 0) return __stbi_errpuc("bad BMP", "bad BMP");

	if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return __stbi_errpuc("unknown BMP", "BMP type not supported: unknown");
	if (hsz == 12)
	{
		// old-style core header: 16-bit width/height
		s->img_x = __stbi_get16le(s);
		s->img_y = __stbi_get16le(s);
	}
	else
	{
		s->img_x = __stbi_get32le(s);
		s->img_y = __stbi_get32le(s);
	}
	// planes field must be 1 per the BMP spec
	if (__stbi_get16le(s) != 1) return __stbi_errpuc("bad BMP", "bad BMP");
	info->bpp = __stbi_get16le(s);
	if (hsz != 12)
	{
		int compress = __stbi_get32le(s);
		if (compress == 1 || compress == 2) return __stbi_errpuc("BMP RLE", "BMP type not supported: RLE");
		__stbi_get32le(s); // discard sizeof
		__stbi_get32le(s); // discard hres
		__stbi_get32le(s); // discard vres
		__stbi_get32le(s); // discard colorsused
		__stbi_get32le(s); // discard max important
		if (hsz == 40 || hsz == 56)
		{
			if (hsz == 56)
			{
				// 56-byte variant carries four extra fields we ignore
				__stbi_get32le(s);
				__stbi_get32le(s);
				__stbi_get32le(s);
				__stbi_get32le(s);
			}
			if (info->bpp == 16 || info->bpp == 32)
			{
				if (compress == 0)
				{
					// no BI_BITFIELDS: use the default channel layouts
					if (info->bpp == 32)
					{
						info->mr = 0xffu << 16;
						info->mg = 0xffu << 8;
						info->mb = 0xffu << 0;
						info->ma = 0xffu << 24;
						info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0
					}
					else
					{
						// 16bpp default is 5-5-5
						info->mr = 31u << 10;
						info->mg = 31u << 5;
						info->mb = 31u << 0;
					}
				}
				else if (compress == 3)
				{
					// BI_BITFIELDS: explicit per-channel masks follow the header
					info->mr = __stbi_get32le(s);
					info->mg = __stbi_get32le(s);
					info->mb = __stbi_get32le(s);
					info->extra_read += 12;
					// not documented, but generated by photoshop and handled by mspaint
					if (info->mr == info->mg && info->mg == info->mb)
					{
						// ?!?!?
						return __stbi_errpuc("bad BMP", "bad BMP");
					}
				}
				else
					return __stbi_errpuc("bad BMP", "bad BMP");
			}
		}
		else
		{
			// V4/V5 headers always embed the four channel masks
			int i;
			if (hsz != 108 && hsz != 124) return __stbi_errpuc("bad BMP", "bad BMP");
			info->mr = __stbi_get32le(s);
			info->mg = __stbi_get32le(s);
			info->mb = __stbi_get32le(s);
			info->ma = __stbi_get32le(s);
			__stbi_get32le(s); // discard color space
			for (i = 0; i < 12; ++i) __stbi_get32le(s); // discard color space parameters
			if (hsz == 124)
			{
				__stbi_get32le(s); // discard rendering intent
				__stbi_get32le(s); // discard offset of profile value
				__stbi_get32le(s); // discard size of profile value
				__stbi_get32le(s); // discard reserved
			}
		}
	}
	return (void*)1;
}

// Decode a BMP into an interleaved 8-bit buffer.
// Handles paletted 1/4/8-bpp and masked 16/24/32-bpp images (RLE was already
// rejected by the header parser). Output has 'target' components per pixel:
// req_comp when >= 3, otherwise the image's natural count; a mono request is
// satisfied by post-conversion at the end. Returns malloc'd pixels, or NULL
// with the error string set.
static void* __stbi_bmp_load(__stbi_context* s, int* x, int* y, int* comp, int req_comp, __stbi_result_info* ri)
{
	stbi_uc* out{};
	unsigned int mr = 0, mg = 0, mb = 0, ma = 0, all_a;
	stbi_uc pal[256][4]{};
	int point_size = 0, i, j, width;
	int flip_vertically, pad, target;
	__stbi_bmp_data info{};
	STBI_NOTUSED(ri);

	info.all_a = 255;
	if (__stbi_bmp_parse_header(s, &info) == NULL) return NULL; // error code already set

	// positive height means bottom-up storage, so we must flip at the end
	flip_vertically = ((int)s->img_y) > 0;
	s->img_y = abs((int)s->img_y);

	if (s->img_y > STBI_MAX_DIMENSIONS) return __stbi_errpuc("too large", "Very large image (corrupt?)");
	if (s->img_x > STBI_MAX_DIMENSIONS) return __stbi_errpuc("too large", "Very large image (corrupt?)");

	mr = info.mr;
	mg = info.mg;
	mb = info.mb;
	ma = info.ma;
	all_a = info.all_a;

	// derive the palette entry count from the gap between the header end
	// and the pixel-data offset (3 bytes/entry for the old 12-byte header,
	// 4 otherwise)
	if (info.hsz == 12)
	{
		if (info.bpp < 24) point_size = (info.offset - info.extra_read - 24) / 3;
	}
	else
	{
		if (info.bpp < 16) point_size = (info.offset - info.extra_read - info.hsz) >> 2;
	}
	if (point_size == 0)
	{
		// no palette: the pixel data must start exactly where we are now
		STBI_ASSERT(info.offset == s->callback_already_read + (int)(s->img_buffer - s->img_buffer_original));
		auto off = s->callback_already_read + (s->img_buffer - s->img_buffer_original);
		if (info.offset != off)
		{
			return __stbi_errpuc("bad offset", "Corrupt BMP");
		}
	}

	if (info.bpp == 24 && ma == 0xff000000)
		s->img_n = 3;
	else
		s->img_n = ma ? 4 : 3;
	if (req_comp && req_comp >= 3) // we can directly decode 3 or 4
		target = req_comp;
	else
		target = s->img_n; // if they want monochrome, we'll post-convert

	// sanity-check size
	if (!__stbi_mad3sizes_valid(target, s->img_x, s->img_y, 0)) return __stbi_errpuc("too large", "Corrupt BMP");

	out = (stbi_uc*)__stbi_malloc_mad3(target, s->img_x, s->img_y, 0);
	if (!out) return __stbi_errpuc("outofmem", "Out of memory");
	if (info.bpp < 16)
	{
		// paletted path: read the BGR(A) palette, then expand indices
		int z = 0;
		if (point_size == 0 || point_size > 256)
		{
			STBI_FREE(out);
			return __stbi_errpuc("invalid", "Corrupt BMP");
		}
		for (i = 0; i < point_size; ++i)
		{
			pal[i][2] = __stbi_get8(s);
			pal[i][1] = __stbi_get8(s);
			pal[i][0] = __stbi_get8(s);
			if (info.hsz != 12) __stbi_get8(s);
			pal[i][3] = 255;
		}
		// skip any slack between the palette and the pixel data
		__stbi_skip(s, info.offset - info.extra_read - info.hsz - point_size * (info.hsz == 12 ? 3 : 4));
		if (info.bpp == 1)
			width = (s->img_x + 7) >> 3;
		else if (info.bpp == 4)
			width = (s->img_x + 1) >> 1;
		else if (info.bpp == 8)
			width = s->img_x;
		else
		{
			STBI_FREE(out);
			return __stbi_errpuc("bad bpp", "Corrupt BMP");
		}
		// rows are padded to a multiple of 4 bytes
		pad = (-width) & 3;
		if (info.bpp == 1)
		{
			for (j = 0; j < (int)s->img_y; ++j)
			{
				int bit_offset = 7, v = __stbi_get8(s);
				for (i = 0; i < (int)s->img_x; ++i)
				{
					int color = (v >> bit_offset) & 0x1;
					out[z++] = pal[color][0];
					out[z++] = pal[color][1];
					out[z++] = pal[color][2];
					if (target == 4) out[z++] = 255;
					if (i + 1 == (int)s->img_x) break;
					if ((--bit_offset) < 0)
					{
						bit_offset = 7;
						v = __stbi_get8(s);
					}
				}
				__stbi_skip(s, pad);
			}
		}
		else
		{
			// 4 or 8 bpp: process two pixels per loop iteration
			for (j = 0; j < (int)s->img_y; ++j)
			{
				for (i = 0; i < (int)s->img_x; i += 2)
				{
					int v = __stbi_get8(s), v2 = 0;
					if (info.bpp == 4)
					{
						v2 = v & 15;
						v >>= 4;
					}
					out[z++] = pal[v][0];
					out[z++] = pal[v][1];
					out[z++] = pal[v][2];
					if (target == 4) out[z++] = 255;
					if (i + 1 == (int)s->img_x) break;
					v = (info.bpp == 8) ? __stbi_get8(s) : v2;
					out[z++] = pal[v][0];
					out[z++] = pal[v][1];
					out[z++] = pal[v][2];
					if (target == 4) out[z++] = 255;
				}
				__stbi_skip(s, pad);
			}
		}
	}
	else
	{
		// truecolor path: 16/24/32 bpp, decoded via channel masks or the
		// fast 'easy' cases for plain BGR / BGRA layouts
		int rshift = 0, gshift = 0, bshift = 0, ashift = 0, rcount = 0, gcount = 0, bcount = 0, acount = 0;
		int z = 0;
		int easy = 0;
		__stbi_skip(s, info.offset - info.extra_read - info.hsz);
		if (info.bpp == 24)
			width = 3 * s->img_x;
		else if (info.bpp == 16)
			width = 2 * s->img_x;
		else /* bpp = 32 and pad = 0 */
			width = 0;
		pad = (-width) & 3;
		if (info.bpp == 24)
		{
			easy = 1;
		}
		else if (info.bpp == 32)
		{
			if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000) easy = 2;
		}
		if (!easy)
		{
			if (!mr || !mg || !mb)
			{
				STBI_FREE(out);
				return __stbi_errpuc("bad masks", "Corrupt BMP");
			}
			// right shift amt to put high bit in position #7
			rshift = __stbi_high_bit(mr) - 7;
			rcount = __stbi_bitcount(mr);
			gshift = __stbi_high_bit(mg) - 7;
			gcount = __stbi_bitcount(mg);
			bshift = __stbi_high_bit(mb) - 7;
			bcount = __stbi_bitcount(mb);
			ashift = __stbi_high_bit(ma) - 7;
			acount = __stbi_bitcount(ma);
			if (rcount > 8 || gcount > 8 || bcount > 8 || acount > 8)
			{
				STBI_FREE(out);
				return __stbi_errpuc("bad masks", "Corrupt BMP");
			}
		}
		for (j = 0; j < (int)s->img_y; ++j)
		{
			if (easy)
			{
				for (i = 0; i < (int)s->img_x; ++i)
				{
					unsigned char a;
					// file stores BGR; emit RGB
					out[z + 2] = __stbi_get8(s);
					out[z + 1] = __stbi_get8(s);
					out[z + 0] = __stbi_get8(s);
					z += 3;
					a = (easy == 2 ? __stbi_get8(s) : 255);
					all_a |= a;
					if (target == 4) out[z++] = a;
				}
			}
			else
			{
				int bpp = info.bpp;
				for (i = 0; i < (int)s->img_x; ++i)
				{
					__stbi_uint32 v = (bpp == 16 ? (__stbi_uint32)__stbi_get16le(s) : __stbi_get32le(s));
					unsigned int a;
					out[z++] = STBI__BYTECAST(__stbi_shiftsigned(v & mr, rshift, rcount));
					out[z++] = STBI__BYTECAST(__stbi_shiftsigned(v & mg, gshift, gcount));
					out[z++] = STBI__BYTECAST(__stbi_shiftsigned(v & mb, bshift, bcount));
					a = (ma ? __stbi_shiftsigned(v & ma, ashift, acount) : 255);
					all_a |= a;
					if (target == 4) out[z++] = STBI__BYTECAST(a);
				}
			}
			__stbi_skip(s, pad);
		}
	}

	// if alpha channel is all 0s, replace with all 255s
	if (target == 4 && all_a == 0)
		for (i = 4 * s->img_x * s->img_y - 1; i >= 0; i -= 4) out[i] = 255;

	if (flip_vertically)
	{
		stbi_uc t;
		for (j = 0; j < (int)s->img_y >> 1; ++j)
		{
			stbi_uc* p1 = out + j * s->img_x * target;
			stbi_uc* p2 = out + (s->img_y - 1 - j) * s->img_x * target;
			for (i = 0; i < (int)s->img_x * target; ++i)
			{
				t = p1[i];
				p1[i] = p2[i];
				p2[i] = t;
			}
		}
	}

	if (req_comp && req_comp != target)
	{
		out = __stbi_convert_format(out, target, req_comp, s->img_x, s->img_y);
		if (out == NULL) return out; // __stbi_convert_format frees input on failure
	}

	*x = s->img_x;
	*y = s->img_y;
	if (comp) *comp = s->img_n;
	return out;
}
#endif

// Targa Truevision - TGA
// by Jonathan Dummer
#ifndef STBI_NO_TGA
// returns STBI_rgb or whatever, 0 on error
// Map a TGA bits-per-pixel value to a component count (STBI_rgb etc.),
// or 0 for an unsupported depth. *is_rgb16 (if provided) is set to 1 for
// the packed 15/16-bit RGB formats that need special decoding.
// Only RGB or RGBA (incl. 16bit) or grey are allowed.
static int __stbi_tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16)
{
	if (is_rgb16) *is_rgb16 = 0;
	if (bits_per_pixel == 8) return STBI_grey;
	if (bits_per_pixel == 16 && is_grey) return STBI_grey_alpha;
	if (bits_per_pixel == 15 || bits_per_pixel == 16)
	{
		// packed 5-5-5 color (the 16th bit, if any, is ignored)
		if (is_rgb16) *is_rgb16 = 1;
		return STBI_rgb;
	}
	if (bits_per_pixel == 24 || bits_per_pixel == 32) return bits_per_pixel / 8;
	return 0;
}

// Parse the TGA header and report width/height/component count without
// decoding pixels. Returns 1 on success; on any implausible field returns 0
// after rewinding the stream so another decoder may try.
static int __stbi_tga_info(__stbi_context* s, int* x, int* y, int* comp)
{
	int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp;
	int sz, tga_colormap_type;
	__stbi_get8(s); // discard Offset
	tga_colormap_type = __stbi_get8(s); // colormap type
	if (tga_colormap_type > 1)
	{
		__stbi_rewind(s);
		return 0; // only RGB or indexed allowed
	}
	tga_image_type = __stbi_get8(s); // image type
	if (tga_colormap_type == 1)
	{ // colormapped (paletted) image
		// image type must be 1 (uncompressed) or 9 (RLE) for paletted data
		if (tga_image_type != 1 && tga_image_type != 9)
		{
			__stbi_rewind(s);
			return 0;
		}
		__stbi_skip(s, 4); // skip index of first colormap entry and number of entries
		sz = __stbi_get8(s); //   check bits per palette color entry
		if ((sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32))
		{
			__stbi_rewind(s);
			return 0;
		}
		__stbi_skip(s, 4); // skip image x and y origin
		tga_colormap_bpp = sz;
	}
	else
	{ // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE
		if ((tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11))
		{
			__stbi_rewind(s);
			return 0; // only RGB or grey allowed, +/- RLE
		}
		__stbi_skip(s, 9); // skip colormap specification and image x/y origin
		tga_colormap_bpp = 0;
	}
	tga_w = __stbi_get16le(s);
	if (tga_w < 1)
	{
		__stbi_rewind(s);
		return 0; // test width
	}
	tga_h = __stbi_get16le(s);
	if (tga_h < 1)
	{
		__stbi_rewind(s);
		return 0; // test height
	}
	tga_bits_per_pixel = __stbi_get8(s); // bits per pixel
	__stbi_get8(s); // ignore alpha bits
	if (tga_colormap_bpp != 0)
	{
		if ((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16))
		{
			// when using a colormap, tga_bits_per_pixel is the size of the indexes
			// I don't think anything but 8 or 16bit indexes makes sense
			__stbi_rewind(s);
			return 0;
		}
		// component count comes from the palette entries, not the indices
		tga_comp = __stbi_tga_get_comp(tga_colormap_bpp, 0, NULL);
	}
	else
	{
		tga_comp = __stbi_tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL);
	}
	if (!tga_comp)
	{
		__stbi_rewind(s);
		return 0;
	}
	if (x) *x = tga_w;
	if (y) *y = tga_h;
	if (comp) *comp = tga_comp;
	return 1; // seems to have passed everything
}

// Quick plausibility check for TGA. TGA has no magic number, so we sanity
// check the header fields instead. Always rewinds the stream; returns 1 if
// the header looks like a decodable TGA, 0 otherwise.
static int __stbi_tga_test(__stbi_context* s)
{
	int res = 0;
	int sz, tga_color_type;
	__stbi_get8(s); //   discard Offset
	tga_color_type = __stbi_get8(s); //   color type
	if (tga_color_type > 1) goto errorEnd; //   only RGB or indexed allowed
	sz = __stbi_get8(s); //   image type
	if (tga_color_type == 1)
	{ // colormapped (paletted) image
		if (sz != 1 && sz != 9) goto errorEnd; // colortype 1 demands image type 1 or 9
		__stbi_skip(s, 4); // skip index of first colormap entry and number of entries
		sz = __stbi_get8(s); //   check bits per palette color entry
		if ((sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32)) goto errorEnd;
		__stbi_skip(s, 4); // skip image x and y origin
	}
	else
	{ // "normal" image w/o colormap
		if ((sz != 2) && (sz != 3) && (sz != 10) && (sz != 11)) goto errorEnd; // only RGB or grey allowed, +/- RLE
		__stbi_skip(s, 9); // skip colormap specification and image x/y origin
	}
	if (__stbi_get16le(s) < 1) goto errorEnd; //   test width
	if (__stbi_get16le(s) < 1) goto errorEnd; //   test height
	sz = __stbi_get8(s); //   bits per pixel
	if ((tga_color_type == 1) && (sz != 8) && (sz != 16)) goto errorEnd; // for colormapped images, bpp is size of an index
	if ((sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32)) goto errorEnd;

	res = 1; // if we got this far, everything's good and we can return 1 instead of 0

errorEnd:
	// single exit: rewind regardless of outcome so other decoders can sniff
	__stbi_rewind(s);
	return res;
}

// read 16bit value and convert to 24bit RGB
static void __stbi_tga_read_rgb16(__stbi_context* s, stbi_uc* out)
{
	__stbi_uint16 px = (__stbi_uint16)__stbi_get16le(s);
	__stbi_uint16 fiveBitMask = 31;
	// we have 3 channels with 5bits each
	int r = (px >> 10) & fiveBitMask;
	int g = (px >> 5) & fiveBitMask;
	int b = px & fiveBitMask;
	// Note that this saves the value in RGB(A) order, so it doesn't need to be swapped later
	out[0] = (stbi_uc)((r * 255) / 31);
	out[1] = (stbi_uc)((g * 255) / 31);
	out[2] = (stbi_uc)((b * 255) / 31);

	// some people claim that the most significant bit might be used for alpha
	// (possibly if an alpha-bit is set in the "image descriptor byte")
	// but that only made 16bit test images completely translucent..
	// so let's treat all 15 and 16bit TGAs as RGB with no alpha.
}

// Decode a TGA image (uncompressed or RLE, paletted or truecolor, 8/15/16/
// 24/32 bpp) into an interleaved 8-bit buffer of tga_comp components,
// post-converting to req_comp if requested. Returns malloc'd pixels, or
// NULL with the error string set.
static void* __stbi_tga_load(__stbi_context* s, int* x, int* y, int* comp, int req_comp, __stbi_result_info* ri)
{
	//   read in the TGA header stuff
	int tga_offset = __stbi_get8(s);
	int tga_indexed = __stbi_get8(s);
	int tga_image_type = __stbi_get8(s);
	int tga_is_RLE = 0;
	int tga_palette_start = __stbi_get16le(s);
	int tga_palette_len = __stbi_get16le(s);
	int tga_palette_bits = __stbi_get8(s);
	/*int tga_x_origin = */ __stbi_get16le(s);
	/*int tga_y_origin = */ __stbi_get16le(s);
	int tga_width = __stbi_get16le(s);
	int tga_height = __stbi_get16le(s);
	int tga_bits_per_pixel = __stbi_get8(s);
	int tga_comp, tga_rgb16 = 0;
	int tga_inverted = __stbi_get8(s);
	// int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?)
	//   image value
	unsigned char* tga_data;
	unsigned char* tga_palette = NULL;
	int i, j;
	unsigned char raw_data[4] = { 0 };
	int RLE_count = 0;
	int RLE_repeating = 0;
	int read_next_pixel = 1;
	STBI_NOTUSED(ri);
	//STBI_NOTUSED(tga_x_origin); // @TODO
	//STBI_NOTUSED(tga_y_origin); // @TODO

	if (tga_height > STBI_MAX_DIMENSIONS) return __stbi_errpuc("too large", "Very large image (corrupt?)");
	if (tga_width > STBI_MAX_DIMENSIONS) return __stbi_errpuc("too large", "Very large image (corrupt?)");

	//   do a tiny bit of precessing
	// image types 8..11 are the RLE variants of 0..3
	if (tga_image_type >= 8)
	{
		tga_image_type -= 8;
		tga_is_RLE = 1;
	}
	// bit 5 of the descriptor byte: 0 = bottom-up (needs invert), 1 = top-down
	tga_inverted = 1 - ((tga_inverted >> 5) & 1);

	//   If I'm paletted, then I'll use the number of bits from the palette
	if (tga_indexed)
		tga_comp = __stbi_tga_get_comp(tga_palette_bits, 0, &tga_rgb16);
	else
		tga_comp = __stbi_tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16);

	if (!tga_comp) // shouldn't really happen, __stbi_tga_test() should have ensured basic consistency
		return __stbi_errpuc("bad format", "Can't find out TGA pixelformat");

	//   tga info
	*x = tga_width;
	*y = tga_height;
	if (comp) *comp = tga_comp;

	if (!__stbi_mad3sizes_valid(tga_width, tga_height, tga_comp, 0)) return __stbi_errpuc("too large", "Corrupt TGA");

	tga_data = (unsigned char*)__stbi_malloc_mad3(tga_width, tga_height, tga_comp, 0);
	if (!tga_data) return __stbi_errpuc("outofmem", "Out of memory");

	// skip to the data's starting position (offset usually = 0)
	__stbi_skip(s, tga_offset);

	if (!tga_indexed && !tga_is_RLE && !tga_rgb16)
	{
		// fast path: raw bytes, one getn per row (handling inversion here)
		for (i = 0; i < tga_height; ++i)
		{
			int row = tga_inverted ? tga_height - i - 1 : i;
			stbi_uc* tga_row = tga_data + row * tga_width * tga_comp;
			__stbi_getn(s, tga_row, tga_width * tga_comp);
		}
	}
	else
	{
		//   do I need to load a palette?
		if (tga_indexed)
		{
			if (tga_palette_len == 0)
			{ /* you have to have at least one entry! */
				STBI_FREE(tga_data);
				return __stbi_errpuc("bad palette", "Corrupt TGA");
			}

			//   any value to skip? (offset usually = 0)
			__stbi_skip(s, tga_palette_start);
			//   load the palette
			tga_palette = (unsigned char*)__stbi_malloc_mad2(tga_palette_len, tga_comp, 0);
			if (!tga_palette)
			{
				STBI_FREE(tga_data);
				return __stbi_errpuc("outofmem", "Out of memory");
			}
			if (tga_rgb16)
			{
				// 16-bit palette entries are expanded to RGB on load
				stbi_uc* pal_entry = tga_palette;
				STBI_ASSERT(tga_comp == STBI_rgb);
				for (i = 0; i < tga_palette_len; ++i)
				{
					__stbi_tga_read_rgb16(s, pal_entry);
					pal_entry += tga_comp;
				}
			}
			else if (!__stbi_getn(s, tga_palette, tga_palette_len * tga_comp))
			{
				STBI_FREE(tga_data);
				STBI_FREE(tga_palette);
				return __stbi_errpuc("bad palette", "Corrupt TGA");
			}
		}
		//   load the value
		for (i = 0; i < tga_width * tga_height; ++i)
		{
			//   if I'm in RLE figureMode, do I need to Get a RLE __stbi_pngchunk?
			if (tga_is_RLE)
			{
				if (RLE_count == 0)
				{
					//   yep, Get the next byte as a RLE command
					// high bit = run packet (repeat one pixel), low 7 bits = count-1
					int RLE_cmd = __stbi_get8(s);
					RLE_count = 1 + (RLE_cmd & 127);
					RLE_repeating = RLE_cmd >> 7;
					read_next_pixel = 1;
				}
				else if (!RLE_repeating)
				{
					read_next_pixel = 1;
				}
			}
			else
			{
				read_next_pixel = 1;
			}
			//   OK, if I need to read a pixel, do it now
			if (read_next_pixel)
			{
				//   load however much value we did have
				if (tga_indexed)
				{
					// read in index, then perform the lookup
					int pal_idx = (tga_bits_per_pixel == 8) ? __stbi_get8(s) : __stbi_get16le(s);
					if (pal_idx >= tga_palette_len)
					{
						// invalid index
						pal_idx = 0;
					}
					pal_idx *= tga_comp;
					for (j = 0; j < tga_comp; ++j)
					{
						raw_data[j] = tga_palette[pal_idx + j];
					}
				}
				else if (tga_rgb16)
				{
					STBI_ASSERT(tga_comp == STBI_rgb);
					__stbi_tga_read_rgb16(s, raw_data);
				}
				else
				{
					//   read in the value raw
					for (j = 0; j < tga_comp; ++j)
					{
						raw_data[j] = __stbi_get8(s);
					}
				}
				//   clear the reading flag for the next pixel
				read_next_pixel = 0;
			} // end of reading a pixel

			// copy value
			for (j = 0; j < tga_comp; ++j) tga_data[i * tga_comp + j] = raw_data[j];

			//   in case we're in RLE figureMode, keep counting down
			--RLE_count;
		}
		//   do I need to invert the image?
		if (tga_inverted)
		{
			// swap rows top-for-bottom in place
			for (j = 0; j * 2 < tga_height; ++j)
			{
				int index1 = j * tga_width * tga_comp;
				int index2 = (tga_height - 1 - j) * tga_width * tga_comp;
				for (i = tga_width * tga_comp; i > 0; --i)
				{
					unsigned char temp = tga_data[index1];
					tga_data[index1] = tga_data[index2];
					tga_data[index2] = temp;
					++index1;
					++index2;
				}
			}
		}
		//   clear my palette, if I had one
		if (tga_palette != NULL)
		{
			STBI_FREE(tga_palette);
		}
	}

	// swap RGB - if the source value was RGB16, it already is in the right order
	if (tga_comp >= 3 && !tga_rgb16)
	{
		unsigned char* tga_pixel = tga_data;
		for (i = 0; i < tga_width * tga_height; ++i)
		{
			unsigned char temp = tga_pixel[0];
			tga_pixel[0] = tga_pixel[2];
			tga_pixel[2] = temp;
			tga_pixel += tga_comp;
		}
	}

	// convert to target component count
	if (req_comp && req_comp != tga_comp) tga_data = __stbi_convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height);

	//   the things I do to Get rid of an error message, and yet keep
	//   Microsoft's C compilers happy... [8^(
	// tga_palette_start = tga_palette_len = tga_palette_bits =
	// tga_x_origin = tga_y_origin = 0;
	// STBI_NOTUSED(tga_palette_start);
	//   OK, done
	return tga_data;
}
#endif

// *************************************************************************************************
// Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked by STB

#ifndef STBI_NO_PSD
// Sniff a PSD: the file must begin with the big-endian signature "8BPS".
// Rewinds the stream before returning.
static int __stbi_psd_test(__stbi_context* s)
{
	int matches = (__stbi_get32be(s) == 0x38425053);
	__stbi_rewind(s);
	return matches;
}

// Decode one PackBits-style RLE channel into p, writing every 4th byte
// (the output is interleaved RGBA). Returns 1 on success, 0 if a run would
// overflow the expected pixelCount (corrupt data).
static int __stbi_psd_decode_rle(__stbi_context* s, stbi_uc* p, int pixelCount)
{
	int written = 0;

	while (written < pixelCount)
	{
		int remaining = pixelCount - written;
		int code = __stbi_get8(s);
		if (code < 128)
		{
			// literal run: copy the next code+1 bytes verbatim
			int run = code + 1;
			if (run > remaining) return 0; // corrupt value
			written += run;
			while (run--)
			{
				*p = __stbi_get8(s);
				p += 4;
			}
		}
		else if (code > 128)
		{
			// repeat run: next source byte is replicated 257-code times
			// (interpreting code as a negative 8-bit int)
			int run = 257 - code;
			if (run > remaining) return 0; // corrupt value
			stbi_uc fill = __stbi_get8(s);
			written += run;
			while (run--)
			{
				*p = fill;
				p += 4;
			}
		}
		// code == 128 is a no-op
	}

	return 1;
}

// Decode a PSD (Photoshop) file into an RGBA buffer.
// Only version-1, RGB-mode files with 8- or 16-bit channels are supported.
// bpc selects the caller's preferred output precision: when the file is
// uncompressed 16-bit and bpc == 16, a 16-bit-per-channel buffer is produced
// and ri->bits_per_channel is set to 16; otherwise output is 8-bit.
// Missing channels are filled with defaults (alpha = opaque), and a
// white-matte un-premultiply pass is applied when an alpha channel exists.
static void* __stbi_psd_load(__stbi_context* s, int* x, int* y, int* comp, int req_comp, __stbi_result_info* ri, int bpc)
{
	int pixelCount;
	int channelCount, compression;
	int channel, i;
	int bitdepth;
	int w, h;
	stbi_uc* out;
	STBI_NOTUSED(ri);

	// Check identifier
	if (__stbi_get32be(s) != 0x38425053) // "8BPS"
		return __stbi_errpuc("not PSD", "Corrupt PSD image");

	// Check file type version.
	if (__stbi_get16be(s) != 1) return __stbi_errpuc("wrong version", "Unsupported version of PSD image");

	// Skip 6 reserved bytes.
	__stbi_skip(s, 6);

	// Read the number of channels (R, G, B, A, etc).
	channelCount = __stbi_get16be(s);
	if (channelCount < 0 || channelCount > 16) return __stbi_errpuc("wrong channel count", "Unsupported number of channels in PSD image");

	// Read the rows and columns of the image.
	h = __stbi_get32be(s);
	w = __stbi_get32be(s);

	if (h > STBI_MAX_DIMENSIONS) return __stbi_errpuc("too large", "Very large image (corrupt?)");
	if (w > STBI_MAX_DIMENSIONS) return __stbi_errpuc("too large", "Very large image (corrupt?)");

	// Make sure the depth is 8 bits.
	bitdepth = __stbi_get16be(s);
	if (bitdepth != 8 && bitdepth != 16) return __stbi_errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit");

	// Make sure the color figureMode is RGB.
	// Valid options are:
	//   0: Bitmap
	//   1: Grayscale
	//   2: Indexed color
	//   3: RGB color
	//   4: CMYK color
	//   7: Multichannel
	//   8: Duotone
	//   9: Lab color
	if (__stbi_get16be(s) != 3) return __stbi_errpuc("wrong color format", "PSD is not in RGB color format");

	// Skip the Mode Data.  (It's the palette for indexed color; other info for other modes.)
	__stbi_skip(s, __stbi_get32be(s));

	// Skip the image resources.  (resolution, pen tool paths, etc)
	__stbi_skip(s, __stbi_get32be(s));

	// Skip the reserved value.
	__stbi_skip(s, __stbi_get32be(s));

	// Find out if the value is compressed.
	// Known values:
	//   0: no compression
	//   1: RLE compressed
	compression = __stbi_get16be(s);
	if (compression > 1) return __stbi_errpuc("bad compression", "PSD has an unknown compression format");

	// Check size
	if (!__stbi_mad3sizes_valid(4, w, h, 0)) return __stbi_errpuc("too large", "Corrupt PSD");

	// Generate the destination image.

	// 16-bit output only for uncompressed 16-bit input when the caller
	// asked for it (bpc == 16); 8 bytes per pixel instead of 4
	if (!compression && bitdepth == 16 && bpc == 16)
	{
		out = (stbi_uc*)__stbi_malloc_mad3(8, w, h, 0);
		ri->bits_per_channel = 16;
	}
	else
		out = (stbi_uc*)__stbi_malloc(4 * w * h);

	if (!out) return __stbi_errpuc("outofmem", "Out of memory");
	pixelCount = w * h;

	// Initialize the value to zero.
	//memset( out, 0, pixelCount * 4 );

	// Finally, the image value.
	if (compression)
	{
		// RLE as used by .PSD and .TIFF
		// Loop until you Get the number of unpacked bytes you are expecting:
		//     Read the next source byte into n.
		//     If n is between 0 and 127 inclusive, copy the next n+1 bytes literally.
		//     Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times.
		//     Else if n is 128, noop.
		// Endloop

		// The RLE-compressed value is preceded by a 2-byte value count for each row in the value,
		// which we're going to just skip.
		__stbi_skip(s, h * channelCount * 2);

		// Read the RLE value by channel.
		for (channel = 0; channel < 4; channel++)
		{
			stbi_uc* p;

			p = out + channel;
			if (channel >= channelCount)
			{
				// Fill this channel with default value.
				for (i = 0; i < pixelCount; i++, p += 4) *p = (channel == 3 ? 255 : 0);
			}
			else
			{
				// Read the RLE value.
				if (!__stbi_psd_decode_rle(s, p, pixelCount))
				{
					STBI_FREE(out);
					return __stbi_errpuc("corrupt", "bad RLE value");
				}
			}
		}
	}
	else
	{
		// We're at the raw image value.  It's each channel in order (Red, Green, Blue, Alpha, ...)
		// where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image.

		// Read the value by channel.
		for (channel = 0; channel < 4; channel++)
		{
			if (channel >= channelCount)
			{
				// Fill this channel with default value.
				if (bitdepth == 16 && bpc == 16)
				{
					__stbi_uint16* q = ((__stbi_uint16*)out) + channel;
					__stbi_uint16 val = channel == 3 ? 65535 : 0;
					for (i = 0; i < pixelCount; i++, q += 4) *q = val;
				}
				else
				{
					stbi_uc* p = out + channel;
					stbi_uc val = channel == 3 ? 255 : 0;
					for (i = 0; i < pixelCount; i++, p += 4) *p = val;
				}
			}
			else
			{
				if (ri->bits_per_channel == 16)
				{ // output bpc
					__stbi_uint16* q = ((__stbi_uint16*)out) + channel;
					for (i = 0; i < pixelCount; i++, q += 4) *q = (__stbi_uint16)__stbi_get16be(s);
				}
				else
				{
					stbi_uc* p = out + channel;
					if (bitdepth == 16)
					{ // input bpc
						// 16-bit input to 8-bit output: keep the high byte
						for (i = 0; i < pixelCount; i++, p += 4) *p = (stbi_uc)(__stbi_get16be(s) >> 8);
					}
					else
					{
						for (i = 0; i < pixelCount; i++, p += 4) *p = __stbi_get8(s);
					}
				}
			}
		}
	}

	// remove weird white matte from PSD
	if (channelCount >= 4)
	{
		if (ri->bits_per_channel == 16)
		{
			for (i = 0; i < w * h; ++i)
			{
				__stbi_uint16* pixel = (__stbi_uint16*)out + 4 * i;
				if (pixel[3] != 0 && pixel[3] != 65535)
				{
					float a = pixel[3] / 65535.0f;
					float ra = 1.0f / a;
					float inv_a = 65535.0f * (1 - ra);
					pixel[0] = (__stbi_uint16)(pixel[0] * ra + inv_a);
					pixel[1] = (__stbi_uint16)(pixel[1] * ra + inv_a);
					pixel[2] = (__stbi_uint16)(pixel[2] * ra + inv_a);
				}
			}
		}
		else
		{
			for (i = 0; i < w * h; ++i)
			{
				unsigned char* pixel = out + 4 * i;
				if (pixel[3] != 0 && pixel[3] != 255)
				{
					float a = pixel[3] / 255.0f;
					float ra = 1.0f / a;
					float inv_a = 255.0f * (1 - ra);
					pixel[0] = (unsigned char)(pixel[0] * ra + inv_a);
					pixel[1] = (unsigned char)(pixel[1] * ra + inv_a);
					pixel[2] = (unsigned char)(pixel[2] * ra + inv_a);
				}
			}
		}
	}

	// convert to desired output format
	if (req_comp && req_comp != 4)
	{
		if (ri->bits_per_channel == 16)
			out = (stbi_uc*)__stbi_convert_format16((__stbi_uint16*)out, 4, req_comp, w, h);
		else
			out = __stbi_convert_format(out, 4, req_comp, w, h);
		if (out == NULL) return out; // __stbi_convert_format frees input on failure
	}

	if (comp) *comp = 4;
	*y = h;
	*x = w;

	return out;
}
#endif

// *************************************************************************************************
// Softimage PIC loader
// by Tom Seddon
//
// See http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format
// See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/

#ifndef STBI_NO_PIC
// Consume 4 bytes from the stream and compare them against str.
// Returns 1 on an exact match, 0 otherwise.
static int __stbi_pic_is4(__stbi_context* s, const char* str)
{
	for (int k = 0; k < 4; ++k)
	{
		if (__stbi_get8(s) != (stbi_uc)str[k]) return 0;
	}
	return 1;
}

// Sniff a Softimage PIC: magic bytes, 84 bytes of header we skip, then "PICT".
// Consumes bytes from the stream (caller rewinds).
static int __stbi_pic_test_core(__stbi_context* s)
{
	int skip;

	if (!__stbi_pic_is4(s, "\x53\x80\xF6\x34")) return 0;

	for (skip = 0; skip < 84; ++skip) __stbi_get8(s);

	return __stbi_pic_is4(s, "PICT") ? 1 : 0;
}

// One channel-packet descriptor from the PIC header:
// size = bits per channel (must be 8), type = compression mode
// (0 uncompressed, 1 pure RLE, 2 mixed RLE -- see __stbi_pic_load_core),
// channel = MSB-first bit-mask selecting which RGBA slots the packet carries.
typedef struct
{
	stbi_uc size, type, channel;
} __stbi_pic_packet;

// Read one byte for each RGBA slot selected by the MSB-first channel mask
// into dest. Returns dest, or the error sentinel on premature EOF.
static stbi_uc* __stbi_readval(__stbi_context* s, int channel, stbi_uc* dest)
{
	int slot;

	for (slot = 0; slot < 4; ++slot)
	{
		if (channel & (0x80 >> slot))
		{
			if (__stbi_at_eof(s)) return __stbi_errpuc("bad file", "PIC file too short");
			dest[slot] = __stbi_get8(s);
		}
	}

	return dest;
}

// Copy only the RGBA slots selected by the MSB-first channel mask.
static void __stbi_copyval(int channel, stbi_uc* dest, const stbi_uc* src)
{
	int slot;

	for (slot = 0; slot < 4; ++slot)
	{
		if (channel & (0x80 >> slot)) dest[slot] = src[slot];
	}
}

// Decode the packet stream of a Softimage PIC into 'result' (RGBA, 4 bytes
// per pixel, pre-filled by the caller). First reads the chained packet
// descriptors, then decodes each scanline packet-by-packet. Sets *comp to
// 4 if any packet carries the alpha channel, else 3. Returns 'result' on
// success, NULL/0 on error.
static stbi_uc* __stbi_pic_load_core(__stbi_context* s, int width, int height, int* comp, stbi_uc* result)
{
	int act_comp = 0, num_packets = 0, y, chained;
	__stbi_pic_packet packets[10]{};

	// this will (should...) cater for even some bizarre stuff like having value
	// for the same channel in multiple packets.
	do
	{
		__stbi_pic_packet* packet;

		if (num_packets == sizeof(packets) / sizeof(packets[0])) return __stbi_errpuc("bad format", "too many packets");

		packet = &packets[num_packets++];

		chained = __stbi_get8(s);
		packet->size = __stbi_get8(s);
		packet->type = __stbi_get8(s);
		packet->channel = __stbi_get8(s);

		act_comp |= packet->channel;

		if (__stbi_at_eof(s)) return __stbi_errpuc("bad file", "file too short (reading packets)");
		if (packet->size != 8) return __stbi_errpuc("bad format", "packet isn't 8bpp");
	} while (chained);

	*comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel?

	for (y = 0; y < height; ++y)
	{
		int packet_idx;

		for (packet_idx = 0; packet_idx < num_packets; ++packet_idx)
		{
			__stbi_pic_packet* packet = &packets[packet_idx];
			stbi_uc* dest = result + y * width * 4;

			switch (packet->type)
			{
			default: return __stbi_errpuc("bad format", "packet has bad compression type");

			case 0: { //uncompressed
				int x;

				for (x = 0; x < width; ++x, dest += 4)
					if (!__stbi_readval(s, packet->channel, dest)) return 0;
				break;
			}

			case 1: //Pure RLE
			{
				int left = width, i;

				while (left > 0)
				{
					stbi_uc count, value[4];

					count = __stbi_get8(s);
					if (__stbi_at_eof(s)) return __stbi_errpuc("bad file", "file too short (pure read count)");

					// clamp the run to the remainder of the scanline
					if (count > left) count = (stbi_uc)left;

					if (!__stbi_readval(s, packet->channel, value)) return 0;

					for (i = 0; i < count; ++i, dest += 4) __stbi_copyval(packet->channel, dest, value);
					left -= count;
				}
			}
			break;

			case 2: { //Mixed RLE
				int left = width;
				while (left > 0)
				{
					int count = __stbi_get8(s), i;
					if (__stbi_at_eof(s)) return __stbi_errpuc("bad file", "file too short (mixed read count)");

					if (count >= 128)
					{ // Repeated
						stbi_uc value[4];

						// count byte 128 means a 16-bit count follows
						if (count == 128)
							count = __stbi_get16be(s);
						else
							count -= 127;
						if (count > left) return __stbi_errpuc("bad file", "scanline overrun");

						if (!__stbi_readval(s, packet->channel, value)) return 0;

						for (i = 0; i < count; ++i, dest += 4) __stbi_copyval(packet->channel, dest, value);
					}
					else
					{ // Raw
						++count;
						if (count > left) return __stbi_errpuc("bad file", "scanline overrun");

						for (i = 0; i < count; ++i, dest += 4)
							if (!__stbi_readval(s, packet->channel, dest)) return 0;
					}
					left -= count;
				}
				break;
			}
			}
		}
	}

	return result;
}

// Load a Softimage PIC file into an RGBA intermediate buffer, then convert
// to req_comp channels. Returns NULL (with failure reason set) on error.
// px/py receive the image dimensions; comp receives the source channel count.
static void* __stbi_pic_load(__stbi_context* s, int* px, int* py, int* comp, int req_comp, __stbi_result_info* ri)
{
	stbi_uc* result;
	int i, x, y, internal_comp{};
	STBI_NOTUSED(ri);

	if (!comp) comp = &internal_comp;

	// skip the 92-byte fixed header prefix before the dimension fields
	for (i = 0; i < 92; ++i) __stbi_get8(s);

	x = __stbi_get16be(s);
	y = __stbi_get16be(s);

	if (y > STBI_MAX_DIMENSIONS) return __stbi_errpuc("too large", "Very large image (corrupt?)");
	if (x > STBI_MAX_DIMENSIONS) return __stbi_errpuc("too large", "Very large image (corrupt?)");

	if (__stbi_at_eof(s)) return __stbi_errpuc("bad file", "file too short (pic header)");
	if (!__stbi_mad3sizes_valid(x, y, 4, 0)) return __stbi_errpuc("too large", "PIC image too large to decode");

	__stbi_get32be(s); //skip `ratio'
	__stbi_get16be(s); //skip `fields'
	__stbi_get16be(s); //skip `pad'

	// intermediate buffer is RGBA
	result = (stbi_uc*)__stbi_malloc_mad3(x, y, 4, 0);
	// FIX: the allocation was previously memset without a NULL check
	if (!result) return __stbi_errpuc("outofmem", "Out of memory");
	memset(result, 0xff, x * y * 4);

	if (!__stbi_pic_load_core(s, x, y, comp, result))
	{
		STBI_FREE(result);
		result = 0;
	}
	*px = x;
	*py = y;
	if (result)
	{
		// FIX: only convert on success; previously a NULL buffer could be
		// handed to __stbi_convert_format after a core-load failure
		if (req_comp == 0) req_comp = *comp;
		result = __stbi_convert_format(result, 4, req_comp, x, y);
	}

	return result;
}

// Non-destructive PIC signature probe: run the real check, then restore
// the stream position for whichever loader runs next.
static int __stbi_pic_test(__stbi_context* s)
{
	const int ok = __stbi_pic_test_core(s);
	__stbi_rewind(s);
	return ok;
}
#endif

// *************************************************************************************************
// GIF loader -- public domain by Jean-Marc Lienher -- simplified/shrunk by stb

#ifndef STBI_NO_GIF
// One GIF LZW dictionary entry. A code expands to the expansion of `prefix`
// (an earlier code index, or -1 for a root code) followed by `suffix`;
// `first` caches the first byte of the full expansion.
typedef struct
{
	__stbi_int16 prefix;
	stbi_uc first;
	stbi_uc suffix;
} __stbi_gif_lzw;

// Decoder state for one GIF stream, shared across frames of an animation.
typedef struct
{
	int w, h; // canvas size from the logical screen descriptor
	stbi_uc* out; // output buffer (always 4 components)
	stbi_uc* background; // The current "background" as far as a gif is concerned
	stbi_uc* history; // per-pixel flag: 1 if the pixel was written this frame
	int flags, bgindex, ratio, transparent, eflags; // screen flags, background index, aspect ratio, transparent index, graphic-control flags
	stbi_uc pal[256][4]; // global color table
	stbi_uc lpal[256][4]; // local (per-frame) color table
	__stbi_gif_lzw codes[8192]; // LZW dictionary (12-bit code space)
	stbi_uc* color_table; // table in effect for the current frame (pal or lpal)
	int parse, step; // remaining interlace passes; row step in bytes
	int lflags; // local image descriptor flags
	int start_x, start_y; // frame rectangle origin (byte offsets into out)
	int max_x, max_y; // frame rectangle end (byte offsets into out)
	int cur_x, cur_y; // write cursor (byte offsets into out)
	int line_size; // bytes per canvas row (w * 4)
	int Delay; // frame delay, stored in 1/1000ths of a second
} __stbi_gif;

// Consume and check the 6-byte GIF signature: "GIF87a" or "GIF89a".
// Returns 1 on match, 0 otherwise (stream position is NOT restored).
static int __stbi_gif_test_raw(__stbi_context* s)
{
	static const char magic[4] = { 'G', 'I', 'F', '8' };
	int i;
	for (i = 0; i < 4; ++i)
	{
		if (__stbi_get8(s) != magic[i]) return 0;
	}
	const int version = __stbi_get8(s);
	if (version != '9' && version != '7') return 0;
	return __stbi_get8(s) == 'a' ? 1 : 0;
}

// Non-destructive GIF signature probe: check, then rewind the stream.
static int __stbi_gif_test(__stbi_context* s)
{
	const int ok = __stbi_gif_test_raw(s);
	__stbi_rewind(s);
	return ok;
}

// Read num_entries palette triples from the stream. Bytes are stored into
// entry slots [2],[1],[0] in read order (matching the layout the pixel
// writer expects); slot [3] is 0 for the transparent index, else 255.
static void __stbi_gif_parse_colortable(__stbi_context* s, stbi_uc pal[256][4], int num_entries, int transp)
{
	int n;
	for (n = 0; n < num_entries; ++n)
	{
		stbi_uc* entry = pal[n];
		entry[2] = __stbi_get8(s);
		entry[1] = __stbi_get8(s);
		entry[0] = __stbi_get8(s);
		entry[3] = (n == transp) ? 0 : 255;
	}
}

// Parse the GIF signature, logical screen descriptor, and (unless is_info)
// the global color table. Returns 1 on success, 0 via __stbi_err on failure.
static int __stbi_gif_header(__stbi_context* s, __stbi_gif* g, int* comp, int is_info)
{
	stbi_uc version;
	if (__stbi_get8(s) != 'G' || __stbi_get8(s) != 'I' || __stbi_get8(s) != 'F' || __stbi_get8(s) != '8') return __stbi_err("not GIF", "Corrupt GIF");

	version = __stbi_get8(s);
	if (version != '7' && version != '9') return __stbi_err("not GIF", "Corrupt GIF");
	if (__stbi_get8(s) != 'a') return __stbi_err("not GIF", "Corrupt GIF");

	__stbi_g_failure_reason = "";
	g->w = __stbi_get16le(s);
	g->h = __stbi_get16le(s);
	g->flags = __stbi_get8(s); // logical screen descriptor flags
	g->bgindex = __stbi_get8(s); // background color index
	g->ratio = __stbi_get8(s); // pixel aspect ratio (stored, not used here)
	g->transparent = -1; // no transparent index until a graphic control extension sets one

	if (g->w > STBI_MAX_DIMENSIONS) return __stbi_err("too large", "Very large image (corrupt?)");
	if (g->h > STBI_MAX_DIMENSIONS) return __stbi_err("too large", "Very large image (corrupt?)");

	if (comp != 0) *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments

	if (is_info) return 1;

	// 0x80: global color table present; low 3 bits encode its size
	if (g->flags & 0x80) __stbi_gif_parse_colortable(s, g->pal, 2 << (g->flags & 7), -1);

	return 1;
}

static int __stbi_gif_info_raw(__stbi_context* s, int* x, int* y, int* comp)
{
	__stbi_gif* g = (__stbi_gif*)__stbi_malloc(sizeof(__stbi_gif));
	if (!__stbi_gif_header(s, g, comp, 1))
	{
		STBI_FREE(g);
		__stbi_rewind(s);
		return 0;
	}
	if (x) *x = g->w;
	if (y) *y = g->h;
	STBI_FREE(g);
	return 1;
}

// Emit the expansion of LZW `code` into the canvas, advancing the
// interlace-aware write cursor. The dictionary chains codes backwards
// (suffix last), so we recurse on the prefix to write bytes in order.
// cur_x/cur_y/start_*/max_*/step are all byte offsets into g->out.
static void __stbi_out_gif_code(__stbi_gif* g, __stbi_uint16 code)
{
	stbi_uc *p, *c;
	int idx;

	// recurse to decode the prefixes, since the linked-list is backwards,
	// and working backwards through an interleaved image would be nasty
	if (g->codes[code].prefix >= 0) __stbi_out_gif_code(g, g->codes[code].prefix);

	// past the frame rectangle: silently drop the byte
	if (g->cur_y >= g->max_y) return;

	idx = g->cur_x + g->cur_y; // both terms are byte offsets
	p = &g->out[idx];
	g->history[idx / 4] = 1; // mark this pixel as written during this frame

	c = &g->color_table[g->codes[code].suffix * 4];
	if (c[3] > 128)
	{ // don't render transparent pixels;
		p[0] = c[2];
		p[1] = c[1];
		p[2] = c[0];
		p[3] = c[3];
	}
	g->cur_x += 4;

	if (g->cur_x >= g->max_x)
	{
		// end of row: wrap to the next row of the current interlace pass
		g->cur_x = g->start_x;
		g->cur_y += g->step;

		// pass complete: fall through to the next, denser interlace pass
		while (g->cur_y >= g->max_y && g->parse > 0)
		{
			g->step = (1 << g->parse) * g->line_size;
			g->cur_y = g->start_y + (g->step >> 1);
			--g->parse;
		}
	}
}

// Decode one LZW-compressed image data stream (a sequence of sub-blocks)
// into g->out via __stbi_out_gif_code. Returns g->out on success, NULL or
// an error pointer on corrupt data. Bits are packed LSB-first.
static stbi_uc* __stbi_process_gif_raster(__stbi_context* s, __stbi_gif* g)
{
	stbi_uc lzw_cs;
	__stbi_int32 len, init_code;
	__stbi_uint32 first;
	__stbi_int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear;
	__stbi_gif_lzw* p;

	lzw_cs = __stbi_get8(s); // minimum LZW code size (max 12 bits)
	if (lzw_cs > 12) return NULL;
	clear = 1 << lzw_cs; // clear-dictionary code; clear+1 is end-of-stream
	first = 1;
	codesize = lzw_cs + 1;
	codemask = (1 << codesize) - 1;
	bits = 0;
	valid_bits = 0;
	// root codes: each expands to a single literal byte
	for (init_code = 0; init_code < clear; init_code++)
	{
		g->codes[init_code].prefix = -1;
		g->codes[init_code].first = (stbi_uc)init_code;
		g->codes[init_code].suffix = (stbi_uc)init_code;
	}

	// support no starting clear code
	avail = clear + 2;
	oldcode = -1;

	len = 0;
	for (;;)
	{
		if (valid_bits < codesize)
		{
			// refill the bit buffer from the sub-block byte stream
			if (len == 0)
			{
				len = __stbi_get8(s); // start new block
				if (len == 0) return g->out;
			}
			--len;
			bits |= (__stbi_int32)__stbi_get8(s) << valid_bits;
			valid_bits += 8;
		}
		else
		{
			__stbi_int32 code = bits & codemask;
			bits >>= codesize;
			valid_bits -= codesize;
			// @OPTIMIZE: is there some way we can accelerate the non-clear path?
			if (code == clear)
			{ // clear code: reset the dictionary to its initial state
				codesize = lzw_cs + 1;
				codemask = (1 << codesize) - 1;
				avail = clear + 2;
				oldcode = -1;
				first = 0;
			}
			else if (code == clear + 1)
			{ // end of stream code: skip any remaining sub-blocks
				__stbi_skip(s, len);
				while ((len = __stbi_get8(s)) > 0) __stbi_skip(s, len);
				return g->out;
			}
			else if (code <= avail)
			{
				if (first)
				{
					// pixel data arrived before any clear code
					return __stbi_errpuc("no clear code", "Corrupt GIF");
				}

				if (oldcode >= 0)
				{
					// extend the dictionary: oldcode + first byte of `code`
					p = &g->codes[avail++];
					if (avail > 8192)
					{
						return __stbi_errpuc("too many codes", "Corrupt GIF");
					}

					p->prefix = (__stbi_int16)oldcode;
					p->first = g->codes[oldcode].first;
					p->suffix = (code == avail) ? p->first : g->codes[code].first;
				}
				else if (code == avail)
					return __stbi_errpuc("illegal code in raster", "Corrupt GIF");

				__stbi_out_gif_code(g, (__stbi_uint16)code);

				// widen the code size once the dictionary fills the current range
				if ((avail & codemask) == 0 && avail <= 0x0FFF)
				{
					codesize++;
					codemask = (1 << codesize) - 1;
				}

				oldcode = code;
			}
			else
			{
				return __stbi_errpuc("illegal code in raster", "Corrupt GIF");
			}
		}
	}
}

// this function is designed to support animated gifs, although stb_image doesn't support it
// two back is the image from two frames ago, used for a very specific disposal format
// Decode the next frame of a GIF into g->out (RGBA). Returns g->out on
// success, (stbi_uc*)s as an end-of-stream sentinel, or NULL/error pointer
// on failure. two_back is the frame from two frames ago, needed only for
// disposal method 3 ("restore to previous").
static stbi_uc* __stbi_gif_load_next(__stbi_context* s, __stbi_gif* g, int* comp, int req_comp, stbi_uc* two_back)
{
	int dispose;
	int first_frame;
	int pi;
	int pcount;
	STBI_NOTUSED(req_comp);

	// on first frame, any non-written pixels Get the background colour (non-transparent)
	first_frame = 0;
	if (g->out == 0)
	{
		// first call: parse the header and allocate the canvas buffers
		if (!__stbi_gif_header(s, g, comp, 0)) return 0; // __stbi_g_failure_reason set by __stbi_gif_header
		if (!__stbi_mad3sizes_valid(4, g->w, g->h, 0)) return __stbi_errpuc("too large", "GIF image is too large");
		pcount = g->w * g->h;
		g->out = (stbi_uc*)__stbi_malloc(4 * pcount);
		g->background = (stbi_uc*)__stbi_malloc(4 * pcount);
		g->history = (stbi_uc*)__stbi_malloc(pcount);
		if (!g->out || !g->background || !g->history) return __stbi_errpuc("outofmem", "Out of memory");

		// image is treated as "transparent" at the start - ie, nothing overwrites the current background;
		// background colour is only used for pixels that are not rendered first frame, after that "background"
		// color refers to the color that was there the previous frame.
		memset(g->out, 0x00, 4 * pcount);
		memset(g->background, 0x00, 4 * pcount); // state of the background (starts transparent)
		memset(g->history, 0x00, pcount); // pixels that were affected previous frame
		first_frame = 1;
	}
	else
	{
		// second frame - how do we dispose of the previous one?
		dispose = (g->eflags & 0x1C) >> 2;
		pcount = g->w * g->h;

		if ((dispose == 3) && (two_back == 0))
		{
			dispose = 2; // if I don't have an image to revert back to, default to the old background
		}

		if (dispose == 3)
		{ // use previous graphic
			for (pi = 0; pi < pcount; ++pi)
			{
				if (g->history[pi])
				{
					memcpy(&g->out[pi * 4], &two_back[pi * 4], 4);
				}
			}
		}
		else if (dispose == 2)
		{
			// restore what was changed last frame to background before that frame;
			for (pi = 0; pi < pcount; ++pi)
			{
				if (g->history[pi])
				{
					memcpy(&g->out[pi * 4], &g->background[pi * 4], 4);
				}
			}
		}
		else
		{
			// This is a non-disposal case either way, so just
			// leave the pixels as is, and they will become the new background
			// 1: do not dispose
			// 0:  not specified.
		}

		// background is what out is after the undoing of the previous frame;
		memcpy(g->background, g->out, 4 * g->w * g->h);
	}

	// clear my history;
	memset(g->history, 0x00, g->w * g->h); // pixels that were affected previous frame

	// walk the block stream until we decode one image or hit the trailer
	for (;;)
	{
		int tag = __stbi_get8(s);
		switch (tag)
		{
		case 0x2C: /* Image Descriptor */
		{
			__stbi_int32 x, y, w, h;
			stbi_uc* o;

			x = __stbi_get16le(s);
			y = __stbi_get16le(s);
			w = __stbi_get16le(s);
			h = __stbi_get16le(s);
			if (((x + w) > (g->w)) || ((y + h) > (g->h))) return __stbi_errpuc("bad Image Descriptor", "Corrupt GIF");

			// convert the frame rectangle into byte offsets into the canvas
			g->line_size = g->w * 4;
			g->start_x = x * 4;
			g->start_y = y * g->line_size;
			g->max_x = g->start_x + w * 4;
			g->max_y = g->start_y + h * g->line_size;
			g->cur_x = g->start_x;
			g->cur_y = g->start_y;

			// if the width of the specified rectangle is 0, that means
			// we may not see *any* pixels or the image is malformed;
			// to make sure this is caught, move the current y down to
			// max_y (which is what out_gif_code checks).
			if (w == 0) g->cur_y = g->max_y;

			g->lflags = __stbi_get8(s);

			if (g->lflags & 0x40)
			{
				g->step = 8 * g->line_size; // first interlaced spacing
				g->parse = 3;
			}
			else
			{
				g->step = g->line_size;
				g->parse = 0;
			}

			// 0x80: local color table present; otherwise fall back to global
			if (g->lflags & 0x80)
			{
				__stbi_gif_parse_colortable(s, g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? g->transparent : -1);
				g->color_table = (stbi_uc*)g->lpal;
			}
			else if (g->flags & 0x80)
			{
				g->color_table = (stbi_uc*)g->pal;
			}
			else
				return __stbi_errpuc("missing color table", "Corrupt GIF");

			o = __stbi_process_gif_raster(s, g);
			if (!o) return NULL;

			// if this was the first frame,
			pcount = g->w * g->h;
			if (first_frame && (g->bgindex > 0))
			{
				// if first frame, any pixel not drawn to Gets the background color
				for (pi = 0; pi < pcount; ++pi)
				{
					if (g->history[pi] == 0)
					{
						g->pal[g->bgindex][3] = 255; // just in case it was made transparent, undo that; It will be Reset next frame if need be;
						memcpy(&g->out[pi * 4], &g->pal[g->bgindex], 4);
					}
				}
			}

			return o;
		}

		case 0x21: // Comment Extension.
		{
			int len;
			int ext = __stbi_get8(s);
			if (ext == 0xF9)
			{ // Graphic Control Extension.
				len = __stbi_get8(s);
				if (len == 4)
				{
					g->eflags = __stbi_get8(s);
					g->Delay = 10 * __stbi_get16le(s); // Delay - 1/100th of a second, saving as 1/1000ths.

					// unset old transparent
					if (g->transparent >= 0)
					{
						g->pal[g->transparent][3] = 255;
					}
					if (g->eflags & 0x01)
					{
						g->transparent = __stbi_get8(s);
						if (g->transparent >= 0)
						{
							g->pal[g->transparent][3] = 0;
						}
					}
					else
					{
						// don't need transparent
						__stbi_skip(s, 1);
						g->transparent = -1;
					}
				}
				else
				{
					__stbi_skip(s, len);
					break;
				}
			}
			// skip remaining extension sub-blocks
			while ((len = __stbi_get8(s)) != 0)
			{
				__stbi_skip(s, len);
			}
			break;
		}

		case 0x3B: // gif stream termination code
			return (stbi_uc*)s; // using '1' causes warning on some compilers

		default: return __stbi_errpuc("unknown code", "Corrupt GIF");
		}
	}
}

// Decode every frame of an animated GIF into one contiguous RGBA buffer
// (frames stacked vertically). On success *z is the layer count and, if
// `delays` is non-NULL, *delays is a malloc'd array of per-frame delays.
// Caller frees the returned buffer and *delays.
static void* __stbi_load_gif_main(__stbi_context* s, int** delays, int* x, int* y, int* z, int* comp, int req_comp)
{
	if (__stbi_gif_test(s))
	{
		int layers = 0;
		stbi_uc* u = 0;
		stbi_uc* out = 0;
		stbi_uc* two_back = 0;
		__stbi_gif g;
		int stride;
		// FIX: these were commented out but are still referenced by the
		// STBI_REALLOC_SIZED calls below (which only compiled because the
		// default macro discards its old-size argument)
		int out_size = 0;
		int delays_size = 0;
		memset(&g, 0, sizeof(g));
		if (delays)
		{
			*delays = 0;
		}

		do
		{
			u = __stbi_gif_load_next(s, &g, comp, req_comp, two_back);
			if (u == (stbi_uc*)s) u = 0; // end of animated gif marker

			if (u)
			{
				*x = g.w;
				*y = g.h;
				++layers;
				stride = g.w * g.h * 4;

				if (out)
				{
					// grow the frame stack by one layer
					void* tmp = STBI_REALLOC_SIZED(out, out_size, layers * stride);
					if (NULL == tmp)
					{
						STBI_FREE(g.out);
						STBI_FREE(g.history);
						STBI_FREE(g.background);
						STBI_FREE(out); // FIX: realloc failure no longer leaks the old buffer
						if (delays) STBI_FREE(*delays);
						return __stbi_errpuc("outofmem", "Out of memory");
					}
					out = (stbi_uc*)tmp;
					out_size = layers * stride;

					if (delays)
					{
						// FIX: check the realloc instead of overwriting *delays unconditionally
						int* new_delays = (int*)STBI_REALLOC_SIZED(*delays, delays_size, sizeof(int) * layers);
						if (NULL == new_delays)
						{
							STBI_FREE(g.out);
							STBI_FREE(g.history);
							STBI_FREE(g.background);
							STBI_FREE(out);
							STBI_FREE(*delays);
							return __stbi_errpuc("outofmem", "Out of memory");
						}
						*delays = new_delays;
						delays_size = (int)(layers * sizeof(int));
					}
				}
				else
				{
					out = (stbi_uc*)__stbi_malloc(layers * stride);
					if (!out) // FIX: previously memcpy'd into an unchecked allocation
					{
						STBI_FREE(g.out);
						STBI_FREE(g.history);
						STBI_FREE(g.background);
						return __stbi_errpuc("outofmem", "Out of memory");
					}
					out_size = layers * stride;
					if (delays)
					{
						*delays = (int*)__stbi_malloc(layers * sizeof(int));
						if (!*delays)
						{
							STBI_FREE(g.out);
							STBI_FREE(g.history);
							STBI_FREE(g.background);
							STBI_FREE(out);
							return __stbi_errpuc("outofmem", "Out of memory");
						}
						delays_size = (int)(layers * sizeof(int));
					}
				}
				memcpy(out + ((layers - 1) * stride), u, stride);
				if (layers >= 2)
				{
					// FIX: was `out - 2 * stride`, which points before the start of
					// the buffer; the frame two back lives at layer index layers-2
					two_back = out + (layers - 2) * stride;
				}

				if (delays)
				{
					(*delays)[layers - 1] = g.Delay;
				}
			}
		} while (u != 0);

		// free temp buffers used by the per-frame decoder
		STBI_FREE(g.out);
		STBI_FREE(g.history);
		STBI_FREE(g.background);

		// do the final conversion after loading everything;
		if (req_comp && req_comp != 4) out = __stbi_convert_format(out, 4, req_comp, layers * g.w, g.h);

		*z = layers;
		return out;
	}
	else
	{
		return __stbi_errpuc("not GIF", "Image was not as a gif type.");
	}
}

// Single-frame GIF entry point: decode the first frame, convert to
// req_comp channels, and release the animation bookkeeping buffers.
static void* __stbi_gif_load(__stbi_context* s, int* x, int* y, int* comp, int req_comp, __stbi_result_info* ri)
{
	__stbi_gif g;
	memset(&g, 0, sizeof(g));
	STBI_NOTUSED(ri);

	stbi_uc* frame = __stbi_gif_load_next(s, &g, comp, req_comp, 0);
	if (frame == (stbi_uc*)s) frame = 0; // sentinel: end-of-animation, not a real frame

	if (frame)
	{
		*x = g.w;
		*y = g.h;
		// convert after a successful load so multi-frame paths can share the step
		if (req_comp && req_comp != 4) frame = __stbi_convert_format(frame, 4, req_comp, g.w, g.h);
	}
	else if (g.out)
	{
		// decode failed after the canvas was allocated: release it
		STBI_FREE(g.out);
	}

	// buffers only needed while decoding multiple frames
	STBI_FREE(g.history);
	STBI_FREE(g.background);

	return frame;
}

// Thin dispatcher wrapper over the raw GIF header probe.
static int __stbi_gif_info(__stbi_context* s, int* x, int* y, int* comp)
{
	const int ok = __stbi_gif_info_raw(s, x, y, comp);
	return ok;
}
#endif

// *************************************************************************************************
// Radiance RGBE HDR loader
// originally by Nicolas Schulz
#ifndef STBI_NO_HDR
// Compare the stream prefix against `signature` (NUL-terminated).
// Rewinds only on a full match; on mismatch the stream is left advanced.
static int __stbi_hdr_test_core(__stbi_context* s, const char* signature)
{
	int i;
	for (i = 0; signature[i]; ++i)
		if (__stbi_get8(s) != signature[i]) return 0;
	__stbi_rewind(s);
	return 1;
}

// Non-destructive HDR probe: accept either Radiance header magic.
static int __stbi_hdr_test(__stbi_context* s)
{
	int ok = __stbi_hdr_test_core(s, "#?RADIANCE\n");
	__stbi_rewind(s);
	if (ok) return ok;

	ok = __stbi_hdr_test_core(s, "#?RGBE\n");
	__stbi_rewind(s);
	return ok;
}

#define STBI__HDR_BUFLEN 1024
// Read one newline-terminated header line into `buffer` (capacity
// STBI__HDR_BUFLEN). Overlong lines are truncated and the remainder of
// the line is discarded. Returns `buffer`, always NUL-terminated.
static char* __stbi_hdr_gettoken(__stbi_context* z, char* buffer)
{
	int len = 0;
	char c = '\0';

	c = (char)__stbi_get8(z);

	while (!__stbi_at_eof(z) && c != '\n')
	{
		buffer[len++] = c;
		if (len == STBI__HDR_BUFLEN - 1)
		{
			// buffer full: flush to end of line
			while (!__stbi_at_eof(z) && __stbi_get8(z) != '\n')
				;
			break;
		}
		c = (char)__stbi_get8(z);
	}

	buffer[len] = 0;
	return buffer;
}

// Decode one RGBE pixel into req_comp float channels: 1=grey, 2=grey+alpha,
// 3=RGB, 4=RGBA. Exponent byte 0 encodes black; alpha, when present, is 1.
static void __stbi_hdr_convert(float* output, stbi_uc* input, int req_comp)
{
	if (input[3] == 0)
	{
		// exponent 0: the pixel is black
		switch (req_comp)
		{
		case 4: output[3] = 1; /* fallthrough */
		case 3: output[0] = output[1] = output[2] = 0; break;
		case 2: output[1] = 1; /* fallthrough */
		case 1: output[0] = 0; break;
		}
		return;
	}

	// shared exponent: 2^(e-128), with an extra 2^-8 for the 8-bit mantissas
	const float scale = (float)ldexp(1.0f, input[3] - (int)(128 + 8));
	if (req_comp <= 2)
	{
		// greyscale output: average the three mantissas
		output[0] = (input[0] + input[1] + input[2]) * scale / 3;
	}
	else
	{
		output[0] = input[0] * scale;
		output[1] = input[1] * scale;
		output[2] = input[2] * scale;
	}
	if (req_comp == 2) output[1] = 1;
	if (req_comp == 4) output[3] = 1;
}

// Load a Radiance HDR file as floats. Supports flat RGBE scanlines and the
// adaptive RLE scanline format (widths 8..32767). Returns a malloc'd
// width*height*req_comp float buffer, or NULL with the failure reason set.
static float* __stbi_hdr_load(__stbi_context* s, int* x, int* y, int* comp, int req_comp, __stbi_result_info* ri)
{
	char buffer[STBI__HDR_BUFLEN];
	char* token;
	int valid = 0;
	int width, height;
	stbi_uc* scanline;
	float* hdr_data;
	int len;
	unsigned char count, value;
	int i, j, k, c1, c2, z;
	const char* headerToken;
	STBI_NOTUSED(ri);

	// Check identifier
	headerToken = __stbi_hdr_gettoken(s, buffer);
	if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 0) return __stbi_errpf("not HDR", "Corrupt HDR image");

	// Parse header: scan lines until the blank separator, requiring the
	// one supported FORMAT declaration
	for (;;)
	{
		token = __stbi_hdr_gettoken(s, buffer);
		if (token[0] == 0) break;
		if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1;
	}

	if (!valid) return __stbi_errpf("unsupported format", "Unsupported HDR format");

	// Parse width and height
	// can't use sscanf() if we're not using stdio!
	token = __stbi_hdr_gettoken(s, buffer);
	if (strncmp(token, "-Y ", 3)) return __stbi_errpf("unsupported value layout", "Unsupported HDR format");
	token += 3;
	height = (int)strtol(token, &token, 10);
	while (*token == ' ') ++token;
	if (strncmp(token, "+X ", 3)) return __stbi_errpf("unsupported value layout", "Unsupported HDR format");
	token += 3;
	width = (int)strtol(token, NULL, 10);

	if (height > STBI_MAX_DIMENSIONS) return __stbi_errpf("too large", "Very large image (corrupt?)");
	if (width > STBI_MAX_DIMENSIONS) return __stbi_errpf("too large", "Very large image (corrupt?)");

	*x = width;
	*y = height;

	if (comp) *comp = 3;
	if (req_comp == 0) req_comp = 3;

	if (!__stbi_mad4sizes_valid(width, height, req_comp, sizeof(float), 0)) return __stbi_errpf("too large", "HDR image is too large");

	// Read value
	hdr_data = (float*)__stbi_malloc_mad4(width, height, req_comp, sizeof(float), 0);
	if (!hdr_data) return __stbi_errpf("outofmem", "Out of memory");

	// Load image value
	// image value is stored as some number of scanlines
	if (width < 8 || width >= 32768)
	{
		// Read flat value (RLE is only defined for widths 8..32767)
		for (j = 0; j < height; ++j)
		{
			for (i = 0; i < width; ++i)
			{
				stbi_uc rgbe[4];
			main_decode_loop:
				// NOTE: also the goto target from the RLE path below when a
				// scanline turns out not to be run-length encoded
				__stbi_getn(s, rgbe, 4);
				__stbi_hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp);
			}
		}
	}
	else
	{
		// Read RLE-encoded value
		scanline = NULL;

		for (j = 0; j < height; ++j)
		{
			c1 = __stbi_get8(s);
			c2 = __stbi_get8(s);
			len = __stbi_get8(s);
			if (c1 != 2 || c2 != 2 || (len & 0x80))
			{
				// not run-length encoded, so we have to actually use THIS value as a decoded
				// pixel (note this can't be a valid pixel--one of RGB must be >= 128)
				stbi_uc rgbe[4]{};
				rgbe[0] = (stbi_uc)c1;
				rgbe[1] = (stbi_uc)c2;
				rgbe[2] = (stbi_uc)len;
				rgbe[3] = (stbi_uc)__stbi_get8(s);
				__stbi_hdr_convert(hdr_data, rgbe, req_comp);
				i = 1;
				j = 0;
				STBI_FREE(scanline);
				goto main_decode_loop; // yes, this makes no sense
			}
			// remaining 16 bits of the header are the scanline length
			len <<= 8;
			len |= __stbi_get8(s);
			if (len != width)
			{
				STBI_FREE(hdr_data);
				STBI_FREE(scanline);
				return __stbi_errpf("invalid decoded scanline length", "corrupt HDR");
			}
			// lazily allocate one reusable RGBE scanline buffer
			if (scanline == NULL)
			{
				scanline = (stbi_uc*)__stbi_malloc_mad2(width, 4, 0);
				if (!scanline)
				{
					STBI_FREE(hdr_data);
					return __stbi_errpf("outofmem", "Out of memory");
				}
			}

			// each of the 4 components is RLE-coded separately
			for (k = 0; k < 4; ++k)
			{
				int nleft;
				i = 0;
				while ((nleft = width - i) > 0)
				{
					count = __stbi_get8(s);
					if (count > 128)
					{
						// Run
						value = __stbi_get8(s);
						count -= 128;
						if (count > nleft)
						{
							STBI_FREE(hdr_data);
							STBI_FREE(scanline);
							return __stbi_errpf("corrupt", "bad RLE value in HDR");
						}
						for (z = 0; z < count; ++z) scanline[i++ * 4 + k] = value;
					}
					else
					{
						// Dump
						if (count > nleft)
						{
							STBI_FREE(hdr_data);
							STBI_FREE(scanline);
							return __stbi_errpf("corrupt", "bad RLE value in HDR");
						}
						for (z = 0; z < count; ++z) scanline[i++ * 4 + k] = __stbi_get8(s);
					}
				}
			}
			// expand the decoded RGBE scanline into floats
			for (i = 0; i < width; ++i) __stbi_hdr_convert(hdr_data + (j * width + i) * req_comp, scanline + i * 4, req_comp);
		}
		if (scanline) STBI_FREE(scanline);
	}

	return hdr_data;
}

static int __stbi_hdr_info(__stbi_context* s, int* x, int* y, int* comp)
{
	char buffer[STBI__HDR_BUFLEN];
	char* token{};
	int valid = 0;
	int dummy{};

	if (!x) x = &dummy;
	if (!y) y = &dummy;
	if (!comp) comp = &dummy;

	if (__stbi_hdr_test(s) == 0)
	{
		__stbi_rewind(s);
		return 0;
	}

	for (;;)
	{
		token = __stbi_hdr_gettoken(s, buffer);
		if (token[0] == 0) break;
		if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1;
	}

	if (!valid)
	{
		__stbi_rewind(s);
		return 0;
	}
	token = __stbi_hdr_gettoken(s, buffer);
	if (strncmp(token, "-Y ", 3))
	{
		__stbi_rewind(s);
		return 0;
	}
	token += 3;
	*y = (int)strtol(token, &token, 10);
	while (*token == ' ') ++token;
	if (strncmp(token, "+X ", 3))
	{
		__stbi_rewind(s);
		return 0;
	}
	token += 3;
	*x = (int)strtol(token, NULL, 10);
	*comp = 3;
	return 1;
}
#endif // STBI_NO_HDR

#ifndef STBI_NO_BMP
// Probe a BMP header for dimensions and channel count without decoding,
// always restoring the stream position.
static int __stbi_bmp_info(__stbi_context* s, int* x, int* y, int* comp)
{
	__stbi_bmp_data info{};
	info.all_a = 255;

	void* parsed = __stbi_bmp_parse_header(s, &info);
	__stbi_rewind(s);
	if (!parsed) return 0;

	if (x) *x = s->img_x;
	if (y) *y = s->img_y;
	if (comp)
	{
		// mirror the loader's channel-count decision for 24bpp + alpha mask
		if (info.bpp == 24 && info.ma == 0xff000000)
			*comp = 3;
		else
			*comp = (info.ma != 0) ? 4 : 3;
	}
	return 1;
}
#endif

#ifndef STBI_NO_PSD
static int __stbi_psd_info(__stbi_context* s, int* x, int* y, int* comp)
{
	int channelCount, dummy{}, depth;
	if (!x) x = &dummy;
	if (!y) y = &dummy;
	if (!comp) comp = &dummy;
	if (__stbi_get32be(s) != 0x38425053)
	{
		__stbi_rewind(s);
		return 0;
	}
	if (__stbi_get16be(s) != 1)
	{
		__stbi_rewind(s);
		return 0;
	}
	__stbi_skip(s, 6);
	channelCount = __stbi_get16be(s);
	if (channelCount < 0 || channelCount > 16)
	{
		__stbi_rewind(s);
		return 0;
	}
	*y = __stbi_get32be(s);
	*x = __stbi_get32be(s);
	depth = __stbi_get16be(s);
	if (depth != 8 && depth != 16)
	{
		__stbi_rewind(s);
		return 0;
	}
	if (__stbi_get16be(s) != 3)
	{
		__stbi_rewind(s);
		return 0;
	}
	*comp = 4;
	return 1;
}

// Report whether the stream is a 16-bit-per-channel PSD.
// Rewinds and returns 0 on any mismatch.
static int __stbi_psd_is16(__stbi_context* s)
{
	int channelCount, depth;

	if (__stbi_get32be(s) != 0x38425053) goto bail; // "8BPS" signature
	if (__stbi_get16be(s) != 1) goto bail; // version must be 1
	__stbi_skip(s, 6); // reserved bytes
	channelCount = __stbi_get16be(s);
	if (channelCount < 0 || channelCount > 16) goto bail;
	(void)__stbi_get32be(s); // height, unused here
	(void)__stbi_get32be(s); // width, unused here
	depth = __stbi_get16be(s);
	if (depth != 16) goto bail;
	return 1;

bail:
	__stbi_rewind(s);
	return 0;
}
#endif

#ifndef STBI_NO_PIC
static int __stbi_pic_info(__stbi_context* s, int* x, int* y, int* comp)
{
	int act_comp = 0, num_packets = 0, chained, dummy{};
	__stbi_pic_packet packets[10]{};

	if (!x) x = &dummy;
	if (!y) y = &dummy;
	if (!comp) comp = &dummy;

	if (!__stbi_pic_is4(s, "\x53\x80\xF6\x34"))
	{
		__stbi_rewind(s);
		return 0;
	}

	__stbi_skip(s, 88);

	*x = __stbi_get16be(s);
	*y = __stbi_get16be(s);
	if (__stbi_at_eof(s))
	{
		__stbi_rewind(s);
		return 0;
	}
	if ((*x) != 0 && (1 << 28) / (*x) < (*y))
	{
		__stbi_rewind(s);
		return 0;
	}

	__stbi_skip(s, 8);

	do
	{
		__stbi_pic_packet* packet;

		if (num_packets == sizeof(packets) / sizeof(packets[0])) return 0;

		packet = &packets[num_packets++];
		chained = __stbi_get8(s);
		packet->size = __stbi_get8(s);
		packet->type = __stbi_get8(s);
		packet->channel = __stbi_get8(s);
		act_comp |= packet->channel;

		if (__stbi_at_eof(s))
		{
			__stbi_rewind(s);
			return 0;
		}
		if (packet->size != 8)
		{
			__stbi_rewind(s);
			return 0;
		}
	} while (chained);

	*comp = (act_comp & 0x10 ? 4 : 3);

	return 1;
}
#endif

// *************************************************************************************************
// Portable Gray Map and Portable Pixel Map loader
// by Ken Miller
//
// PGM: http://netpbm.sourceforge.net/doc/pgm.html
// PPM: http://netpbm.sourceforge.net/doc/ppm.html
//
// Known limitations:
//    Does not support comments in the header section
//    Does not support ASCII image value (formats P2 and P3)
//    Does not support 16-bit-per-channel

#ifndef STBI_NO_PNM

static int __stbi_pnm_test(__stbi_context* s)
{
	char p, t;
	p = (char)__stbi_get8(s);
	t = (char)__stbi_get8(s);
	if (p != 'P' || (t != '5' && t != '6'))
	{
		__stbi_rewind(s);
		return 0;
	}
	return 1;
}

static void* __stbi_pnm_load(__stbi_context* s, int* x, int* y, int* comp, int req_comp, __stbi_result_info* ri)
{
	stbi_uc* out;
	STBI_NOTUSED(ri);

	if (!__stbi_pnm_info(s, (int*)&s->img_x, (int*)&s->img_y, (int*)&s->img_n)) return 0;

	if (s->img_y > STBI_MAX_DIMENSIONS) return __stbi_errpuc("too large", "Very large image (corrupt?)");
	if (s->img_x > STBI_MAX_DIMENSIONS) return __stbi_errpuc("too large", "Very large image (corrupt?)");

	*x = s->img_x;
	*y = s->img_y;
	if (comp) *comp = s->img_n;

	if (!__stbi_mad3sizes_valid(s->img_n, s->img_x, s->img_y, 0)) return __stbi_errpuc("too large", "PNM too large");

	out = (stbi_uc*)__stbi_malloc_mad3(s->img_n, s->img_x, s->img_y, 0);
	if (!out) return __stbi_errpuc("outofmem", "Out of memory");
	__stbi_getn(s, out, s->img_n * s->img_x * s->img_y);

	if (req_comp && req_comp != s->img_n)
	{
		out = __stbi_convert_format(out, s->img_n, req_comp, s->img_x, s->img_y);
		if (out == NULL) return out; // __stbi_convert_format frees input on failure
	}
	return out;
}

// Whitespace test for PNM headers: space, tab, and the four line/page breaks.
static int __stbi_pnm_isspace(char c)
{
	switch (c)
	{
	case ' ': case '\t': case '\n': case '\v': case '\f': case '\r':
		return 1;
	default:
		return 0;
	}
}

// Advance past PNM whitespace and '#' comment lines. *c is the one-byte
// lookahead; on return it holds the first significant character.
static void __stbi_pnm_skip_whitespace(__stbi_context* s, char* c)
{
	for (;;)
	{
		// consume a run of whitespace
		while (!__stbi_at_eof(s) && __stbi_pnm_isspace(*c)) *c = (char)__stbi_get8(s);

		if (__stbi_at_eof(s) || *c != '#') break;

		// '#' starts a comment that runs to end of line
		while (!__stbi_at_eof(s) && *c != '\n' && *c != '\r') *c = (char)__stbi_get8(s);
	}
}

// ASCII decimal digit test (locale-independent, unlike <ctype.h> isdigit).
static int __stbi_pnm_isdigit(char c)
{
	return ('0' <= c) && (c <= '9');
}

static int __stbi_pnm_getinteger(__stbi_context* s, char* c)
{
	int value = 0;

	while (!__stbi_at_eof(s) && __stbi_pnm_isdigit(*c))
	{
		value = value * 10 + (*c - '0');
		*c = (char)__stbi_get8(s);
	}

	return value;
}

static int __stbi_pnm_info(__stbi_context* s, int* x, int* y, int* comp)
{
	int maxv, dummy{};
	char c, p, t;

	if (!x) x = &dummy;
	if (!y) y = &dummy;
	if (!comp) comp = &dummy;

	__stbi_rewind(s);

	// Get identifier
	p = (char)__stbi_get8(s);
	t = (char)__stbi_get8(s);
	if (p != 'P' || (t != '5' && t != '6'))
	{
		__stbi_rewind(s);
		return 0;
	}

	*comp = (t == '6') ? 3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm

	c = (char)__stbi_get8(s);
	__stbi_pnm_skip_whitespace(s, &c);

	*x = __stbi_pnm_getinteger(s, &c); // read width
	__stbi_pnm_skip_whitespace(s, &c);

	*y = __stbi_pnm_getinteger(s, &c); // read height
	__stbi_pnm_skip_whitespace(s, &c);

	maxv = __stbi_pnm_getinteger(s, &c); // read max value

	if (maxv > 255)
		return __stbi_err("max value > 255", "PPM image not 8-bit");
	else
		return 1;
}
#endif

// Dispatch: try each compiled-in format's info probe in turn. Every probe
// is expected to rewind the stream when it does not match.
static int __stbi_info_main(__stbi_context* s, int* x, int* y, int* comp)
{
#ifndef STBI_NO_JPEG
	if (__stbi_jpeg_info(s, x, y, comp)) return 1;
#endif

#ifndef STBI_NO_PNG
	if (__stbi_png_info(s, x, y, comp)) return 1;
#endif

#ifndef STBI_NO_GIF
	if (__stbi_gif_info(s, x, y, comp)) return 1;
#endif

#ifndef STBI_NO_BMP
	if (__stbi_bmp_info(s, x, y, comp)) return 1;
#endif

#ifndef STBI_NO_PSD
	if (__stbi_psd_info(s, x, y, comp)) return 1;
#endif

#ifndef STBI_NO_PIC
	if (__stbi_pic_info(s, x, y, comp)) return 1;
#endif

#ifndef STBI_NO_PNM
	if (__stbi_pnm_info(s, x, y, comp)) return 1;
#endif

#ifndef STBI_NO_HDR
	if (__stbi_hdr_info(s, x, y, comp)) return 1;
#endif

		// test tga last because it's a crappy test!
#ifndef STBI_NO_TGA
	if (__stbi_tga_info(s, x, y, comp)) return 1;
#endif
	return __stbi_err("unknown image type", "Image not of any known type, or corrupt");
}

// Dispatch: returns 1 if the stream is a 16-bit-per-channel image.
// Only the PNG and PSD probes can report 16-bit data.
static int __stbi_is_16_main(__stbi_context* s)
{
#ifndef STBI_NO_PNG
	if (__stbi_png_is16(s)) return 1;
#endif

#ifndef STBI_NO_PSD
	if (__stbi_psd_is16(s)) return 1;
#endif

	return 0;
}

#ifndef STBI_NO_STDIO
STBIDEF int stbi_info(char const* filename, int* x, int* y, int* comp)
{
	FILE* f = __stbi_fopen(filename, "rb");
	int result;
	if (!f) return __stbi_err("can't fopen", "Unable to open file");
	result = stbi_info_from_file(f, x, y, comp);
	fclose(f);
	return result;
}

STBIDEF int stbi_info_from_file(FILE* f, int* x, int* y, int* comp)
{
	int r;
	__stbi_context s;
	long pos = ftell(f);
	__stbi_start_file(&s, f);
	r = __stbi_info_main(&s, x, y, comp);
	fseek(f, pos, SEEK_SET);
	return r;
}

STBIDEF int stbi_is_16_bit(char const* filename)
{
	FILE* f = __stbi_fopen(filename, "rb");
	int result;
	if (!f) return __stbi_err("can't fopen", "Unable to open file");
	result = stbi_is_16_bit_from_file(f);
	fclose(f);
	return result;
}

STBIDEF int stbi_is_16_bit_from_file(FILE* f)
{
	int r;
	__stbi_context s;
	long pos = ftell(f);
	__stbi_start_file(&s, f);
	r = __stbi_is_16_main(&s);
	fseek(f, pos, SEEK_SET);
	return r;
}
#endif // !STBI_NO_STDIO

STBIDEF int stbi_info_from_memory(stbi_uc const* buffer, int len, int* x, int* y, int* comp)
{
	__stbi_context s;
	__stbi_start_mem(&s, buffer, len);
	return __stbi_info_main(&s, x, y, comp);
}

STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const* c, void* user, int* x, int* y, int* comp)
{
	__stbi_context s;
	__stbi_start_callbacks(&s, (stbi_io_callbacks*)c, user);
	return __stbi_info_main(&s, x, y, comp);
}

STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const* buffer, int len)
{
	__stbi_context s;
	__stbi_start_mem(&s, buffer, len);
	return __stbi_is_16_main(&s);
}

STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const* c, void* user)
{
	__stbi_context s;
	__stbi_start_callbacks(&s, (stbi_io_callbacks*)c, user);
	return __stbi_is_16_main(&s);
}
