#ifndef UTF16_HEADER
#define UTF16_HEADER

// headers this file depends on
#include <stdio.h>
#include "../ARRAY.h"
#include "../NUMBER/BITS.h"
#include "ASCII.h"
#include "UNICD.h"

// Represents a UTF-16 character by its code point, stored in a
// unicd_int integer container (declared in UNICD.h), analogous to
// how C stores a character in a char.
// UTF-16 code points map 1:1 onto Unicode code points.
typedef unicd_int utf16_int;

// minimum and maximum code points supported by UTF-16
#define MIN_UTF16_INT 0x000000 // same as MIN_UNICD_INT
#define MAX_UTF16_INT 0x10FFFF // UTF-16 cannot encode anything above U+10FFFF
#define INV_UTF16_INT 0xFFFFFF // "invalid" sentinel; same as INV_UNICD_INT
// placeholder strings printed when a code point / encoded sequence is invalid
#define INV_UTF16_INT_STR "[INV_UTF16_INT]"
#define INV_ENC_UTF16_STR "[INV_ENC_UTF16]"

// U+D800..U+DFFF are the surrogate halves: reserved for the encoding
// mechanism itself, never valid as stand-alone code points
#define INV_UTF16_INT_RANGE1_MIN 0xD800
#define INV_UTF16_INT_RANGE1_MAX 0xDFFF

// UTF16 can encode characters in blocks of 2 bytes
// these building blocks of 2 bytes can be affected by endianness

// https://stackoverflow.com/questions/6240055/manually-converting-unicode-codepoints-into-utf-8-and-utf-16
// https://en.wikipedia.org/wiki/UTF-16
// https://linjan2.github.io/utf16-utf8.html

// NOTE: not every code point is representable in UTF-16; the
// surrogate range U+D800-U+DFFF is reserved for the encoding itself

// BIG ENDIAN
// code point range     bin code point representation   bin write representation              bits encoded
// 0x0000 - 0xD7FF      XXXXXXXX YYYYYYYY               XXXXXXXX YYYYYYYY                     16    
// 0xD800 - 0xDFFF      (invalid code points)           ...                                   ...
// 0xE000 - 0xFFFF      XXXXXXXX YYYYYYYY               XXXXXXXX YYYYYYYY                     16
// 0x010000 - 0x10FFFF  XXYY YYYYYYWW ZZZZZZZZ          110110XX YYYYYYYY 110111WW ZZZZZZZZ   20

// LITTLE ENDIAN (bytes swapped within each 16-bit unit)
// code point range     bin code point representation   bin write representation              bits encoded
// 0x0000 - 0xD7FF      XXXXXXXX YYYYYYYY               YYYYYYYY XXXXXXXX                     16
// 0xD800 - 0xDFFF      (invalid code points)           ...                                   ...
// 0xE000 - 0xFFFF      XXXXXXXX YYYYYYYY               YYYYYYYY XXXXXXXX                     16
// 0x010000 - 0x10FFFF  XXYY YYYYYYWW ZZZZZZZZ          YYYYYYYY 110110XX ZZZZZZZZ 110111WW   20

// for code points in the supplementary range (0x010000 - 0x10FFFF)
// the following mask isolates the fixed marker bits of a surrogate's
// leading byte (applied to whichever byte[i] holds the marker,
// depending on endianness)
#define utf16_byte_mask_1 0xFC // 11111100

// after masking, a well-formed surrogate pair yields exactly these
// marker values
#define utf16_block_1_bits 0xD8 // 11011000 -> high (first) surrogate
#define utf16_block_2_bits 0xDC // 11011100 -> low (second) surrogate

// also, for the same range, this is the value subtracted from the
// actual code point so the remainder fits in the 20 encoded bits
#define UTF16_LAST_RANGE_BIAS 0x10000

// check, get, write and print a UTF-16 character
// NOTE: the "be" and "le" variants operate on Big Endian and
//       Little Endian byte-ordered UTF-16 encoded buffers

// checking
// whether ch is a code point UTF-16 can represent
// (in range, not a lone surrogate)
byte check_utf16_int(utf16_int ch);
// whether the first character encoded at src (ssize bytes readable)
// forms a well-formed UTF-16 sequence
byte check_enc_utf16be(void * src, umax ssize);
byte check_enc_utf16le(void * src, umax ssize);
// get/write
// decode the first encoded character at src into its code point;
// presumably yields INV_UTF16_INT on malformed input -- confirm in UTF16.c
utf16_int get_utf16be_int(void * src, umax ssize);
utf16_int get_utf16le_int(void * src, umax ssize);
// encode ch into dest (dsize bytes writable); presumably returns the
// position just past the bytes written -- confirm in UTF16.c
void * write_enc_utf16be(utf16_int ch, void * dest, umax dsize);
void * write_enc_utf16le(utf16_int ch, void * dest, umax dsize);
// printing
byte print_utf16_int(utf16_int ch);
byte print_enc_utf16be(void * src, umax ssize);
byte print_enc_utf16le(void * src, umax ssize);

// Unicode conversion related (for compatibility with CHAR_ARR functions)

// checking
byte check_utf16_as_unicd(utf16_int ch);
// NOTE(review): returns bool while the sibling checks above return
// byte -- consider unifying the return types (requires updating the
// definition in UTF16.c as well)
bool check_unicd_as_utf16(unicd_int ch);
// getting
unicd_int get_utf16_as_unicd(utf16_int ch);
utf16_int get_unicd_as_utf16(unicd_int ch);

#include "UTF16.c"

#endif // UTF16_HEADER
