(* Mimo Lexical analyser *)

{
	open Lexing
	open Parser
	(*open Lexer_tokens*)

	exception Lexing_error of string
	
	let keywords = 
		let l = (List.append [
				"break",		KBreak;
				"case",			KCase;
				"catch",		KCatch;
				"continue",		KContinue;
				"debugger",		KDebugger;
				"default",		KDefault;
				"delete",		KDelete;
				"do",			KDo;
				"else",			KElse;
				"finally",		KFinally;
				"for",			KFor;
				"function",		KFunction;
				"get",			KGet;
				"if",			KIf;
				"in",			KIn;
				"instanceof",	KInstanceof;
				"new",			KNew;
				"return",		KReturn;
				"switch",		KSwitch;
				"set",			KSet;
				"this",			KThis;
				"throw",		KThrow;
				"try",			KTry;
				"typeof",		KTypeof;
				"var",			KVar;
				"void",			KVoid;
				"while",		KWhile;
				"with",			KWith;
			] (List.append [
				"class",		KClass;
				"const",		KConst;
				"enum",			KEnum;
				"export",		KExport;
				"extends",		KExtends;
				"import",		KImport;
				"super",		KSuper;
				"meta",			KMeta;
			] (List.append [
				"null",			KNull;
				"true",			KTrue;
				"false",		KFalse;
			] (List.append [
				"namespace",	KNamespace;
				"using",		KUsing;
				"public",		KPublic;
				"private",		KPrivate;
				"protected",	KProtected;
				"select",		KSelect;
				"from",			KFrom;
				"where",		KWhere;
				"inline",		KInline;
				"fun",			KFun;
			] [
				"Infinity",		KInfinity;
			])))) in
			 let h = Hashtbl.create (List.length l) in
				List.iter (fun (s, k) -> Hashtbl.add h s k) l; h

	let stringToToken =
		fun s ->
		try
			Hashtbl.find keywords s
		with
			Not_found -> TIdentifier s

	let newline lexbuf n =
		let pos = lexbuf.lex_curr_p in
			lexbuf.lex_curr_p <- 
			{
				pos with pos_lnum = pos.pos_lnum + n;
				pos_bol = pos.pos_cnum
			}
			
	let string_of_char = String.make 1
	
	let regexable = ref true
	
	let current_token = ref TNewLine
	
	(*
	let is_operator = function
		| TDiv | TLChevron | TRChevron | TLEqual | TGEqual | TNEqual 			-> true
		| TDblEqual | TNDblEqual | TPlus | TMinus | TTimes | TMod 				-> true
		| TAmp | TPipe | TCaret | TNot | TTilde | TAnd | TOr | TQuestion 		-> true
		| TColon | TEqual | TPlusEqual | TMinusEqual | TTimesEqual 				-> true
		| TModEqual | TLDblChevronEqual | TRDblChevronEqual | TPipeEqual		-> true
		| TRTrplChevronEqual | TAmpEqual | TCaretEqual | TDivEqual 				-> true
		| TComma | TSemiColon | TLDblChevron | TRDblChevron | TRTrplChevron		-> true
		| _ -> false
	*)
	
	let tokenize t = 
		regexable := (match t with
		| TIdentifier _ | TNumber _ | TRBrace | TRPar | TRBracket | TString _ -> false
		| _ -> true );
		current_token := t; t
		
	let count_new_lines nl = 
		let nl = Str.global_replace (Str.regexp "\x09\\|\x0B\\|\x0C\\|\x20\\|\xA0") "" nl in
		(String.length (Str.global_replace (Str.regexp "\x0D\x0A") "\x0A" nl))
		
	
	let blockify nl func lexbuf =
		let current_token = !current_token in
		(match count_new_lines nl with
			| 0 -> func lexbuf
			| _ -> (match func lexbuf with
				| [] -> []
				| a::lst -> (match current_token,a with
					| TDiv, _  | TLChevron, _ | TRChevron, _ | TLEqual, _ | TGEqual, _ | TNEqual, _
					| TDblEqual, _ | TNDblEqual, _ | TPlus, _ | TMinus, _ | TTimes, _ | TMod, _
					| TAmp, _ | TPipe, _ | TCaret, _ | TNot, _ | TTilde, _ | TAnd, _ | TOr, _ | TQuestion, _
					| _, TQuestion | _, TDot
					| TColon, _ | TEqual, _ | TPlusEqual, _ | TMinusEqual, _ | TTimesEqual, _
					| TModEqual, _ | TLDblChevronEqual, _ | TRDblChevronEqual, _ | TPipeEqual, _
					| TOperator _, _ | KIn, _ | KVar, _ | TLBrace, _ | TLPar, _
					| TRTrplChevronEqual, _ | TAmpEqual, _ | TCaretEqual, _ | TDivEqual, _ 
					| TRBrace, TRBrace | _, KCatch | _, KFinally
					| TComma, _ | TSemiColon, _ | TLDblChevron, _ | TRDblChevron, _ | TRTrplChevron, _ 
					| KPrivate,_ | KProtected, _ | KPublic, _ | KMeta, _ | KInline, _				-> a::lst
					| TRPar, _ -> TNewLine::a::lst
					| _, TIdentifier _ | _, TLBrace | _, KIf | _, KFunction | _, KNamespace 
					| _, KDo | _, KWhile | _, KTry | _, KSwitch | _, KBreak 
					| _, KContinue | _, KDebugger | _, KDefault | _, KDelete
					| _, KGet | _, KReturn |  _, KSet | _, KThrow | _, KVar | _, KWith
					| _, KConst | _, KEnum | _, KExport | _, KImport | _, KSuper | _, KUsing
					| _, KClass | _, KPrivate | _, KPublic | _, KMeta | _, KInline-> TSemiColon::a::lst
					| _ -> a::lst)
				)
		)

	let count_lines lexbuf nl =
		newline lexbuf (count_new_lines nl)
}


let whiteSpace = 
	"\x09"
	| "\x0B" 
	| "\x0C" 
	| "\x20" 
	| "\xA0" 
(*	| "\xFE\xFF" *)

let lineTerminatorSequence	= 
	"\x0D\x0A"
	| "\x0A" 
	| "\x0D" 
(*	| "\x20\x28" 
	| "\x20\x29" *)

let nl = 
	"\x09"
	| "\x0B" 
	| "\x0C" 
	| "\x20" 
	| "\xA0" 
	| "\x0D\x0A"
	| "\x0A" 
	| "\x0D" 

let lletter					= ['a'-'z']
let uletter					= ['A'-'Z']

(* Numbers *)
let hexInteger 				= '0' ['x' 'X'] ['0'-'9' 'A'-'F' 'a'-'f']+
let octalInteger 			= '0' ['0'-'7']*
let decimalDigit			= ['0' - '9']
let nonZeroDigit			= ['1' - '9']
let decimalDigits			= decimalDigit+
let signedInteger			= ['+' '-']? decimalDigits
let decimalInteger			= '0' | (nonZeroDigit decimalDigits?)
let exponentPart			= ['e' 'E'] signedInteger
let decimal 				= (decimalInteger '.' decimalDigits? exponentPart?)
								| '.' decimalDigits exponentPart?
								| decimalInteger exponentPart?
let numeric					= octalInteger | hexInteger | decimal

let unicodeLetter			= lletter | uletter | decimalDigit | '_' | '$'
let identifierStart 		= lletter | uletter | '_' | '$' 
let identifier				= identifierStart unicodeLetter*

rule token = parse
	(* Comments *)
	| "/*"								{(comment_parse lexbuf)}
	| "//"								{(single_comment_parse lexbuf)}
	
	| '`' (identifier as s) '`'			{[tokenize (TIdentifier s)]}
	| "instanceof" (nl as nl)			{count_lines lexbuf nl;(tokenize KInstanceof)::(blockify nl token lexbuf)}
	| "in" (nl as nl)					{count_lines lexbuf nl;(tokenize KIn)::(blockify nl token lexbuf)}
	| (identifier as s)					{[tokenize (stringToToken s)]}
	| (numeric as s)			 		{[tokenize (TNumber s)]}
	| "/="								{(if !regexable
											then regex_parse "=" lexbuf
											else [tokenize TDivEqual])}
	| '/' 								{if !regexable
											then (regex_parse "" lexbuf)
											else [tokenize (TDiv)]}

	| '{'								{[tokenize TLBrace]}
	| '}' 								{[tokenize TRBrace]}
	| '('								{[tokenize TLPar]}
	| ')'								{[tokenize TRPar]}
	| '['								{[tokenize TLBracket]}
	| ']'								{[tokenize TRBracket]}
	
	| '.'								{[tokenize TDot]}
	| ';' 								{[tokenize TSemiColon]}
	| ','								{[tokenize TComma]}

	| "+="								{[tokenize TPlusEqual]}
	| "-="								{[tokenize TMinusEqual]}
	| "*="								{[tokenize TTimesEqual]}
	| "%="								{[tokenize TModEqual]}
	| "<<="								{[tokenize TLDblChevronEqual]}
	| ">>="								{[tokenize TRDblChevronEqual]}
	| ">>>="							{[tokenize TRTrplChevronEqual]}
	| "&="								{[tokenize TAmpEqual]}
	| "|="								{[tokenize TPipeEqual]}
	| "^="								{[tokenize TCaretEqual]}
	
	| "<<"								{[tokenize TLDblChevron]}
	| ">>"								{[tokenize TRDblChevron]}
	| ">>>"								{[tokenize TRTrplChevron]}
	| "<="								{[tokenize TLEqual]}
	| ">="								{[tokenize TGEqual]}
	| '<'								{[tokenize TLChevron]}
	| '>'								{[tokenize TRChevron]}
	| "==="								{[tokenize TTrplEqual]}
	| "=="								{[tokenize TDblEqual]}
	| '='								{[tokenize TEqual]}
	| "!=="								{[tokenize TNDblEqual]}
	| "!="								{[tokenize TNEqual]}
	
	| (lineTerminatorSequence as nl)
		(nl* as nl1) "++"
										{count_lines lexbuf (nl ^ nl1);(tokenize TPreIncrement)::(blockify nl token lexbuf)}
	| (lineTerminatorSequence as nl)
		(nl* as nl1) "--"
										{count_lines lexbuf (nl ^ nl1);(tokenize TPreDecrement)::(blockify nl token lexbuf)}
	| whiteSpace* "++"					{[tokenize TIncrement]}
	| whiteSpace* "--"					{[tokenize TDecrement]}
	| '+'								{[tokenize TPlus]}
	| "->"								{[tokenize TArrow]}
	| '-'								{[tokenize TMinus]}
	| '*'								{[tokenize TTimes]}
	| '%' 								{[tokenize TMod]}
	

	| "&&"								{[tokenize TAnd]}
	| "||"								{[tokenize TOr]}
	| '&'								{[tokenize TAmp]}
	| '|'								{[tokenize TPipe]}
	| '^'								{[tokenize TCaret]}
	| '!'								{[tokenize TNot]}
	| '~'								{[tokenize TTilde]}
	| '@' 								{
											let s = "@" ^ (operator_parse lexbuf) in
											(let lst = token lexbuf in
												match lst with
													| [] -> []
													| a::_ -> (match a with
																| TIdentifier _ | TNumber _ | TLBrace | TLPar | TRegex _ | TString _
																| TLBracket | TPlus | TMinus | TNot | TPreIncrement 
																| TPreDecrement | TEqual -> (TOperator s)::lst
																| _ -> (TPostOperator s)::lst
																)
											)
										}
	| '?'								{[tokenize TQuestion]}
	| ':'								{[tokenize TColon]}
	
										

	(* Strings *)
	| '"'								{[string_parse_dbl "" lexbuf]}
	| '\''								{[string_parse "" lexbuf]}
	
	
	| (nl* as nl)						{count_lines lexbuf nl; blockify nl token lexbuf}
	(*| whiteSpace+						{token lexbuf}
	| lineTerminatorSequence			{newline lexbuf 1; [TNewLine]}*)
	

	| eof								{[TEOF]}
	| _ as c 							{raise (Lexing_error ("illegal character " 
											^ (String.make 1 c)))}

and operator_parse = parse
	| (identifier as s)					{s}
	| _ as c							{raise (Lexing_error ("illegal character " 
											^ (String.make 1 c)))}

and comment_parse = parse
	| "*/"								{token lexbuf}
	| eof								{[TEOF]}
	| lineTerminatorSequence			{newline lexbuf 1; comment_parse lexbuf}
	| _									{comment_parse lexbuf}
	
and single_comment_parse = parse
	| eof								{[TEOF]}
	| lineTerminatorSequence (nl* as nl){newline lexbuf 1; count_lines lexbuf nl; token lexbuf}
	| _									{single_comment_parse lexbuf}
	
and regex_parse str = parse
	| '/' (unicodeLetter* as f)			{[tokenize (TRegex (str,f))]}
	| "\\\\"							{regex_parse (str ^ "\\\\") lexbuf}
	| "\\/"								{regex_parse (str ^ "/") lexbuf}
	| _	as s							{if (str = "" && s = '*')
											then (comment_parse lexbuf)
											else if (str = "" && s = '/')
											then (single_comment_parse lexbuf)
											else (regex_parse (str ^ (string_of_char s)) lexbuf)}

and string_parse str = parse
	| '\''								{tokenize (TString str)}
	| "\\\\"							{string_parse (str ^ "\\\\") lexbuf}
	| "\\'"								{string_parse (str ^ "'") lexbuf}
	| _	as s							{string_parse (str ^ (string_of_char s)) lexbuf}

and string_parse_dbl str = parse
	| '"'								{tokenize (TString str)}
	| "\\\\"							{string_parse_dbl (str ^ "\\\\") lexbuf}
	| "\\\""							{string_parse_dbl (str ^ "\"") lexbuf}
	| _	as s							{string_parse_dbl (str ^ (string_of_char s)) lexbuf}
	
{
	let buf = ref []
	
	let rec buffered_token x = 
		match !buf with
			| [] -> (buf := token x; buffered_token x)
			| a::b -> (buf := b; a)
				
}

