%% TODO: single and double quote [de]tokenizing
%% TODO: Comment support
%% TODO: 2'010110 style numbers
%% TODO: Need to figure out with parser what to do about "f(.." vs "f (.."

:- module(token,[tokenize/3,whitespace/2]).
:- use_module(operator).

%% whitespace//0: consume the longest possible run of spaces, newlines
%% and %-comments.  Exported (as whitespace/2) for use by the parser.
%%
%% The cut sits AFTER each consumed character so that each clause
%% commits as soon as it has matched: this actually enforces the
%% longest-match intent.  (Previously the cut was alone in the final
%% empty clause, where it pruned nothing -- last clause, no earlier
%% goals -- so shorter matches were still offered on backtracking.)
whitespace --> [0' ], !, whitespace.
whitespace --> [0'\n], !, whitespace.
whitespace --> [0'%], !, finish_single_line_comment, whitespace.
whitespace --> []. %% No whitespace left to eat

%% finish_single_line_comment//0: skip the rest of a %-comment, up to
%% and including the terminating newline (consuming the newline is
%% harmless -- whitespace//0 would have eaten it next anyway).
finish_single_line_comment --> [0'\n], !. %% Never skip past a newline
finish_single_line_comment --> [_], finish_single_line_comment.
%% A comment that runs to end-of-input (no trailing newline) is also
%% complete; without this base case such input failed to tokenize.
finish_single_line_comment([], []).

%% uppercase(?Code)//: match one ASCII upper-case letter (A-Z).
%% between/3 both checks a bound code and, when Code is unbound,
%% enumerates 0'A..0'Z in the same order the original
%% one-clause-per-letter table did, so all calling modes are kept.
uppercase(Code) --> [Code], { between(0'A, 0'Z, Code) }.

%% lowercase(?Code)//: match one ASCII lower-case letter (a-z).
%% Collapses the per-letter clause table into a range test;
%% between/3 also enumerates 0'a..0'z when Code is unbound, matching
%% the original clause order.
lowercase(Code) --> [Code], { between(0'a, 0'z, Code) }.

%% digit(?Code)//: match one decimal digit (0-9).
%% Same range-test collapse as uppercase//1 and lowercase//1;
%% enumeration order 0'0..0'9 is preserved when Code is unbound.
digit(Code) --> [Code], { between(0'0, 0'9, Code) }.

underscore(0'_) --> [0'_].

%% Single-character punctuation tokens, delivered as Prolog atoms.
%% (The comma is also excluded from operator tokens below, since the
%% parser treats it specially.)
token('(') --> "(".
token(')') --> ")".

token('[') --> "[".
token(']') --> "]".

token('{') --> "{".
token('}') --> "}".

token(',') --> ",".
token('|') --> "|".

%% token(atom(Codes))//: an identifier-style atom -- one lower-case
%% letter followed by any mix of letters, digits and underscores.
%% The trailing cut commits to the longest identifier found.
token(atom([First|More])) --> atom_start(First), atom_finish(More), !.

%% The first character must be a lower-case letter.
atom_start(Code) --> lowercase(Code).

%% Characters permitted after the first one.
atom_middle(Code)   --> underscore(Code).
atom_middle(Code)   --> lowercase(Code).
atom_middle(Code)   --> uppercase(Code).
atom_middle(Code)   --> digit(Code).

%% Greedy tail: the recursive clause comes first, so the longest
%% match is produced before any shorter alternative.
atom_finish([Code|Codes]) --> atom_middle(Code), atom_finish(Codes).
atom_finish([])           --> [].

%% token(atom(Codes))//: single-quoted atom.  A doubled quote ('')
%% inside the quotes stands for one literal quote character.  The cut
%% commits to the longest run of quoted characters, after which the
%% closing quote must follow.
%% NOTE(review): the empty quoted atom '' is not accepted -- the head
%% requires at least one character ([C|Cs]); the TODO at the top of
%% the file suggests quote [de]tokenizing is known to be incomplete.
token(atom([C|Cs])) --> [0''], single_quoted([C|Cs]), !, [0''].
%% Clause order matters: the doubled-quote escape must be tried before
%% the plain-character clause, and the empty clause must come last so
%% the longest match is produced first.
single_quoted([0''|Cs]) --> [0''],[0''], single_quoted(Cs).
single_quoted([C|Cs])   --> [C], {C \= 0''}, single_quoted(Cs).
single_quoted([])       --> [].

%% token(atom(Ds))//: an unsigned run of decimal digits, carried as a
%% code list just like identifier atoms (the parser interprets it).
token(atom([D0|Rest])) --> digits([D0|Rest]).
%% The cut after each matched digit commits to the greedy match.
digits([D0|Rest]) --> digit(D0), !, digits(Rest).
digits([])        --> [].

token(atom("!")) --> "!".

token(atom(O)) --> op_token(O), {O \= ","}, !. %% Comma treated specially by the parser, Longest match first

%% token(variable(Codes))//: a Prolog variable -- an upper-case letter
%% or underscore followed by any mix of letters, digits and
%% underscores.  The cut commits to the longest match.
token(variable([C|Cs])) --> variable_start(C), variable_finish(Cs), !.
variable_start(C) --> uppercase(C).
variable_start(C) --> underscore(C).
%% Characters permitted after the first one.
variable_middle(C)   --> underscore(C).
variable_middle(C)   --> lowercase(C).
variable_middle(C)   --> uppercase(C).
variable_middle(C)   --> digit(C).
%% Greedy tail (recursive clause first = longest match first).
%% Fix: this previously recursed through atom_middle//1 and
%% atom_finish//1, leaving variable_middle//1 defined but unused.
%% Behavior is unchanged -- both middles admit the same characters.
variable_finish([C|Cs]) --> variable_middle(C), variable_finish(Cs).
variable_finish([])     --> [].

%% token(string(Codes))//: double-quoted string literal.  The cut
%% commits to the longest run of body characters, after which the
%% closing quote must follow.
token(string(Codes)) --> [0'"],finish_double_quote(Codes),!,[0'"]. %% Longest match
%% Recognized escapes: \\ yields a backslash, \" yields a quote.  Any
%% other character -- including a lone backslash before, say, `n` --
%% is kept literally (no \n/\t translation; see the TODO header).
%% Clause order matters: escape clauses first, empty clause last so
%% the longest match is produced first; when run in reverse
%% (detokenizing), the escape clauses re-insert the backslashes.
finish_double_quote([0'\\|Cs]) --> [0'\\,0'\\], finish_double_quote(Cs).
finish_double_quote([0'"|Cs])  --> [0'\\,0'"], finish_double_quote(Cs).
finish_double_quote([C|Cs])    --> [C], {C \= 0'"}, finish_double_quote(Cs).
finish_double_quote([])        --> [].

%% tokenize(?Tokens, ?String, ?Rest): bidirectional entry point.
%% A bound String is tokenized forward; an unbound String is
%% generated (detokenized) from Tokens.
tokenize(Tokens, String, End) :-
    (   var(String)
    ->  tokenize_backward(Tokens, String, End)
    ;   tokenize_forward(Tokens, String, End)
    ).

%% tokenize_forward(-Tokens)//: tokenize a bound code list.
%% The first clause fuses an atom immediately followed by `(` into a
%% single open_atom(Name) token, letting the parser distinguish the
%% call syntax "f(..." from "f (..." (see the TODO at the top of the
%% file).  If no `(` directly follows the atom, this clause fails and
%% the plain-token clause below handles it instead.
tokenize_forward([open_atom(T)|Ts]) --> whitespace, token(atom(T)), token('('), tokenize_forward(Ts).
tokenize_forward([T|Ts]) --> whitespace, token(T), tokenize_forward(Ts).
%% End of input: only trailing whitespace remains.
tokenize_forward([])     --> whitespace.

%% tokenize_backward(+Tokens)//: generate a code list from a token
%% list (detokenizing), emitting each token via token//1 in reverse.
%% NOTE(review): no whitespace is emitted between tokens, so e.g. two
%% consecutive atom tokens would be rendered with nothing separating
%% them and would not re-tokenize to the same token list -- confirm
%% this is acceptable for the intended round-trip.
tokenize_backward([])     --> [].
tokenize_backward([T|Ts]) --> token(T), tokenize_backward(Ts).
