(* A Lexer for APython *)
{ 
open Parser
open Lexing

(* Line counter maintained by hand.  NOTE(review): in this file only
   str_lex bumps it; token-level line tracking goes through the
   Lexing positions updated by [new_line] below. *)
let line_number = ref 1

(* Bump [line_number].  The argument is ignored; binding it [_]
   avoids an unused-variable warning (the original bound [x]). *)
let next_line = fun _ -> incr line_number

(* [identifier id] maps a keyword spelling to its dedicated token,
   and wraps any other name as [IDENT id]. *)
let identifier id =
    try
        List.assoc id [
            ("True", TRUE);
            ("False", FALSE);
            ("apodef", APODEF);
            ("def", DEF);
            ("class", CLASS);
            ("return", RETURN);
            ("none", NONE);
            ("pass", PASS);
            ("if", IF);
            ("else", ELSE);
            ("in", IN);
            ("while", WHILE);
            ("for", FOR);
            ("lambda", LAMBDA)
        ]
    with Not_found ->
        IDENT id

(* Record one newline in the lexbuf's position data so that error
   messages report the correct line and column. *)
let new_line lexbuf =
    lexbuf.lex_curr_p <- { lexbuf.lex_curr_p with
                            pos_lnum = lexbuf.lex_curr_p.pos_lnum + 1;
                            pos_bol = lexbuf.lex_curr_p.pos_cnum;
                         }
}


(* A nasty tokenizer *)

(* Main tokenizer.  [indents] and [target_indent] carry the Python
   indentation state; [token] is the wrapper defined in the trailer,
   re-entered after consuming insignificant whitespace so INDENT /
   DEDENT emission stays centralized there. *)
rule token' indents target_indent token = parse
(* line structure *)
    "\\\n"                          { new_line lexbuf; token indents target_indent lexbuf }     (* Line continuation: no NEWLINE emitted *)
  | '\n'(' ')*                      { new_line lexbuf; target_indent := String.length (lexeme lexbuf) - 1; NEWLINE }
    (* Whitespace-only (blank) line: TWO '\n' are consumed by this
       pattern, so the position must be advanced twice (the original
       called [new_line] once, drifting pos_lnum off by one per blank
       line).  Indentation is taken from the second line. *)
  | '\n'([' ' '\t']*'\n'((' ')* as ss)) { new_line lexbuf; new_line lexbuf; target_indent := String.length ss; NEWLINE }
  | [' ' '\t']+                     { token indents target_indent lexbuf }
(* names: keywords are resolved inside [identifier] *)
  | ['a'-'z' 'A'-'Z' '_']['a'-'z' 'A'-'Z' '_' '0'-'9']*        { identifier (lexeme lexbuf) }
(* strings and integers *)
  | '"'                             { str_lex "" lexbuf }
  | ['0'-'9']+                      { INTEGER (int_of_string (lexeme lexbuf)) }
(* inlining apost: ":-" opens a raw block closed by "-:" *)
  | ":-"                            { apo_lex "" lexbuf }
(* operators and punctuation.  ocamllex picks the LONGEST match, so
   "**", "<=", "==", etc. win over their one-character prefixes even
   though they are listed in no particular order. *)
  | "->"                            { ARROW }
  | "::"                            { DCOLON }
  | ":"                             { COLON }
  | "="                             { ASSIGN }
  | "("                             { LPAREN }
  | ")"                             { RPAREN }
  | ","                             { COMMA }
  | ";"                             { SEMI }
  | "."                             { DOT }
  | "*"                             { STAR }
  | "**"                            { DSTAR }
  | "-"                             { DASH }
  | "+"                             { PLUS }
  | "%"                             { PERCENT }
  | "<<"                            { LSHIFT }
  | ">>"                            { RSHIFT }
  | "&"                             { AMP }
  | "^"                             { HAT }
  | "<"                             { LT }
  | "<="                            { LEQ }
  | ">"                             { GT }
  | ">="                            { GEQ }
  | "=="                            { EQ }
  | "!="                            { NEQ }
  | "{"                             { LBRACE }
  | "}"                             { RBRACE }
  | eof                             { EOF }


(* String-literal body; [buf] accumulates the decoded characters.
   Entered by token' after the opening '"'.  (buf is a plain string,
   so accumulation is O(n^2); acceptable for short literals.) *)
and str_lex buf = parse
    "\""                            { STRING buf }
  | "\\\""                          { str_lex (buf ^ "\"") lexbuf }   (* escaped quote: previously impossible to embed '"' *)
  | "\\n"                           { str_lex (buf ^ "\n") lexbuf }
  | "\n"                            { new_line lexbuf; line_number := !line_number + 1; str_lex (buf ^ "\n") lexbuf }
  | eof                             { failwith "Unterminated string literal" }   (* was the opaque Failure "lexing: empty token" *)
  | _                               { str_lex (buf ^ (lexeme lexbuf)) lexbuf }


(* Raw "apostring" block opened by ":-" and closed by "-:"; [buf]
   accumulates the enclosed text verbatim. *)
and apo_lex buf = parse
    "-:"                           { APOSTRING buf }
  | '\n'                           { new_line lexbuf; apo_lex (buf ^ "\n") lexbuf }   (* keep lexbuf line positions accurate: '\n' previously fell through to [_] *)
  | eof                            { failwith "Unterminated apostring: missing closing \"-:\"" }   (* was the opaque Failure "lexing: empty token" *)
  | _                              { apo_lex (buf ^ (lexeme lexbuf)) lexbuf }

{

(** A wrapper function for token' function that does all the work 
    This function has type: int Stack.t ref -> int ref -> Lexing.lexbuf -> Parser.token

IMPORTANT: Python actually has a context-sensitive grammar! (cf. the Chomsky hierarchy)

*)
(* Compare the innermost recorded indentation level with the level
   requested by the last NEWLINE, and emit one DEDENT or INDENT per
   call until they agree; only then hand over to the generated rule. *)
let rec token indents target_indent lexbuf =
    let current = Stack.top !indents in
    if current > !target_indent then begin
        (* Pop exactly one level; repeated calls drain the rest. *)
        ignore (Stack.pop !indents);
        DEDENT
    end
    else if current < !target_indent then begin
        Stack.push !target_indent !indents;
        INDENT
    end
    else
        token' indents target_indent token lexbuf

}
