package turtle

import ("regexp"; "strings")

// anyTurtleToken is an RE that will match Turtle tokens (but, by and
// large, not non-Turtle tokens). The spaces and newlines are layout
// only and get stripped for the compiled tokenRE below.
//
// Go's regexp matches leftmost-first (Perl semantics), so ordering
// matters: the keyword alternatives (@prefix, @base, true, false) come
// before the general patterns that would otherwise swallow them, and
// the final `.` catches single-character punctuation.
//
// The numeric alternative must not be able to match the empty string:
// the earlier form `[+\-]?[0-9]*(|\.[0-9]*)(|[Ee]...)` matched "" and,
// being tried before `.`, produced empty tokens for any punctuation
// character. It now requires at least one digit, and uses greedy
// `(...)?` groups (leftmost-first prefers the empty branch of `(|X)`,
// which made the fraction/exponent parts unreachable).

var anyTurtleToken =
`
    <[^>\n]*>
    | @prefix
    | @base
    | [\-_a-zA-Z0-9]*:[\-_a-zA-Z0-9]*
    | true
    | false
    | [+\-]? ( [0-9]+ (\.[0-9]*)? | \.[0-9]+ ) ([Ee] [+\-]? [0-9]+)?
    | @[a-zA-Z\-]+
    | \^\^ (<[^>]*> | [\-_a-zA-Z0-9]*:[\-_a-zA-Z0-9]*)
    | ( "([^"\\]|\\[^\n])*" | """ ( [^\"] | \\[^\n] | "[^"] | ""[^"] )* """ ) 
    | #[^\n]*\n
    | .
`

// tokenRE is anyTurtleToken with the layout whitespace removed,
// compiled once at package initialisation.
var tokenRE = regexp.MustCompile( strings.Replace( strings.Replace(anyTurtleToken, " ", "", -1 ), "\n", "", -1 ) )

// Kind classifies a lexical token read from a Turtle document.
type Kind int

// The token kinds. WRONG is deliberately the zero value, so looking up
// an unknown token in stringToKind yields WRONG.
const (
    WRONG Kind = iota
    EOF
    PREFIX
    BASE
    URI
    NUMBER
    STRING
    LANGUAGE
    DATATYPE
    QNAME
    BOOLEAN
    LBOX
    RBOX
    LPAR
    RPAR
    A
    DOT
    SEMI
    COMMA
    BNODE 
)

// kindLabel gives a short human-readable name for each Kind, for use
// in diagnostics. Every Kind declared above has an entry (BASE was
// previously missing and printed as the empty string).
var kindLabel = map[Kind]string{
    WRONG: "wrong",
    EOF: "EOF",
    PREFIX: "prefix",
    BASE: "base",
    URI: "uri",
    NUMBER: "number",
    STRING: "string",
    LANGUAGE: "language",
    DATATYPE: "datatype",
    QNAME: "qname",
    LBOX: "lbox",
    RBOX: "rbox",
    LPAR: "lpar",
    RPAR: "rpar",
    BOOLEAN: "bool",
    A: "a",
    DOT: "dot",
    COMMA: "comma",
    SEMI: "semi",
    BNODE: "bnode",
}

// stringToKind maps the punctuation tokens and the keyword "a" to
// their kinds; any other string maps to the zero value, WRONG.
var stringToKind = map[string]Kind{
    "[": LBOX, "]": RBOX,
    "(": LPAR, ")": RPAR,
    "a": A,
    ".": DOT, ",": COMMA, ";": SEMI,
}

// tokeniseString tokenises an entire string for us. (Because regexp
// doesn't give us a stream matcher, we're going to have to read it in
// a lump anyway, unless/until we have adapted/replaced regexp).
// The tokens are dropped down a channel for consumption elsego.
// Shame we have to do all this post-match checking up at the moment.
// tokeniseString breaks the whole of all into Turtle tokens and
// delivers each one down sink via handleItem, finishing with an EOF
// token. (regexp offers no stream matcher, so the input has to be
// consumed in one lump anyway, unless/until regexp is adapted or
// replaced.)
func tokeniseString(all string, sink chan(Token)) {
    matches := tokenRE.FindAllString(all, -1)
    for _, tok := range matches {
        handleItem(sink, tok)
    }
    sink <- Token{EOF, ""}
}

// handleItem classifies one regexp match and sends the corresponding
// Token down sink. Whitespace and comments are silently discarded.
// The case order matters: URIs and datatypes contain ':' and must be
// recognised before the QNAME test, and "@prefix"/"@base" before the
// general language-tag case.
func handleItem( sink chan(Token), item string ) {
    // The tokenising regexp has historically been able to produce
    // empty matches; item[0] below would panic on "".
    if len(item) == 0 {
        return
    }
    switch {
    case item == "\n" || item == "\t" || item == " " || item[0] == '#':
        // whitespace and comments: discard
    case item == "@prefix":
        sink <- Token{PREFIX, item}
    case item == "@base":
        sink <- Token{BASE, item}
    case item[0] == '@':
        // language tag, e.g. "@en": strip the leading '@'
        sink <- Token{LANGUAGE, item[1:]}
    case item[0] == '^' && len(item) >= 2:
        // datatype marker "^^<uri>" or "^^qname": strip the "^^";
        // a lone '^' (from the catch-all `.`) falls through to WRONG
        // instead of panicking on item[2:]
        sink <- Token{DATATYPE, item[2:]}
    case item[0] == '<':
        sink <- Token{URI, item}
    case '0' <= item[0] && item[0] <= '9' || item[0] == '.' && len(item) > 1 || item[0] == '+' || item[0] == '-':
        // numbers; a bare "." is DOT (len check), not a number
        sink <- Token{NUMBER, item}
    case item[0] == '"':
        sink <- Token{STRING, item}
    case item == "true" || item == "false":
        sink <- Token{BOOLEAN, item}
    case strings.ContainsRune(item, ':'):
        sink <- Token{QNAME, item}
    default:
        // punctuation and "a"; anything unknown maps to WRONG (the
        // zero value of Kind)
        sink <- Token{stringToKind[item], item}
    }
}


