// The scanner package provides a lexical analyzer for plaintext BASIC programs.
package scanner

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
	"unicode"

	"code.google.com/p/microtool/basic/dialect"
	"code.google.com/p/microtool/util/stringset"
)

// A Position describes the location of a token within the input stream,
// as a zero-based row (line) and column.
type Position struct {
	row, col int
}

// String converts a position to a human-readable form.
func (p Position) String() string {
	row, col := p.row, p.col
	return fmt.Sprintf("line %d, column %d", row, col)
}

// Type identifies the type of a token.  A Type contains only comparable
// fields, so it can be used directly as a map key (see NoSpace).
type Type struct {
	// Debug is the display version of the token for debugging output.
	Debug string

	// Output is the actual output form of the token.  It is empty for
	// tokens (such as TypeEOF) that produce no output.
	Output string

	// HasData is true if this token contains additional data.  In this
	// case, we expect both Debug and Output to have "%s" placeholders for
	// the extra data.
	HasData bool
}

// These are the known token types.  Types with HasData set carry the
// token's text in Token.Data and splice it into Debug/Output through the
// "%s" placeholder.  TypeBackslash and TypeColon both separate statements,
// so their Output begins a fresh, indented line.
var (
	TypeBogus     = Type{Debug: "BOGUS(%s)", Output: "<<<%s>>>", HasData: true}
	TypeBackslash = Type{Debug: "BACKSLASH", Output: "\n  \\"}
	TypeColon     = Type{Debug: "COLON", Output: "\n  :"}
	TypeComma     = Type{Debug: "COMMA", Output: ","}
	TypeData      = Type{Debug: "DATA(%s)", Output: "DATA %s", HasData: true}
	TypeDivide    = Type{Debug: "DIVIDE", Output: "/"}
	TypeEOF       = Type{Debug: "EOF"}
	TypeEqual     = Type{Debug: "EQUAL", Output: "="}
	TypeGE        = Type{Debug: "GE", Output: ">="}
	TypeGT        = Type{Debug: "GT", Output: ">"}
	TypeID        = Type{Debug: "ID(%s)", Output: "%s", HasData: true}
	TypeKeyword   = Type{Debug: "KEYWORD(%s)", Output: "%s", HasData: true}
	TypeLE        = Type{Debug: "LE", Output: "<="}
	TypeLT        = Type{Debug: "LT", Output: "<"}
	TypeLParen    = Type{Debug: "LPAREN", Output: "("}
	TypeMinus     = Type{Debug: "MINUS", Output: "-"}
	TypeNEqual    = Type{Debug: "NEQUAL", Output: "<>"}
	TypeNewline   = Type{Debug: "NEWLINE", Output: "\n"}
	TypeNumber    = Type{Debug: "NUMBER(%s)", Output: "%s", HasData: true}
	TypePlus      = Type{Debug: "PLUS", Output: "+"}
	TypePound     = Type{Debug: "POUND", Output: "#"}
	TypeRemark    = Type{Debug: "REM(%s)", Output: "REM %s", HasData: true}
	TypeRemarkQ   = Type{Debug: "REMQ(%s)", Output: "' %s", HasData: true}
	TypeRParen    = Type{Debug: "RPAREN", Output: ")"}
	TypeSemicolon = Type{Debug: "SEMICOLON", Output: ";"}
	TypeString    = Type{Debug: "STRING(%s)", Output: "\"%s\"", HasData: true}
	TypeTimes     = Type{Debug: "TIMES", Output: "*"}
)

// A TypePair is an ordered pair of token types: T1 is the type of the
// earlier token and T2 the type of the token immediately following it.
type TypePair struct {
	T1, T2 Type
}

// NoSpace maps an ordered pair of adjacent token types to true when no
// space should be printed between them in the formatted output.
// The repeated TypePair key type is elided per gofmt -s.
var NoSpace = map[TypePair]bool{
	{TypeID, TypeComma}:          true,
	{TypeID, TypeLParen}:         true,
	{TypeID, TypeRParen}:         true,
	{TypeID, TypeSemicolon}:      true,
	{TypeKeyword, TypeComma}:     true,
	{TypeKeyword, TypeLParen}:    true,
	{TypeKeyword, TypeRParen}:    true,
	{TypeKeyword, TypeSemicolon}: true,
	{TypeLParen, TypeID}:         true,
	{TypeLParen, TypeKeyword}:    true,
	{TypeLParen, TypeLParen}:     true,
	{TypeLParen, TypeNumber}:     true,
	{TypeNumber, TypeComma}:      true,
	{TypeNumber, TypeRParen}:     true,
	{TypeNumber, TypeSemicolon}:  true,
	{TypePound, TypeID}:          true,
	{TypePound, TypeMinus}:       true,
	{TypePound, TypeNumber}:      true,
	{TypeRParen, TypeComma}:      true,
	{TypeRParen, TypeRParen}:     true,
	{TypeRParen, TypeSemicolon}:  true,
	{TypeString, TypeComma}:      true,
	{TypeString, TypeRParen}:     true,
	{TypeString, TypeSemicolon}:  true,

	// Prevent trailing space before colon.
	{TypeComma, TypeColon}:     true,
	{TypeID, TypeColon}:        true,
	{TypeKeyword, TypeColon}:   true,
	{TypeNumber, TypeColon}:    true,
	{TypeRParen, TypeColon}:    true,
	{TypeSemicolon, TypeColon}: true,
	{TypeString, TypeColon}:    true,
}

// A Token is a single lexical element within a BASIC program.
type Token struct {
	Pos  Position // where in the input the token was scanned
	Type Type     // which kind of token this is
	Data []byte   // extra text, only meaningful when Type.HasData is set
}

// Debug constructs a semi-verbose version of this token for debugging.
// For data-bearing types the token's text is spliced into the type's
// Debug template.
func (t *Token) Debug() string {
	if !t.Type.HasData {
		return t.Type.Debug
	}
	return fmt.Sprintf(t.Type.Debug, string(t.Data))
}

// Error constructs a verbose version of this token (location and debug
// version) for error reporting.
func (t *Token) Error() string {
	return "Token at " + t.Pos.String() + ": " + t.Debug()
}

// String constructs the version of the token appropriate for final output,
// filling in the type's Output template with the token data when present.
func (t *Token) String() string {
	if !t.Type.HasData {
		return t.Type.Output
	}
	return fmt.Sprintf(t.Type.Output, string(t.Data))
}

// add appends one more byte of payload to the token's data.  The token
// itself is returned, in support of call chaining.
func (t *Token) add(b byte) *Token {
	t.Data = append(t.Data, b)
	return t
}

// typ sets the type of the token.  The token itself is returned, in
// support of call chaining.
func (t *Token) typ(tt Type) *Token {
	t.Type = tt
	return t
}

// A Scanner wraps a lexical analyzer for BASIC around a source file.
type Scanner struct {
	buffer  []byte           // the entire input, read up front by NewScanner
	offset  int              // index into buffer of the next unread byte
	pos     Position         // human-readable location corresponding to offset
	peek    *Token           // token buffered by Peek, consumed by Next; nil if none
	dialect *dialect.Dialect // the dialect of BASIC being scanned
}

// NewScanner tries to build a new scanner for the given input and dialect of
// BASIC.  Note that the entire input will be read in as a result of this
// call: the scanner package does not support lazy reading.
func NewScanner(r io.Reader, d *dialect.Dialect) (*Scanner, error) {
	// The parameter is named d rather than dialect so that it does not
	// shadow the imported dialect package.
	buffer, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	return &Scanner{buffer: buffer, dialect: d}, nil
}

// String constructs a string representing the current state of the scanner.
func (s *Scanner) String() string {
	name := s.dialect.Name()
	return fmt.Sprintf("Scanner<%s @ %d of %d bytes>", name, s.offset, len(s.buffer))
}

// Peek returns the next token in the input without consuming it.  It returns
// a Token of type TypeEOF when all of the input has been consumed.  The
// token is cached, so repeated calls to Peek are cheap.
func (s *Scanner) Peek() *Token {
	if s.peek != nil {
		return s.peek
	}
	s.peek = s.read()
	return s.peek
}

// Next returns the next token in the input, consuming it in the process.  It
// returns a Token of type TypeEOF when all of the input has been consumed.
func (s *Scanner) Next() *Token {
	t := s.peek
	if t == nil {
		// Nothing buffered by Peek; scan a fresh token.
		return s.read()
	}
	s.peek = nil
	return t
}

// advance consumes a single character of input, keeping pos in sync with
// offset.  It is a no-op once the buffer is exhausted.
func (s *Scanner) advance() {
	if s.offset >= len(s.buffer) {
		return
	}
	switch s.buffer[s.offset] {
	case '\n':
		// A newline moves to the start of the following row.
		s.pos.row++
		s.pos.col = 0
	case '\r':
		// A carriage return rewinds to the start of the current row.
		s.pos.col = 0
	default:
		s.pos.col++
	}
	s.offset++
}

// here returns the character at the current offset, or EOF once the input
// has been exhausted.  It does not consume anything.
func (s *Scanner) here() byte {
	if s.offset < len(s.buffer) {
		return s.buffer[s.offset]
	}
	return EOF
}

// EOF is the sentinel byte returned by here once the input is exhausted.
// NOTE(review): the value 0 also matches a literal NUL byte in the input,
// which would therefore read as end of input — confirm inputs never
// contain NULs.
const EOF = 0

// The states of the token-scanning state machine in read.
const (
	stateInit = iota // at a token boundary, dispatching on the next character
	stateID // accumulating an identifier or keyword
	stateNumber // accumulating the integer part of a number
	stateNumberDec // accumulating digits after the decimal point
	stateNumberExpFirst // expecting the first sign or digit of an exponent
	stateNumberExp // accumulating the remaining exponent digits
	stateString // accumulating the body of a quoted string
	stateLT // saw '<'; may become <= or <>
	stateGT // saw '>'; may become >=
	stateRemark // consuming the rest of a REM / ' comment line
	stateData // consuming the rest of a DATA statement line
)

// suffixMatch reports whether the input starting at the current offset
// matches any member of suffixes, comparing the input upper-cased against
// the suffix as stored.  Callers use it to reject a keyword match that the
// upcoming input would extend into a longer word.
//
// Fix: the original bound (s.offset+len(suffix) < len(s.buffer)) was off by
// one — a suffix ending exactly at the end of the buffer was never matched,
// even though the slice s.buffer[s.offset : s.offset+len(suffix)] is valid
// there.  The bound is now inclusive.
func (s *Scanner) suffixMatch(suffixes stringset.Set) bool {
	for suffix := range suffixes {
		end := s.offset + len(suffix)
		if end > len(s.buffer) {
			// Not enough input left to contain this suffix.
			continue
		}
		if strings.ToUpper(string(s.buffer[s.offset:end])) == suffix {
			return true
		}
	}
	return false
}

// read scans the next token out of the buffer and returns it.  It is the
// engine behind Peek and Next, implemented as a character-at-a-time state
// machine: stateInit classifies the first significant character, returning
// single-character tokens immediately and switching to an accumulating
// state for multi-character tokens (identifiers/keywords, numbers, string
// literals, remarks, and DATA payloads).
func (s *Scanner) read() *Token {
	if s.offset >= len(s.buffer) {
		return &Token{Pos: s.pos, Type: TypeEOF}
	}

	// NOTE(review): the token's position is captured here, before stateInit
	// skips any leading blanks, so Pos can point at whitespace ahead of the
	// token rather than at its first character — confirm this is intended.
	t := &Token{Pos: s.pos}
	state := stateInit
	for {
		switch state {
		case stateInit:
			switch ch := s.here(); {
			case unicode.IsNumber(rune(ch)):
				t.add(ch)
				s.advance()
				state = stateNumber
			case ch == '.':
				// A bare '.' begins a number with no integer part.
				t.add(ch)
				s.advance()
				state = stateNumberDec
			case unicode.IsLetter(rune(ch)):
				// Identifiers/keywords accumulate in upper case so the
				// dialect keyword lookup is case-insensitive.
				t.add(byte(unicode.ToUpper(rune(ch))))
				s.advance()
				state = stateID
			case ch == '(':
				s.advance()
				return t.typ(TypeLParen)
			case ch == ')':
				s.advance()
				return t.typ(TypeRParen)
			case ch == '\\': // Used as a line separator in some BASICs.
				s.advance()
				return t.typ(TypeBackslash)
			case ch == ':':
				s.advance()
				return t.typ(TypeColon)
			case ch == '+':
				s.advance()
				return t.typ(TypePlus)
			case ch == '-':
				s.advance()
				return t.typ(TypeMinus)
			case ch == '*':
				s.advance()
				return t.typ(TypeTimes)
			case ch == '/':
				s.advance()
				return t.typ(TypeDivide)
			case ch == '=':
				s.advance()
				return t.typ(TypeEqual)
			case ch == '<':
				// Could be <, <=, or <>; decided in stateLT.
				s.advance()
				state = stateLT
			case ch == '>':
				// Could be > or >=; decided in stateGT.
				s.advance()
				state = stateGT
			case ch == ',':
				s.advance()
				return t.typ(TypeComma)
			case ch == ';':
				s.advance()
				return t.typ(TypeSemicolon)
			case ch == '#':
				s.advance()
				return t.typ(TypePound)
			case ch == '"':
				// The opening quote is not stored; only the string body is.
				s.advance()
				state = stateString
			case ch == '\'':
				// A single quote is remark shorthand in some BASICs.
				s.advance()
				t.typ(TypeRemarkQ)
				state = stateRemark
			case ch == ' ' || ch == '\t' || ch == '\r':
				// Skip insignificant whitespace between tokens.
				s.advance()
			case ch == '\n':
				s.advance()
				return t.typ(TypeNewline)
			case ch == EOF:
				return t.typ(TypeEOF)
			default:
				// Unrecognized character: emit a BOGUS token carrying it so
				// the caller can report it instead of silently dropping it.
				s.advance()
				return t.add(ch).typ(TypeBogus)
			}
		case stateID:
			switch ch := s.here(); {
			case unicode.IsNumber(rune(ch)) || unicode.IsLetter(rune(ch)) || ch == '@' || ch == '#':
				t.add(byte(unicode.ToUpper(rune(ch))))
				s.advance()
				// After each character, check whether the accumulated text
				// forms a complete keyword; suffixMatch rejects the match
				// when upcoming input would extend it into a longer word.
				if comp := s.dialect.MatchKeyword(string(t.Data)); comp != nil && !s.suffixMatch(comp.Suffixes) {
					if comp.Keyword.Name == "REM" {
						// REM swallows the rest of the line as comment text.
						t.typ(TypeRemark).Data = nil
						state = stateRemark
						continue
					} else if comp.Keyword.Name == "DATA" {
						// DATA swallows the rest of the line as its payload.
						t.Data = nil
						state = stateData
						continue
					}
					t.Data = []byte(comp.Keyword.Name)
					return t.typ(TypeKeyword)
				}
			case ch == '$' || ch == '!' || ch == '%':
				// A type sigil terminates the identifier (or keyword).
				t.add(ch)
				s.advance()
				if comp := s.dialect.MatchKeyword(string(t.Data)); comp != nil && !s.suffixMatch(comp.Suffixes) {
					t.Data = []byte(comp.Keyword.Name)
					return t.typ(TypeKeyword)
				}
				// Plain identifiers are normalized to lower case.
				t.Data = []byte(strings.ToLower(string(t.Data)))
				return t.typ(TypeID)
			case ch == EOF:
				t.Data = []byte(strings.ToLower(string(t.Data)))
				return t.typ(TypeID)
			default:
				// Any other character ends the word; give the dialect one
				// last chance to recognize it as a keyword.
				if comp := s.dialect.MatchKeyword(string(t.Data)); comp != nil && !s.suffixMatch(comp.Suffixes) {
					if comp.Keyword.Name == "REM" {
						t.typ(TypeRemark).Data = nil
						state = stateRemark
						continue
					} else if comp.Keyword.Name == "DATA" {
						t.Data = nil
						state = stateData
						continue
					}
					t.Data = []byte(comp.Keyword.Name)
					return t.typ(TypeKeyword)
				}
				t.Data = []byte(strings.ToLower(string(t.Data)))
				return t.typ(TypeID)
			}
		case stateNumber:
			// Accumulating the integer part of a number.
			switch ch := s.here(); {
			case unicode.IsNumber(rune(ch)):
				t.add(ch)
				s.advance()
			case ch == '.':
				t.add(ch)
				s.advance()
				state = stateNumberDec
			default:
				return t.typ(TypeNumber)
			}
		case stateNumberDec:
			// Accumulating digits after the decimal point.
			switch ch := s.here(); {
			case unicode.IsNumber(rune(ch)):
				t.add(ch)
				s.advance()
			case ch == 'e' || ch == 'E':
				t.add(ch)
				s.advance()
				state = stateNumberExpFirst
			default:
				return t.typ(TypeNumber)
			}
		case stateNumberExpFirst:
			// The first exponent character: a digit or an explicit sign.
			switch ch := s.here(); {
			case unicode.IsNumber(rune(ch)):
				t.add(ch)
				s.advance()
				state = stateNumberExp
			case ch == '+' || ch == '-':
				t.add(ch)
				s.advance()
				state = stateNumberExp
			default:
				return t.typ(TypeNumber)
			}
		case stateNumberExp:
			// The remaining exponent digits.
			switch ch := s.here(); {
			case unicode.IsNumber(rune(ch)):
				t.add(ch)
				s.advance()
			default:
				return t.typ(TypeNumber)
			}
		case stateString:
			switch ch := s.here(); {
			case ch == '"':
				// Closing quote: consumed but not stored in the data.
				s.advance()
				return t.typ(TypeString)
			case ch == '\n' || ch == '\r' || ch == EOF:
				// Unterminated string: end it at the line break without
				// consuming the terminator; no error is reported.
				return t.typ(TypeString)
			default:
				t.add(ch)
				s.advance()
			}
		case stateLT:
			switch ch := s.here(); {
			case ch == '>':
				s.advance()
				return t.typ(TypeNEqual)
			case ch == '=':
				s.advance()
				return t.typ(TypeLE)
			default:
				return t.typ(TypeLT)
			}
		case stateGT:
			switch ch := s.here(); {
			case ch == '=':
				s.advance()
				return t.typ(TypeGE)
			default:
				return t.typ(TypeGT)
			}
		case stateRemark:
			// Consume the rest of the line as comment text.
			switch ch := s.here(); {
			case ch == EOF || ch == '\n':
				// Drop one leading space so "REM foo" stores "foo".
				if len(t.Data) > 0 && t.Data[0] == ' ' {
					t.Data = t.Data[1:]
				}
				return t // type already set
			default:
				t.add(ch)
				s.advance()
			}
		case stateData:
			// Consume the rest of the line as the DATA statement payload.
			switch ch := s.here(); {
			case ch == EOF || ch == '\n':
				// Drop one leading space after the DATA keyword.
				if len(t.Data) > 0 && t.Data[0] == ' ' {
					t.Data = t.Data[1:]
				}
				return t.typ(TypeData)
			default:
				t.add(ch)
				s.advance()
			}
		}
	}
}
