package parser

import (
	"fmt"
	"mi/compiler/ast"
	"mi/compiler/diagnostic"
	"mi/compiler/performance"
	"mi/compiler/scanner"
	"mi/compiler/token"
	"mi/internal/xbase"
	"mi/internal/xpath"
)

// parsingContext identifies which kind of list the parser is currently
// consuming. Each value is used as a bit index into Parser.parsingCtx
// (see parseList / parseDelimitedList).
type parsingContext = int

const (
	pcImportDeclarations       parsingContext = iota // Imports in session file
	pcSourceElements                                 // Elements in session file
	pcBlockStatements                                // Statements in block
	pcSwitchClauses                                  // Clauses in switch statement
	pcSwitchClauseStatements                         // Statements in switch clause
	pcCatchClauses                                   // Catch clauses in try statement
	pcTypeMembers                                    // Members in interface or type literal
	pcClassMembers                                    // Members in class declaration
	pcEnumMembers                                    // Members in enum declaration
	pcThrows										 // Types in a function 'throws' clause
	pcHeritageClauseElement                          // Type references in extends or implements clause
	pcHeritageClauses                                // Heritage clauses for a class or interface declaration.
	pcVariableDeclarations                           // Variable declarations in variable statement
	pcArgumentExpressions                            // Expressions in argument list
	pcNamedAssignmentExpressions                     // Named assignment expressions in argument list
	pcArrayLiteralMembers                            // Members in array literal
	pcParameters                                     // Parameters in parameter list
	pcLambdaParameters                               // LambdaParameters in Lambda parameter list
	pcTypeParameters                                 // Type parameters in type parameter list
	pcTypeArguments                                  // Type arguments in type argument list
	pcDocumentTypeList                               // Types in a document type list
	pcParsingContextCount 							 // Number of parsing contexts
)

// Parser turns the token stream produced by scanner.Scanner into an
// ast.SourceFile. A single instance is reused across files; see
// initializeState and clearState for the per-file lifecycle.
type Parser struct {
	scanner *scanner.Scanner

	// Outputs of the current parse.
	sourceFile       *ast.SourceFile
	parseDiagnostics []*diagnostic.Diagnostic

	sourceText      []byte
	nodeCount       int // number of AST nodes created so far
	identifierCount int // number of identifier nodes created so far

	parsingCtx   parsingContext // bit set of active parsing contexts (1 << pcXxx)
	contextFlags ast.NodeFlags  // flags applied to the source file / context-sensitive parsing

	hasDeprecatedTag bool
}

// NewParser creates a Parser whose scanner is configured to skip
// trivia tokens.
func NewParser() *Parser {
	return &Parser{
		scanner: scanner.NewScanner(func(opt *scanner.Optional) {
			opt.SkipTrivia = true
		}),
	}
}

// parser is the shared package-level instance used by ParseSourceFile.
// Because its state is mutated per file, ParseSourceFile is not safe
// for concurrent use.
var parser = NewParser()

// ParseSourceFile parses sourceText into an ast.SourceFile, recording
// performance marks around the parse. It reuses the package-level
// parser instance, so callers must not invoke it concurrently.
func ParseSourceFile(uri xpath.URI, sourceText []byte, languageVersion ast.ScriptTarget, scriptKind ast.ScriptKind) *ast.SourceFile {
	// scriptKind = ensureScriptKind(fileName, scriptKind)
	performance.Mark("beforeParse")
	parser.initializeState(sourceText, languageVersion, scriptKind)
	var result = parser.parseSourceFileWorker(uri, languageVersion, scriptKind)
	parser.clearState()
	performance.Mark("afterParse")
	performance.Measure("Parse", "beforeParse", "afterParse")
	return result
}

// initializeState resets all per-file parser state and primes the
// scanner with the new source text. The script kind parameter is
// currently unused (see the commented-out language-variant call).
func (p *Parser) initializeState(sourceText []byte, languageVersion ast.ScriptTarget, _ ast.ScriptKind) {
	p.sourceText = sourceText

	p.parseDiagnostics = nil
	p.parsingCtx = 0
	p.identifierCount = 0
	p.nodeCount = 0

	p.contextFlags = ast.NFNone

	// Initialize and prime the scanner before parsing the session elements.
	p.scanner.SetText(sourceText, -1, -1)
	p.scanner.SetOnError(p.scanError)
	p.scanner.SetScriptTarget(languageVersion)
	// p.scanner.setLanguageVariant(getLanguageVariant(scriptKind))
}

// startPos returns the scanner's current start position.
func (p *Parser) startPos() int {
	return p.scanner.GetStartPos()
}

// clearState drops every reference held from the last parse so the
// shared parser instance does not keep the previous file's data alive.
func (p *Parser) clearState() {
	// Clear out the text the scanner is pointing at, so it doesn't keep anything alive unnecessarily.
	p.scanner.SetText(nil, -1, -1)
	p.scanner.SetOnError(nil)

	// Clear any data.  We don't want to accidentally hold onto it for too long.
	p.parseDiagnostics = nil
	p.sourceFile = nil
	p.sourceText = nil
}

// parsePackageDeclaration speculatively parses a package declaration;
// tryParse rolls the scanner back when the callback yields a null result.
// NOTE(review): the unchecked type assertion would panic if tryParse
// ever returned an untyped nil interface rather than a typed nil
// pointer — confirm parserPackageDeclaration's return convention.
func (p *Parser) parsePackageDeclaration() *ast.PackageDeclaration {
	return p.tryParse(func() interface{} {
		return p.parserPackageDeclaration()
	}).(*ast.PackageDeclaration)
}

// parseImportDeclarationList parses the run of import declarations at
// the top of the file into a fresh DeclarationList.
func (p *Parser) parseImportDeclarationList() *ast.DeclarationList {
	imports := new(ast.DeclarationList)
	p.listScope(imports, func() {
		p.parseList(pcImportDeclarations, func() {
			imports.Add(p.parseImportDeclaration())
		})
	})
	return imports
}

// parseDeclarationList parses the file's top-level declarations into a
// fresh DeclarationList.
func (p *Parser) parseDeclarationList() *ast.DeclarationList {
	decls := new(ast.DeclarationList)
	p.listScope(decls, func() {
		p.parseList(pcSourceElements, func() {
			decls.Add(p.parseTopLevelDeclaration())
		})
	})
	return decls
}

// parseSourceFileWorker drives the parse of a whole file: package
// declaration, import list, top-level declarations, and finally the
// end-of-file token. Node statistics and accumulated diagnostics are
// stored on the resulting SourceFile.
func (p *Parser) parseSourceFileWorker(uri xpath.URI, languageVersion ast.ScriptTarget, scriptKind ast.ScriptKind) *ast.SourceFile {
	p.sourceFile = p.createSourceFile(uri, languageVersion, scriptKind)
	p.sourceFile.SetFlags(p.contextFlags)

	// Prime the scanner.
	p.nextToken()

	p.sourceFile.PackageDeclaration = p.parsePackageDeclaration()
	p.sourceFile.Imports = p.parseImportDeclarationList()
	p.sourceFile.Declarations = p.parseDeclarationList()

	assertMsg(p.token() == token.EndOfFile, "End of file not reached ")
	p.sourceFile.EndOfFileToken = p.parseToken()

	p.sourceFile.NodeCount = p.nodeCount
	p.sourceFile.IdentifierCount = p.identifierCount
	diagnostic.SetDiagnosticToSourceFile(p.sourceFile, diagnostic.ParseDiagnostics, p.parseDiagnostics)

	return p.sourceFile
}

// addDocument parses the documentation comment ranges preceding node
// and attaches the resulting document nodes to it. Comments that parse
// to a null document are skipped.
func (p *Parser) addDocument(node ast.Node) {
	// Ranging over a nil slice is a no-op, so the previous explicit nil
	// check around GetDocumentCommentRanges' result was redundant.
	for _, comment := range scanner.GetDocumentCommentRanges(node, p.sourceFile.Text) {
		document := p.parseDocument(node, comment.Pos(), comment.End()-comment.Pos())
		if isNull(document) {
			continue
		}
		node.AddDocuments(document)
	}
}

// createSourceFile allocates the root SourceFile node spanning the
// entire source text and fills in its file-level metadata.
func (p *Parser) createSourceFile(uri xpath.URI, languageVersion ast.ScriptTarget, scriptKind ast.ScriptKind) *ast.SourceFile {
	// code from createNode is inlined here so createNode won't have to deal with special case of creating session files
	// this is quite rare comparing to other nodes and createNode should be as fast as possible
	var sourceFile = new(ast.SourceFile)
	sourceFile.SetPos(0)
	sourceFile.SetEnd(len(p.sourceText))
	p.nodeCount++

	sourceFile.Text = p.sourceText
	sourceFile.LanguageVersion = languageVersion
	sourceFile.Uri = uri
	sourceFile.IsDeclarationFile = xpath.FileExtensionIs(sourceFile.Uri.Filename(), ".d.mi")
	sourceFile.ScriptKind = scriptKind

	return sourceFile
}

// setContextFlag turns flag on or off in the parser's context flags.
func (p *Parser) setContextFlag(val bool, flag ast.NodeFlags) {
	if val {
		p.contextFlags |= flag
		return
	}
	p.contextFlags &^= flag
}

// doOutsideOfContext runs f with the given context flags temporarily
// cleared, restoring exactly the flags that were cleared afterwards.
func (p *Parser) doOutsideOfContext(context ast.NodeFlags, f func()) {
	// contextFlagsToClear will contain only the context flags that are
	// currently set that we need to temporarily clear
	// We don't just blindly reset to the previous flags to ensure
	// that we do not mutate cached flags for the incremental
	// parser (ThisNodeHasError, ThisNodeOrAnySubNodesHasError, and
	// HasAggregatedChildData).
	var contextFlagsToClear = context & p.contextFlags
	if contextFlagsToClear != 0 {
		// clear the requested context flags
		p.setContextFlag(false, contextFlagsToClear)
		f()
		// restore the context flags we just cleared
		p.setContextFlag(true, contextFlagsToClear)
		return
	}

	// no need to do anything special as we are not in any of the requested contexts
	f()
}

// doInsideOfContext runs f with the given context flags temporarily
// set, restoring exactly the flags that were set afterwards.
func (p *Parser) doInsideOfContext(context ast.NodeFlags, f func()) {
	// contextFlagsToSet will contain only the context flags that
	// are not currently set that we need to temporarily enable.
	// We don't just blindly reset to the previous flags to ensure
	// that we do not mutate cached flags for the incremental
	// parser (ThisNodeHasError, ThisNodeOrAnySubNodesHasError, and
	// HasAggregatedChildData).
	// (Renamed from contextFlagsToClear: these are the flags being SET,
	// as the comment above already stated.)
	var contextFlagsToSet = context &^ p.contextFlags
	if contextFlagsToSet != 0 {
		// set the requested context flags
		p.setContextFlag(true, contextFlagsToSet)
		f()
		// reset the context flags we just set
		p.setContextFlag(false, contextFlagsToSet)
		return
	}

	// no need to do anything special as we are already in all of the requested contexts
	f()
}

// inContext reports whether any of the given context flags are active.
func (p *Parser) inContext(flags ast.NodeFlags) bool {
	return (p.contextFlags & flags) != 0
}

// errorAtCurrentToken reports a diagnostic spanning the current token.
func (p *Parser) errorAtCurrentToken(message *diagnostic.Message, args ...interface{}) {
	var start = p.scanner.GetTokenPos()
	var length = p.scanner.GetTextPos() - start

	p.errorAtPosition(start, length, message, args...)
}

// errorAtPosition records a parse diagnostic at the given span,
// suppressing duplicates that start at the same position as the most
// recently reported one.
func (p *Parser) errorAtPosition(start int, length int, message *diagnostic.Message, args ...interface{}) {
	// Don't report another error if it would just be at the same position as the last error.
	if n := len(p.parseDiagnostics); n == 0 || start != p.parseDiagnostics[n-1].Start {
		p.parseDiagnostics = append(p.parseDiagnostics, diagnostic.CreateFileDiagnostic(p.sourceFile, start, length, message, args...))
	}
}

// scanError is installed as the scanner's error callback; pos == -1
// means "at the scanner's current text position".
func (p *Parser) scanError(message *diagnostic.Message, pos int, length int) {
	if pos == -1 {
		pos = p.scanner.GetTextPos()
	}
	p.errorAtPosition(pos, length, message)
}

// getNodePos returns the position at which a node starting at the
// current token should begin.
func (p *Parser) getNodePos() int {
	return p.scanner.GetStartPos()
}

// getNodeEnd returns the end position for a node being finished.
// NOTE(review): identical to getNodePos — both delegate to
// GetStartPos; confirm this is intentional.
func (p *Parser) getNodeEnd() int {
	return p.scanner.GetStartPos()
}

// token returns the current token without advancing.
func (p *Parser) token() token.Token {
	return p.scanner.GetToken()
}

// nextToken advances the scanner and returns the new current token.
func (p *Parser) nextToken() token.Token {
	p.scanner.Scan()
	return p.token()
}

// reScanGreaterToken re-scans a '>' so compound operators (e.g. '>>')
// are split correctly inside type-argument contexts.
func (p *Parser) reScanGreaterToken() token.Token {
	p.scanner.ReScanGreaterToken()
	return p.token()
}

// isIdentifier reports whether the current token can serve as an
// identifier: either a real Identifier token or any keyword beyond the
// reserved-word range.
func (p *Parser) isIdentifier() bool {
	if p.token() == token.Identifier {
		return true
	}

	//// If we have a 'yield' keyword, and we're in the [yield] context, then 'yield' is
	//// considered a keyword and is not an identifier.
	//if (token() === SyntaxKind.YieldKeyword && inYieldContext()) {
	//	return false;
	//}
	//
	//// If we have a 'await' keyword, and we're in the [Await] context, then 'await' is
	//// considered a keyword and is not an identifier.
	//if (token() === SyntaxKind.AwaitKeyword && inAwaitContext()) {
	//	return false;
	//}

	return p.token() > token.LastReservedWord
}

// speculationHelper runs callback under scanner speculation. When
// isLookAhead is true the scanner state is always rolled back;
// otherwise it is only rolled back if callback returns a null result.
// Diagnostics produced during a discarded speculation are dropped.
func (p *Parser) speculationHelper(callback func() interface{}, isLookAhead bool) interface{} {
	// Keep track of the state we'll need to rollback to if lookahead fails (or if the
	// caller asked us to always reset our state).
	var saveSyntacticErrorsLength = len(p.parseDiagnostics)

	// Note: it is not actually necessary to save/restore the context flags here.  That's
	// because the saving/restoring of these flags happens naturally through the recursive
	// descent nature of our parser.  However, we still store this here just so we can
	// assert that invariant holds.
	var saveContextFlags = p.contextFlags

	var result interface{}
	if isLookAhead {
		result = p.scanner.LookHead(callback)
	} else {
		result = p.scanner.TryScan(callback)
	}
	if saveContextFlags != p.contextFlags {
		panic("Context flags changed.")
	}


	// If our callback returned something 'falsy' or we're just looking ahead,
	// then unconditionally restore us to where we were.
	if isNull(result) || isLookAhead {
		p.parseDiagnostics = p.parseDiagnostics[:saveSyntacticErrorsLength]
	}

	return result
}

/** Invokes the provided callback then unconditionally restores the parser to the state it
 * was in immediately prior to invoking the callback.  The result of invoking the callback
 * is returned from this function.
 */
func (p *Parser) lookAhead(callback func() interface{}) interface{} {
	return p.speculationHelper(callback, true)
}

// lookAheadBool is a bool-typed convenience wrapper around lookAhead.
func (p *Parser) lookAheadBool(callback func() bool) bool {
	return p.speculationHelper(func() interface{} {
		return callback()
	}, true).(bool)
}

/** Invokes the provided callback.  If the callback returns something falsy, then it restores
 * the parser to the state it was in immediately prior to invoking the callback.  If the
 * callback returns something truthy, then the parser state is not rolled back.  The result
 * of invoking the callback is returned from this function.
 */
func (p *Parser) tryParse(callback func() interface{}) interface{} {
	return p.speculationHelper(callback, false)
}

// tryParseBool is a bool-typed convenience wrapper around tryParse.
func (p *Parser) tryParseBool(callback func() bool) bool {
	return p.speculationHelper(func() interface{} {
		return callback()
	}, false).(bool)
}

// want consumes a token of kind t, reporting the generic "'X' expected"
// error when the current token does not match. Returns true on a match.
func (p *Parser) want(t token.Token) bool {
	return p.parseExpected(t, nil, true)
}

// parseExpected checks the current token against kind, optionally
// advancing past it. On a mismatch it reports diagnosticMessage (or a
// generic "'X' expected" when nil) and returns false without advancing.
func (p *Parser) parseExpected(kind token.Token, diagnosticMessage *diagnostic.Message, shouldAdvance bool) bool {
	if p.token() == kind {
		if shouldAdvance {
			p.nextToken()
		}
		return true
	}

	// Report specific message if provided with one.  Otherwise, report generic fallback message.
	if diagnosticMessage != nil {
		p.errorAtCurrentToken(diagnosticMessage)
	} else {
		p.errorAtCurrentToken(diagnostic.M_0_expected, kind.ToString())
	}
	return false
}

// got consumes the current token and returns true when it is of kind t;
// otherwise the token stream is left untouched and false is returned.
func (p *Parser) got(t token.Token) bool {
	if p.token() == t {
		p.nextToken()
		return true
	}
	return false
}

// gotToken is like got but returns the consumed token as a node, or
// nil when the current token does not match.
func (p *Parser) gotToken(t token.Token) *ast.TokenNode {
	if p.token() == t {
		return p.parseToken()
	}
	return nil
}

// wantToken consumes and returns a token of kind t. When the current
// token does not match, it reports diagnosticMessage (either at the
// current position or spanning the current token) and returns a
// zero-length "missing" token node so parsing can continue.
func (p *Parser) wantToken(t token.Token, reportAtCurrentPosition bool, diagnosticMessage *diagnostic.Message, args ...interface{}) *ast.TokenNode {
	// Renamed from 't' to avoid shadowing the parameter.
	if tok := p.gotToken(t); tok != nil {
		return tok
	}

	// Bug fix: forward the variadic args with 'args...'. Passing the
	// bare slice made the diagnostic receive a single []interface{}
	// argument instead of the individual values.
	if reportAtCurrentPosition {
		p.errorAtPosition(p.startPos(), 0, diagnosticMessage, args...)
	} else {
		p.errorAtCurrentToken(diagnosticMessage, args...)
	}

	// Missing token (pos == end)
	var node = new(ast.TokenNode)
	p.fillMissPos(node)
	return node
}

// canParseSemicolon reports whether a statement terminator is available
// here, either explicitly or via automatic semicolon insertion (ASI).
func (p *Parser) canParseSemicolon() bool {
	// If there's a real semicolon, then we can always parse it out.
	if p.token() == token.Semicolon {
		return true
	}

	// We can parse out an optional semicolon in ASI cases in the following cases.
	return p.token() == token.CloseBrace || p.token() == token.EndOfFile || p.scanner.HasPrecedingLineBreak()
}

// parseSemicolon consumes a statement terminator: an explicit ';' or an
// ASI position (see canParseSemicolon). When neither applies it reports
// a "';' expected" diagnostic and returns false.
func (p *Parser) parseSemicolon() bool {
	// Idiom: guard clause instead of else-after-return.
	if !p.canParseSemicolon() {
		return p.parseExpected(token.Semicolon, nil, true)
	}
	if p.token() == token.Semicolon {
		// consume the semicolon if it was explicitly provided.
		p.nextToken()
	}
	return true
}

// createIdentifier builds an Identifier node from the current token
// when isIdentifier is true, recording the original token kind and
// value and consuming the token. Otherwise it reports diagnosticMessage
// (defaulting to "Identifier expected") and returns a zero-length
// missing identifier so parsing can continue.
// (The previous header comment about '__proto__' escaping and shared
// identifier instances described behavior this code does not implement.)
func (p *Parser) createIdentifier(isIdentifier bool, diagnosticMessage *diagnostic.Message) *ast.Identifier {
	if isIdentifier {
		p.identifierCount++ // gofmt: no space before '++'
		var node = new(ast.Identifier)
		p.scope(node, func() {
			node.OriginalToken = p.token()
			node.Value = p.scanner.GetTokenValue()
			p.nextToken()
		})
		return node
	}

	if diagnosticMessage == nil {
		diagnosticMessage = diagnostic.M_Identifier_expected
	}
	p.errorAtCurrentToken(diagnosticMessage)

	var node = new(ast.Identifier)
	p.fillMissPos(node)
	return node
}

// parseIdentifier parses the current token as an identifier, producing
// a missing identifier (plus a diagnostic) when it cannot.
func (p *Parser) parseIdentifier(diagnosticMessage *diagnostic.Message) *ast.Identifier {
	return p.createIdentifier(p.isIdentifier(), diagnosticMessage)
}

// parseContextualModifier returns true when the current token is t and
// lookahead confirms it is being used as a modifier; the speculation
// consumes the token on success.
func (p *Parser) parseContextualModifier(t token.Token) bool {
	return p.token() == t && p.tryParseBool(p.nextTokenCanFollowModifier)
}

// nextTokenIsOnSameLineAndCanFollowModifier advances one token and
// reports whether it can follow a modifier without an intervening
// line break.
func (p *Parser) nextTokenIsOnSameLineAndCanFollowModifier() bool {
	p.nextToken()
	return !p.scanner.HasPrecedingLineBreak() && p.canFollowModifier()
}

// nextTokenCanFollowModifier advances past the current modifier token
// and reports whether what follows can belong to a modified
// declaration. 'static' allows the follower to be on any line.
func (p *Parser) nextTokenCanFollowModifier() bool {
	if p.token() == token.StaticKeyword {
		p.nextToken()
		return p.canFollowModifier()
	}

	return p.nextTokenIsOnSameLineAndCanFollowModifier()
}

// parseAnyContextualModifier returns true when the current token is any
// modifier actually used as one; the speculation consumes it on success.
func (p *Parser) parseAnyContextualModifier() bool {
	return p.token().IsModifier() && p.tryParseBool(p.nextTokenCanFollowModifier)
}

// canFollowModifier reports whether the current token can begin the
// construct that a modifier attaches to.
func (p *Parser) canFollowModifier() bool {
	switch p.token() {
	case token.ClassKeyword,
		token.EnumKeyword,
		token.AnnotationKeyword,
		token.VarKeyword,
		token.FunctionKeyword,
		token.DotDotDot:
		return true
	}
	return p.isIdentifier() || p.token().IsModifier()
}

// nextTokenIsClassOrFunction advances one token and reports whether it
// is 'class' or 'function'.
func (p *Parser) nextTokenIsClassOrFunction() bool {
	switch p.nextToken() {
	case token.ClassKeyword, token.FunctionKeyword:
		return true
	}
	return false
}

// True if positioned at the start of a list element
func (p *Parser) isListElement(context parsingContext, inErrorRecovery bool) bool {
	switch context {
	case pcSourceElements:
		return !(p.token() == token.Semicolon && inErrorRecovery) && p.isStartOfTopLevelDeclaration()
	case pcImportDeclarations:
		return p.token() == token.ImportKeyword
	case pcBlockStatements,
		pcSwitchClauseStatements:
		// If we're in error recovery, then we don't want to treat ';' as an empty statement.
		// The problem is that ';' can show up in far too many contexts, and if we see one
		// and assume it's a statement, then we may bail out inappropriately from whatever
		// we're parsing.  For example, if we have a semicolon in the middle of a class, then
		// we really don't want to assume the class is over and we're on a statement in the
		// outer module.  We just want to consume and move on.
		return !(p.token() == token.Semicolon && inErrorRecovery) && p.isStartOfStatement()
	case pcSwitchClauses:
		return p.token() == token.CaseKeyword || p.token() == token.DefaultKeyword
	case pcCatchClauses:
		return p.token() == token.CatchKeyword
	case pcTypeMembers:
		return p.lookAheadBool(p.isTypeMemberStart) // || (p.token() == token.SemicolonToken && !inErrorRecovery)
	case pcClassMembers:
		return p.lookAheadBool(p.isClassMemberStart) // || (p.token() == token.SemicolonToken && !inErrorRecovery)
	case pcEnumMembers:
		// Include open bracket computed properties. This technically also lets in indexers,
		// which would be a candidate for improved error reporting.
		return p.lookAheadBool(p.isEnumMemberStart)
	case pcThrows:
		return p.isStartOfType()
	case pcHeritageClauseElement:
		return p.isStartOfType()
	case pcHeritageClauses:
		return p.isHeritageClause()
	case pcVariableDeclarations,
		pcTypeParameters:
		return p.isIdentifier()
	case pcArgumentExpressions:
		return p.isStartOfExpression()
	case pcNamedAssignmentExpressions:
		return p.isStartOfLeftHandSideExpression()
	case pcArrayLiteralMembers:
		return p.token() == token.Comma || p.isStartOfExpression()
	case pcParameters:
		return p.isStartOfParameter()
	case pcLambdaParameters:
		return p.isStartOfParameter()
	case pcTypeArguments:
		return p.isStartOfType() || p.token() == token.Question
	case pcDocumentTypeList:
		return p.isStartOfType() || p.token() == token.DotDotDot
	}

	panic("Non-exhaustive case in 'isListElement'.")
}

// nextTokenIsIdentifier advances one token and reports whether it can
// serve as an identifier.
func (p *Parser) nextTokenIsIdentifier() bool {
	p.nextToken()
	return p.isIdentifier()
}

// nextTokenIsIdentifierOrKeyword advances one token and reports whether
// it is an identifier or any keyword.
func (p *Parser) nextTokenIsIdentifierOrKeyword() bool {
	p.nextToken()
	return scanner.TokenIsIdentifierOrKeyword(p.token())
}

// nextTokenIsStartOfExpression advances one token and reports whether
// it can start an expression.
func (p *Parser) nextTokenIsStartOfExpression() bool {
	p.nextToken()
	return p.isStartOfExpression()
}

// True if positioned at a list terminator
func (p *Parser) isListTerminator(kind parsingContext) bool {
	if p.token() == token.EndOfFile {
		// Being at the end of the sourceFile ends all lists.
		return true
	}

	switch kind {
	case pcBlockStatements,
		pcSwitchClauses,
		pcTypeMembers,
		pcClassMembers:
		return p.token() == token.CloseBrace
	case pcEnumMembers:
		return p.canParseSemicolon()
	case pcThrows:
		return p.canParseSemicolon() ||
			p.token() == token.OpenBrace
	case pcSwitchClauseStatements:
		return p.token() == token.CloseBrace ||
			p.token() == token.CaseKeyword ||
			p.token() == token.DefaultKeyword
	case pcHeritageClauseElement:
		return p.token() == token.OpenBrace ||
			p.token() == token.ExtendsKeyword ||
			p.token() == token.ImplementsKeyword
	case pcCatchClauses:
		return p.token() != token.CatchKeyword
	case pcHeritageClauses:
		return p.token() == token.OpenBrace ||
			p.token() == token.CloseBrace
	case pcVariableDeclarations:
		return p.isVariableDeclaratorListTerminator()
	case pcTypeParameters:
		// Tokens other than '>' are here for better error recovery
		return p.token() == token.GreaterThan ||
			p.token() == token.OpenParen ||
			p.token() == token.OpenBrace ||
			p.token() == token.ExtendsKeyword ||
			p.token() == token.ImplementsKeyword
	case pcArgumentExpressions:
		// Tokens other than ')' are here for better error recovery
		return p.token() == token.CloseParen ||
			p.token() == token.Semicolon
	case pcNamedAssignmentExpressions:
		return p.token() == token.CloseParen
	case pcArrayLiteralMembers:
		return p.token() == token.CloseBrace
	case pcParameters:
		// Tokens other than ')' and ']' (the latter for index signatures) are here for better error recovery
		return p.token() == token.CloseParen ||
			p.token() == token.CloseBracket ||
			p.token() == token.OpenBrace
	case pcLambdaParameters:
		return p.token() != token.Comma
	case pcTypeArguments:
		// Tokens other than '>' are here for better error recovery
		return p.token() == token.GreaterThan ||
			p.token() == token.OpenParen ||
			// When parsing the document type list, prevent transitional parsing
			p.token() == token.NewLineTrivia
	case pcImportDeclarations:
		return p.token() != token.ImportKeyword
	case pcDocumentTypeList:
		return p.token() == token.CloseParen ||
			// 'NewLineTrivia' is only possible when parsing the document type
			p.token() == token.NewLineTrivia
	}
	return false
}

// isVariableDeclaratorListTerminator reports whether the current token
// ends a variable declarator list ('of', a (possibly inserted)
// semicolon, or '=>' as an error-recovery stop).
func (p *Parser) isVariableDeclaratorListTerminator() bool {
	// At for of statement
	if p.token() == token.OfKeyword {
		return true
	}

	// If we can consume a semicolon (either explicitly, or with ASI), then consider us done
	// with parsing the list of  variable declarators.
	if p.canParseSemicolon() {
		return true
	}

	// ERROR RECOVERY TWEAK:
	// For better error recovery, if we see an '=>' then we just stop immediately.  We've got an
	// arrow function here and it's going to be very unlikely that we'll resynchronize and get
	// another variable declaration.
	if p.token() == token.EqualsGreaterThan {
		return true
	}

	// Keep trying to parse out variable declarators.
	return false
}

// Parses a list of elements
func (p *Parser) parseList(kind parsingContext, parseElement func()) {
	saveParsingCtx := p.parsingCtx
	p.parsingCtx |= 1 << kind

	for !p.isListTerminator(kind) {
		if p.isListElement(kind, false) {
			parseElement()
			continue
		}

		if p.reportErrorAndMoveToNextToken(kind) {
			break
		}
	}

	p.parsingCtx = saveParsingCtx
}

func (p *Parser) parseListElement(_ parsingContext, parseElement func() *ast.Node) *ast.Node {
	return parseElement()
}

// reportErrorAndMoveToNextToken reports the generic error for the given
// parsing context and skips one token so the list loop makes progress.
// Returns true if we should abort parsing.
// NOTE(review): this currently always returns false, so callers never
// abort — confirm whether an abort path is still intended.
func (p *Parser) reportErrorAndMoveToNextToken(kind parsingContext) bool {
	p.errorAtCurrentToken(parsingContextErrors(kind))
	p.nextToken()
	return false
}

// parsingContextErrors returns the diagnostic message to report when an
// unexpected token is encountered while parsing a list of the given
// context. Panics on an unknown context.
func parsingContextErrors(context parsingContext) *diagnostic.Message {
	switch context {
	case pcImportDeclarations, pcSourceElements:
		return diagnostic.M_Declaration_expected
	case pcBlockStatements:
		return diagnostic.M_Declaration_or_statement_expected
	case pcSwitchClauses:
		return diagnostic.M_case_or_default_expected
	case pcSwitchClauseStatements:
		return diagnostic.M_Statement_expected
	case pcTypeMembers, pcClassMembers:
		return diagnostic.M_Property_or_signature_expected
	case pcEnumMembers:
		return diagnostic.M_Enum_member_expected
	case pcArgumentExpressions:
		return diagnostic.M_Argument_expression_expected
	case pcArrayLiteralMembers, pcNamedAssignmentExpressions:
		return diagnostic.M_Expression_or_comma_expected
	case pcParameters:
		return diagnostic.M_Parameter_declaration_expected
	case pcTypeParameters:
		return diagnostic.M_Type_parameter_declaration_expected
	case pcTypeArguments:
		return diagnostic.M_Type_argument_expected
	case pcHeritageClauses:
		return diagnostic.M_Type_reference_expected
	case pcVariableDeclarations:
		return diagnostic.M_Variable_declaration_expected
	case pcThrows:
		return diagnostic.M_Identifier_expected
	case pcHeritageClauseElement, pcDocumentTypeList:
		return diagnostic.M_Type_expected
	}

	panic(fmt.Sprintf("ParsingContext(%d) kind is unknown:", context))
}

// Parses a comma-delimited list of elements
func (p *Parser) parseDelimitedList(kind parsingContext, parseElement func(), allowTrailingComma bool) {
	var saveParsingContext = p.parsingCtx
	p.parsingCtx |= 1 << kind

	var commaStart = -1 // Meaning the previous token was not a comma
	for true {
		if p.isListElement(kind, false) {
			parseElement()
			commaStart = p.scanner.GetTokenPos()
			if p.got(token.Comma) {
				continue
			}

			commaStart = -1 // Back to the state where the last token was not a comma
			if p.isListTerminator(kind) {
				break
			}

			// We didn't get a comma, and the list wasn't terminated, explicitly parse
			// out a comma so we give a good error message.
			p.parseExpected(token.Comma, nil, true)

			continue
		}

		if p.isListTerminator(kind) {
			break
		}

		if p.reportErrorAndMoveToNextToken(kind) {
			break
		}
	}

	// Recording the trailing comma is deliberately done after the previous
	// loop, and not just if we see a list terminator. This is because the list
	// may have ended incorrectly, but it is still important to know if there
	// was a trailing comma.
	// Check if the last token was a comma.
	if commaStart >= 0 && !allowTrailingComma {
		p.errorAtCurrentToken(diagnostic.M_Trailing_comma_not_allowed)
	}

	p.parsingCtx = saveParsingContext
}

// The allowReservedWords parameter controls whether reserved words are permitted after the first dot
func (p *Parser) parseEntityName(diagnosticMessage *diagnostic.Message) ast.Node {
	var entity ast.Expression = p.parseIdentifier(diagnosticMessage)
	for p.got(token.Dot) {
		var node = new(ast.QualifiedName)
		node.SetPos(entity.Pos())
		node.Left = entity
		node.Right = p.parseRightSideOfDot()
		node.SetEnd(p.startPos())
		entity = node
	}
	return entity
}

// parseRightSideOfDot parses the identifier following a '.', producing
// a zero-length missing identifier right after the dot when the next
// token appears to start an unrelated construct on a following line.
func (p *Parser) parseRightSideOfDot() *ast.Identifier {
	// Technically a keyword is valid here as all identifiers and keywords are identifier names.
	// However, often we'll encounter this in error situations when the identifier or keyword
	// is actually starting another valid construct.
	//
	// So, we check for the following specific case:
	//
	//      name.
	//      identifierOrKeyword identifierNameOrKeyword
	//
	// Note: the newlines are important here.  For example, if that above code
	// were rewritten into:
	//
	//      name.identifierOrKeyword
	//      identifierNameOrKeyword
	//
	// Then we would consider it valid.  That's because ASI would take effect and
	// the code would be implicitly: "name.identifierOrKeyword; identifierNameOrKeyword".
	// In the first case though, ASI will not take effect because there is not a
	// line terminator after the identifier or keyword.
	if p.scanner.HasPrecedingLineBreak() && scanner.TokenIsIdentifierOrKeyword(p.token()) {
		var matchesPattern = p.lookAheadBool(p.nextTokenIsIdentifierOrKeywordOnSameLine)

		if matchesPattern {
			// Report that we need an identifier.  However, report it right after the dot,
			// and not on the next token.  This is because the next token might actually
			// be an identifier and the error would be quite confusing.
			var node = new(ast.Identifier)
			node.SetPos(p.startPos())
			node.SetEnd(p.startPos())
			p.errorAtPosition(p.startPos(), 0, diagnostic.M_Identifier_expected)
			return node
		}
	}

	return p.parseIdentifier(nil)
}

// parseLiteralExpression parses a literal of the current token's kind.
func (p *Parser) parseLiteralExpression() *ast.LiteralExpression {
	return p.parseLiteralExpressionRest(p.token())
}

// parseLiteralExpressionRest builds a LiteralExpression of the given
// kind from the current token's value, then advances the scanner.
func (p *Parser) parseLiteralExpressionRest(kind token.Token) *ast.LiteralExpression {
	lit := new(ast.LiteralExpression)
	p.scope(lit, func() {
		lit.Token = kind
		lit.Value = p.scanner.GetTokenValue()
		p.nextToken()
	})
	return lit
}

// TYPES

// parseTypeReference parses optional annotations followed by a
// (possibly qualified, possibly generic) type reference.
func (p *Parser) parseTypeReference() *ast.TypeReference {
	var fullStart = p.getNodePos()
	var annotations = p.parseAnnotations()
	return p.parseTypeReferenceRest(fullStart, annotations)
}

// parseTypeArgumentList parses '<' T, U, ... '>' and returns the list.
// Returns nil when no '<' is present (want has then already reported
// the error).
func (p *Parser) parseTypeArgumentList() *ast.TypeList {
	if !p.want(token.LessThan) {
		return nil
	}
	args := new(ast.TypeList)
	p.listScope(args, func() {
		p.parseDelimitedList(pcTypeArguments, func() {
			args.Add(p.parseTypeArgument())
		}, false)
	})
	p.want(token.GreaterThan)
	return args
}

// parseTypeReferenceRest parses the name and optional type-argument
// portion of a type reference that began at fullStart with the given
// annotations. A '<' on a following line is NOT treated as starting
// type arguments.
func (p *Parser) parseTypeReferenceRest(fullStart int, annotations *ast.NodeList) *ast.TypeReference {
	var node = new(ast.TypeReference)
	p.scope(node, func() {
		var typeName = p.parseEntityName(diagnostic.M_Type_expected)
		node.Annotations = annotations
		node.Name = typeName
		if !p.scanner.HasPrecedingLineBreak() && p.token() == token.LessThan {
			node.TypeArguments = p.parseTypeArgumentList()
		}
	}, fullStart)
	return node
}

// parseAnonymousClassDeclaration parses a '{' ... '}' body of type
// members into a BodyDeclaration. When the opening brace is absent the
// members stay nil (want reports the error).
func (p *Parser) parseAnonymousClassDeclaration() *ast.BodyDeclaration {
	var node = new(ast.BodyDeclaration)
	p.scope(node, func() {
		if p.want(token.OpenBrace) {
			node.Members = p.parseTypeMembers()
			p.want(token.CloseBrace)
		}
	})
	return node
}

// parseTypeParameter parses one type parameter: a name followed by an
// optional 'extends' constraint.
func (p *Parser) parseTypeParameter() *ast.TypeParameterDeclaration {
	var node = new(ast.TypeParameterDeclaration)
	p.scope(node, func() {
		node.Name = p.parseIdentifier(nil)
		if p.got(token.ExtendsKeyword) {
			node.Constraint = p.parseType()
		}
	})
	return node
}

// parseTypeParameters parses an optional '<' ... '>' type parameter
// list; returns nil when the current token is not '<'.
func (p *Parser) parseTypeParameters() *ast.DeclarationList {
	// got consumes the '<' exactly when it is present, replacing the
	// original token-check-then-want sequence.
	if !p.got(token.LessThan) {
		return nil
	}
	params := new(ast.DeclarationList)
	p.listScope(params, func() {
		p.parseDelimitedList(pcTypeParameters, func() {
			params.Add(p.parseTypeParameter())
		}, false)
	})
	p.want(token.GreaterThan)
	return params
}

// isStartOfParameter reports whether the current token can begin a
// parameter declaration.
func (p *Parser) isStartOfParameter() bool {
	switch p.token() {
	case token.DotDotDot, token.At, token.ThisKeyword:
		return true
	}
	return p.isIdentifier() || p.token().IsModifier()
}

// parseParameter parses one parameter: annotations, modifiers, an
// optional '...' rest marker, a name, and an optional type annotation,
// then attaches any preceding documentation comments.
func (p *Parser) parseParameter() *ast.ParameterDeclaration {
	var node = new(ast.ParameterDeclaration)
	p.scope(node, func() {
		node.Annotations = p.parseAnnotations()
		node.Modifiers = p.parseModifiers()
		node.Dots = p.gotToken(token.DotDotDot)
		node.Name = p.parseIdentifier(nil)
		node.Type = p.parseTypeAnnotation()
	})
	p.addDocument(node)
	return node
}

// fillSignature parses the common signature parts — type parameters,
// parameter list, optional ':' return type, and optional 'throws' type
// list — and stores them on decl, which must be a constructor or method
// declaration (anything else panics).
func (p *Parser) fillSignature(requireCompleteParameterList bool, decl ast.Declaration) {
	var typeParams = p.parseTypeParameters()
	var params = p.parseParameterList(requireCompleteParameterList)

	// Only treat ':' as a return-type marker when we are not already at
	// a (possibly inserted) semicolon.
	var tpe ast.Type
	if !p.canParseSemicolon() && p.got(token.Colon) {
		tpe = p.parseType()
	}

	var throws *ast.TypeList
	if p.got(token.ThrowsKeyword) {
		var list = new(ast.TypeList)
		p.listScope(list, func() {
			p.parseDelimitedList(pcThrows, func() {
				list.Add(p.parseType())
			}, false)
		})
		throws = list
	}

	switch n := decl.(type) {
	case *ast.ConstructorDeclaration:
		n.TypeParameters = typeParams
		n.Parameters = params
		n.Throws = throws
		n.Type = tpe
	case *ast.MethodDeclaration:
		n.TypeParameters = typeParams
		n.Parameters = params
		n.Throws = throws
		n.Type = tpe
	default:
		panic("fillSignature declaration is not legal")
	}
}

// parseParameterList parses '(' params ')'. With
// requireCompleteParameterList set, a missing '(' or ')' yields nil;
// otherwise a missing '(' yields an empty, zero-length list so parsing
// can continue.
func (p *Parser) parseParameterList(requireCompleteParameterList bool) *ast.DeclarationList {
	if p.want(token.OpenParen) {
		var list = new(ast.DeclarationList)
		p.listScope(list, func() {
			p.parseDelimitedList(pcParameters, func() {
				list.Add(p.parseParameter())
			}, false)
		})
		if !p.want(token.CloseParen) && requireCompleteParameterList {
			// Caller insisted that we had to end with a )   We didn't.  So just return
			// undefined here.
			return nil
		}

		return list
	}

	// We didn't even have an open paren.  If the caller requires a complete parameter list,
	// we definitely can't provide that.  However, if they're ok with an incomplete one,
	// then just return an empty set of parameters.
	if requireCompleteParameterList {
		return nil
	}


	var list = new(ast.DeclarationList)
	p.fillMissPos(list)
	return list
}

// parseDocumentType parses a type with the scanner switched into document
// mode for the duration of the parse.
func (p *Parser) parseDocumentType() ast.Type {
	p.scanner.SetInDocument(true)
	// Restore normal scanning even if parseType panics (e.g. via error
	// recovery paths); the original code could leave the scanner stuck in
	// document mode.
	defer p.scanner.SetInDocument(false)
	return p.parseType()
}

// parseDocumentTypeDeclaration parses one entry of a document type list: an
// optional '...' followed by a type.
func (p *Parser) parseDocumentTypeDeclaration() *ast.DocumentTypeDeclaration {
	var decl = new(ast.DocumentTypeDeclaration)
	p.scope(decl, func() {
		// A leading '...' marks a variadic document type.
		if p.token() == token.DotDotDot {
			decl.Dots = p.parseToken()
		}
		decl.Type = p.parseType()
	})
	return decl
}

// parseDocumentTypeList parses a delimited list of document type declarations
// with the scanner switched into document mode.
func (p *Parser) parseDocumentTypeList() *ast.DeclarationList {
	p.scanner.SetInDocument(true)
	// Restore normal scanning even if parsing panics.
	defer p.scanner.SetInDocument(false)

	var list = new(ast.DeclarationList)
	// The original code nested p.listScope(list, ...) twice around the same
	// list — an apparent copy-paste duplicate that opened/closed the list
	// scope two times. One scope is sufficient.
	p.listScope(list, func() {
		p.parseDelimitedList(pcDocumentTypeList, func() {
			list.Add(p.parseDocumentTypeDeclaration())
		}, false)
	})
	return list
}

// parseTypeMemberSemicolon consumes the separator after a type member: type
// members may be separated by a comma or by a (possibly ASI) semicolon.
func (p *Parser) parseTypeMemberSemicolon() {
	// A comma ends the member on its own; otherwise require a semicolon.
	if !p.got(token.Comma) {
		p.parseSemicolon()
	}
}

// isTypeMemberModifier reports whether idToken is one of the member modifiers
// public, private, protected or static.
func (p *Parser) isTypeMemberModifier(idToken token.Token) bool {
	return idToken == token.PublicKeyword ||
		idToken == token.PrivateKeyword ||
		idToken == token.ProtectedKeyword ||
		idToken == token.StaticKeyword
}

// isTypeMemberStart reports whether the current position can begin a type
// member. It advances the parser past modifiers, so it is presumably only
// used under speculative lookahead — confirm at call sites.
func (p *Parser) isTypeMemberStart() bool {
	// An annotation always introduces a member.
	if p.token() == token.At {
		return true
	}

	// Skip modifiers. An access/static modifier (protected, private, public,
	// static) is decisive by itself — it certainly starts a member, which
	// gives better error recovery. Example:
	//      public foo() ...     // true
	//      public @dec blah ... // true; we will then report an error later
	//      export public ...    // true; we will then report an error later
	for p.token().IsModifier() {
		if p.isTypeMemberModifier(p.token()) {
			return p.lookAheadBool(p.parseAnyContextualModifier)
		}
		p.nextToken()
	}

	switch p.token() {
	case token.ClassKeyword,
		token.InterfaceKeyword,
		token.EnumKeyword,
		token.AnnotationKeyword,
		token.FunctionKeyword,
		token.VarKeyword:
		return true
	}
	return false
}

// isClassMemberStart reports whether the current position can begin a class
// member. Identical to isTypeMemberStart except that `constructor` is also
// accepted. It advances the parser past modifiers, so it is presumably only
// used under speculative lookahead — confirm at call sites.
func (p *Parser) isClassMemberStart() bool {
	// An annotation always introduces a member.
	if p.token() == token.At {
		return true
	}

	// Skip modifiers. An access/static modifier (protected, private, public,
	// static) is decisive by itself — it certainly starts a member, which
	// gives better error recovery. Example:
	//      public foo() ...     // true
	//      public @dec blah ... // true; we will then report an error later
	//      export public ...    // true; we will then report an error later
	for p.token().IsModifier() {
		if p.isTypeMemberModifier(p.token()) {
			return p.lookAheadBool(p.parseAnyContextualModifier)
		}
		p.nextToken()
	}

	switch p.token() {
	case token.ClassKeyword,
		token.InterfaceKeyword,
		token.EnumKeyword,
		token.AnnotationKeyword,
		token.FunctionKeyword,
		token.VarKeyword,
		token.ConstructorKeyword:
		return true
	}
	return false
}

// isEnumMemberStart reports whether an enum member begins here: any leading
// annotations and modifiers followed by an identifier.
// NOTE(review): this consumes annotations/modifiers as it checks, so it looks
// intended to run only under speculative lookahead — confirm at call sites.
func (p *Parser) isEnumMemberStart() bool {
	p.parseAnnotations()
	p.parseModifiers()
	return p.isIdentifier()
}

// isStartOfConstructSignature advances one token and reports whether a
// construct signature follows, i.e. the next token is '(' or '<'.
func (p *Parser) isStartOfConstructSignature() bool {
	p.nextToken()
	switch p.token() {
	case token.OpenParen, token.LessThan:
		return true
	}
	return false
}

// parseIntrinsicType parses a built-in primitive type keyword into an
// IntrinsicType node carrying the given annotations.
func (p *Parser) parseIntrinsicType(fullStart int, annotation *ast.NodeList) *ast.IntrinsicType {
	var tpe = new(ast.IntrinsicType)
	p.scope(tpe, func() {
		tpe.Annotations = annotation
		tpe.Token = p.token()
		p.nextToken()
	}, fullStart)
	return tpe
}

// parseNonArrayType parses a type without any array suffix: either an
// intrinsic (primitive keyword) type or a type reference, with any leading
// annotations attached.
func (p *Parser) parseNonArrayType() ast.Type {
	var fullStart = p.getNodePos()
	var annotations = p.parseAnnotations()

	switch p.token() {
	case token.BooleanKeyword,
		token.ByteKeyword,
		token.CharKeyword,
		token.ShortKeyword,
		token.IntKeyword,
		token.LongKeyword,
		token.FloatKeyword,
		token.DoubleKeyword:
		return p.parseIntrinsicType(fullStart, annotations)
	default:
		return p.parseTypeReferenceRest(fullStart, annotations)
	}
}

// isStartOfType reports whether the current token can begin a type: an
// annotation, a primitive keyword, or an identifier.
func (p *Parser) isStartOfType() bool {
	switch p.token() {
	case token.At,
		token.BooleanKeyword,
		token.ByteKeyword,
		token.CharKeyword,
		token.ShortKeyword,
		token.IntKeyword,
		token.LongKeyword,
		token.FloatKeyword,
		token.DoubleKeyword:
		return true
	}
	return p.isIdentifier()
}

// parseArrayTypeOrHigher parses a non-array type followed by any number of
// `[dim?]` suffixes on the same line, nesting each suffix into an ArrayType.
func (p *Parser) parseArrayTypeOrHigher() ast.Type {
	var result = p.parseNonArrayType()
	for !p.scanner.HasPrecedingLineBreak() && p.got(token.OpenBracket) {
		var arr = new(ast.ArrayType)
		p.scope(arr, func() {
			arr.Element = result
			// An optional dimension expression may sit between the brackets.
			if p.token() != token.CloseBracket && p.isStartOfExpression() {
				arr.Dimension = p.parseExpression()
			}
			p.want(token.CloseBracket)
		}, result.Pos())
		result = arr
	}
	return result
}

// parseUnionOrIntersectionType parses a type of the form `T (op T)*`, where
// op is '&' (intersection) or '|' (union). A single leading operator is
// tolerated and skipped. When at least one operator follows the first
// constituent, all constituents are collected and handed to create; otherwise
// the single constituent type is returned unchanged.
//
// (Signature reformatted to be gofmt-clean; the original had inconsistent
// spacing around the function-type parameters.)
func (p *Parser) parseUnionOrIntersectionType(create func(fullStart int, types *ast.TypeList) ast.Type, parseConstituentType func() ast.Type, operator token.Token) ast.Type {
	// Allow (and ignore) a leading operator, e.g. `| A | B`.
	p.got(operator)
	var tpe = parseConstituentType()
	if p.token() == operator {
		var types = new(ast.TypeList)
		types.Add(tpe)
		p.listScope(types, func() {
			for p.got(operator) {
				types.Add(parseConstituentType())
			}
		}, tpe.Pos())

		tpe = create(tpe.Pos(), types)
	}
	return tpe
}

// parseIntersectionTypeOrHigher parses `T ('&' T)*`, producing an
// IntersectionType node when more than one constituent is present.
func (p *Parser) parseIntersectionTypeOrHigher() ast.Type {
	var create = func(fullStart int, types *ast.TypeList) ast.Type {
		var tpe = new(ast.IntersectionType)
		p.scope(tpe, func() {
			tpe.Types = types
		}, fullStart)
		return tpe
	}
	return p.parseUnionOrIntersectionType(create, p.parseArrayTypeOrHigher, token.Ampersand)
}

// parseUnionTypeOrHigher parses `T ('|' T)*`, producing a UnionType node when
// more than one constituent is present.
func (p *Parser) parseUnionTypeOrHigher() ast.Type {
	var create = func(fullStart int, types *ast.TypeList) ast.Type {
		var tpe = new(ast.UnionType)
		p.scope(tpe, func() {
			tpe.Types = types
		}, fullStart)
		return tpe
	}
	return p.parseUnionOrIntersectionType(create, p.parseIntersectionTypeOrHigher, token.Bar)
}

// parseType parses a full type expression, clearing any type-excluding
// context flags for the duration of the parse.
func (p *Parser) parseType() ast.Type {
	var result ast.Type
	p.doOutsideOfContext(ast.TypeExcludesFlags, func() {
		result = p.parseUnionTypeOrHigher()
	})
	return result
}

// parseToken wraps the current token in a TokenNode and advances the parser.
func (p *Parser) parseToken() *ast.TokenNode {
	var tok = new(ast.TokenNode)
	p.scope(tok, func() {
		tok.Token = p.token()
		p.nextToken()
	})
	return tok
}

// parseBoundsType parses a wildcard type argument: '?' optionally followed by
// `extends T` or `super T`.
func (p *Parser) parseBoundsType() *ast.BoundsType {
	var bounds = new(ast.BoundsType)
	p.scope(bounds, func() {
		p.want(token.Question)
		switch p.token() {
		case token.ExtendsKeyword, token.SuperKeyword:
			bounds.Bound = p.parseToken()
			bounds.Type = p.parseType()
		}
	})
	return bounds
}

// parseTypeArgument parses a single type argument: either a '?' wildcard
// bound or an ordinary type.
func (p *Parser) parseTypeArgument() ast.Type {
	if p.token() != token.Question {
		return p.parseType()
	}
	return p.parseBoundsType()
}

// parseTypeAnnotation parses `: Type` when present; returns nil when no ':'
// follows.
func (p *Parser) parseTypeAnnotation() ast.Type {
	if !p.got(token.Colon) {
		return nil
	}
	return p.parseType()
}

// EXPRESSIONS

// isStartOfLeftHandSideExpression reports whether the current token can begin
// a left-hand-side expression (literal, this/super, parenthesized expression,
// object creation, ...), falling back to the identifier check.
func (p *Parser) isStartOfLeftHandSideExpression() bool {
	switch p.token() {
	case token.ThisKeyword,
		token.SuperKeyword,
		token.NullKeyword,
		token.TrueKeyword,
		token.FalseKeyword,
		token.CharLiteral,
		token.IntLiteral,
		token.LongLiteral,
		token.FloatLiteral,
		token.DoubleLiteral,
		token.StringLiteral,
		token.OpenParen,
		token.OpenBrace,
		token.NewKeyword,
		token.Slash,
		token.SlashEquals,
		token.Identifier:
		return true
	}
	return p.isIdentifier()
}

// isStartOfExpression reports whether the current token can begin an
// expression: a left-hand-side start, a unary prefix, or — for error
// tolerance — a binary operator.
func (p *Parser) isStartOfExpression() bool {
	if p.isStartOfLeftHandSideExpression() {
		return true
	}

	switch p.token() {
	case token.Plus,
		token.Minus,
		token.Tilde,
		token.Exclamation,
		token.VoidKeyword,
		token.PlusPlus,
		token.MinusMinus,
		token.LessThan:
		return true
	}

	// Error tolerance: treat any binary operator (except 'as') as starting an
	// expression. That way we parse out a missing identifier, report it well,
	// and then consume the rest of the binary expression.
	if p.isBinaryOperator() && p.token() != token.AsKeyword {
		return true
	}
	return p.isIdentifier()
}

// isStartOfExpressionStatement reports whether the current token can begin an
// expression statement. Per the grammar, '{', 'function', 'class' and '@'
// cannot.
func (p *Parser) isStartOfExpressionStatement() bool {
	switch p.token() {
	case token.OpenBrace, token.FunctionKeyword, token.ClassKeyword, token.At:
		return false
	}
	return p.isStartOfExpression()
}

// parseExpression parses a single (non-comma) expression. Comma-separated
// sequences are handled by parseCommaExpression.
func (p *Parser) parseExpression() ast.Expression {
	return p.parseAssignmentExpressionOrHigher()
}

// parseCommaExpression parses `expr (',' expr)*`, left-folding each comma
// into a binary expression.
func (p *Parser) parseCommaExpression() ast.Expression {
	var expr = p.parseAssignmentExpressionOrHigher()
	// BUG FIX: the original loop had no post statement, so operatorToken was
	// never refreshed and the loop could not terminate once a comma was seen.
	for operatorToken := p.gotToken(token.Comma); operatorToken != nil; operatorToken = p.gotToken(token.Comma) {
		expr = p.makeBinaryExpression(expr, operatorToken, p.parseAssignmentExpressionOrHigher())
	}
	return expr
}

// parseInitializer parses an optional `= expression` initializer. When the
// '=' itself was omitted but an expression starts on the same line, we still
// parse an initializer (reporting the missing '=') for better recovery.
func (p *Parser) parseInitializer() ast.Expression {
	if p.token() != token.Equals {
		// Users commonly forget the '='. Only attempt recovery when there is
		// no intervening line break and the current token can begin an
		// expression; otherwise there is no initializer at all.
		// NOTE equals-value clauses appear in variable declarators and in
		// parameters. For variable declarators a '{' is likely an object
		// literal; for parameters, consuming a '{' here would risk
		// swallowing the block that follows the parameter list.
		if p.scanner.HasPrecedingLineBreak() || !p.isStartOfExpression() {
			return nil
		}
	}

	p.parseExpected(token.Equals, nil, true)
	return p.parseAssignmentExpressionOrHigher()
}

// makeArrowExpr wraps an arrow function declaration in an ArrowExpression
// node spanning the same source range.
func (p *Parser) makeArrowExpr(decl *ast.ArrowFunctionDeclaration) *ast.ArrowExpression {
	var expr = new(ast.ArrowExpression)
	p.scope(expr, func() {
		expr.Declaration = decl
	}, decl.Pos())
	return expr
}

// parseAssignmentExpressionOrHigher parses an AssignmentExpression, trying
// arrow functions first, then assignments, then conditional expressions.
func (p *Parser) parseAssignmentExpressionOrHigher() ast.Expression {
	//  AssignmentExpression
	//      1) ConditionalExpression
	//      2) LeftHandSideExpression = AssignmentExpression
	//      3) LeftHandSideExpression AssignmentOperator AssignmentExpression
	//      4) ArrowFunction
	//
	// Note: for ease of implementation we treat productions '2' and '3' as the same thing.
	// (i.e. they're both BinaryExpressions with an assignment operator in it).

	// First, check if we have an arrow function (production '4') that starts with a parenthesized
	// parameter list. If we do, we must *not* recurse for productions 1, 2 or 3. An ArrowFunction is
	// not a  LeftHandSideExpression, nor does it start a ConditionalExpression.  So we are done
	// with AssignmentExpression if we see one.
	var arrowFuncDecl = p.tryParseParenthesizedArrowFunctionDeclaration()
	if arrowFuncDecl != nil {
		return p.makeArrowExpr(arrowFuncDecl)
	}

	// Now try to see if we're in production '1' '2' or '3'.  A conditional expression can
	// start with a LogicalOrExpression, while the assignment productions can only start with
	// LeftHandSideExpressions.
	//
	// So, first, we try to just parse out a BinaryExpression.  If we get something that is a
	// LeftHandSide or higher, then we can try to parse out the assignment expression part.
	// Otherwise, we try to parse out the conditional expression bit.  We want to allow any
	// binary expression here, so we pass in the 'lowest' precedence here so that it matches
	// and consumes anything.
	var expr = p.parseBinaryExpression(0)

	// To avoid a look-ahead, we did not handle the case of an arrow function with a single un-parenthesized
	// parameter ('x => ...') above. We handle it here by checking if the parsed expression was a single
	// identifier and the current token is an arrow.
	if ast.IsIdentifier(expr) && p.token() == token.EqualsGreaterThan {
		return p.makeArrowExpr(p.parseSimpleArrowFunctionDeclaration(expr.(*ast.Identifier)))
	}

	// Now see if we might be in cases '2' or '3'.
	// If the expression was a LHS expression, and we have an assignment operator, then
	// we're in '2' or '3'. Consume the assignment and return.
	//
	// Note: we call reScanGreaterToken so that we get an appropriately merged token
	// for cases like > > =  becoming >>=
	if !p.scanner.HasPrecedingLineBreak() &&
		ast.IsLeftHandSideExpression(expr) &&
		p.reScanGreaterToken().IsAssignmentOperator() {
		return p.makeBinaryExpression(expr, p.parseToken(), p.parseAssignmentExpressionOrHigher())
	}

	// It wasn't an assignment or a lambda.  This is a conditional expression:
	return p.parseConditionalExpression(expr)
}

// nextTokenIsIdentifierOnSameLine advances one token and reports whether the
// new token is an identifier on the same line as the previous one.
func (p *Parser) nextTokenIsIdentifierOnSameLine() bool {
	p.nextToken()
	if p.scanner.HasPrecedingLineBreak() {
		return false
	}
	return p.isIdentifier()
}

// parseSimpleArrowFunctionDeclaration parses `name => body`, wrapping the
// already-parsed identifier as the single unparenthesized parameter. The
// current token must be '=>'.
func (p *Parser) parseSimpleArrowFunctionDeclaration(name *ast.Identifier) *ast.ArrowFunctionDeclaration {
	if p.token() != token.EqualsGreaterThan {
		panic("parseSimpleArrowFunctionExpression should only have been called if we had a ->")
	}

	var decl = new(ast.ArrowFunctionDeclaration)
	p.scope(decl, func() {
		// Wrap the identifier in a parameter declaration...
		var param = new(ast.ParameterDeclaration)
		p.scope(param, func() {
			param.Name = name
		}, name.Pos())

		// ...and the parameter in a one-element list.
		var paramList = new(ast.DeclarationList)
		p.listScope(paramList, func() {
			paramList.Add(param)
		}, param.Pos())

		decl.Parameters = paramList
		decl.Arrow = p.parseToken()
		decl.Body = p.parseArrowExpressionBody()
	}, name.Pos())
	p.addDocument(decl)
	return decl
}

// tryParseParenthesizedArrowFunctionDeclaration attempts to parse an arrow
// function whose parameter list is parenthesized. Returns nil when the input
// is definitely not — or speculatively turns out not to be — an arrow
// function; otherwise returns the parsed declaration with its body attached.
func (p *Parser) tryParseParenthesizedArrowFunctionDeclaration() *ast.ArrowFunctionDeclaration {
	var triState = p.isParenthesizedArrowFunctionDeclaration()
	if triState != nil && !*triState {
		// It's definitely not a parenthesized arrow function expression.
		return nil
	}

	// If we definitely have an arrow function, then we can just parse one, not requiring a
	// following => or { token. Otherwise, we *might* have an arrow function.  Try to parse
	// it out, but don't allow any ambiguity, and return 'undefined' if this could be an
	// expression instead.
	var arrowFunc *ast.ArrowFunctionDeclaration
	if triState != nil && *triState {
		arrowFunc = p.parseParenthesizedArrowFunctionDeclarationHead(true)
	} else {
		// NOTE(review): this assertion relies on tryParse always returning a
		// (possibly nil) *ast.ArrowFunctionDeclaration value, never an
		// untyped nil interface — otherwise it panics on rollback; confirm.
		arrowFunc = p.tryParse(func() interface{} {
			return p.parsePossibleParenthesizedArrowFunctionDeclarationHead()
		}).(*ast.ArrowFunctionDeclaration)
	}

	if arrowFunc == nil {
		// Didn't appear to actually be a parenthesized arrow function.  Just bail out.
		return nil
	}

	// If we have an arrow, then try to parse the body. Even if not, try to parse if we
	// have an opening brace, just in case we're in an error state.
	var lastToken = p.token()
	arrowFunc.Arrow = p.wantToken(token.EqualsGreaterThan, false, diagnostic.M_0_expected, "=>")
	if lastToken == token.EqualsGreaterThan || lastToken == token.OpenBrace {
		arrowFunc.Body = p.parseArrowExpressionBody()
	} else {
		arrowFunc.Body = p.parseIdentifier(nil)
	}
	arrowFunc.SetEnd(p.startPos())
	p.addDocument(arrowFunc)
	return arrowFunc
}

// isParenthesizedArrowFunctionDeclaration classifies the current position:
//
//	true  -> definitely a parenthesized arrow function
//	false -> definitely not a parenthesized arrow function
//	nil   -> might be; caller must speculate and roll back on failure
func (p *Parser) isParenthesizedArrowFunctionDeclaration() *bool {
	switch p.token() {
	case token.OpenParen:
		return p.lookAhead(func() interface{} {
			return p.isParenthesizedArrowFunctionDeclarationWorker()
		}).(*bool)
	case token.EqualsGreaterThan:
		// ERROR RECOVERY TWEAK: a standalone => is most likely an arrow
		// function the user is mid-way through writing, so treat it as one.
		return xbase.NewBool(true)
	default:
		// Definitely not a parenthesized arrow function.
		return xbase.NewBool(false)
	}
}

// isParenthesizedArrowFunctionDeclarationWorker inspects, under lookahead,
// the tokens following the current '(' (or '<') and returns true/false when
// the classification is certain, or nil when the input remains ambiguous and
// the caller must speculatively parse.
func (p *Parser) isParenthesizedArrowFunctionDeclarationWorker() *bool {

	var first = p.token()
	var second = p.nextToken()

	if first == token.OpenParen {
		if second == token.CloseParen {
			// Simple cases: "() =>", "(): ", and  "() {".
			// This is an arrow function with no parameters.
			// The last one is not actually an arrow function,
			// but this is probably what the user intended.
			var third = p.nextToken()
			switch third {
			case token.EqualsGreaterThan,
				token.Colon,
				token.OpenBrace:
				return xbase.NewBool(true)
			default:
				return xbase.NewBool(false)
			}
		}

		// Simple case: "(..."
		// This is an arrow function with a rest parameter.
		if second == token.DotDotDot {
			return xbase.NewBool(true)
		}

		// If we had "(" followed by something that's not an identifier,
		// then this definitely doesn't look like a lambda.
		// Note: we could be a little more lenient and allow
		// "(public" or "(private". These would not ever actually be allowed,
		// but we could provide a good error message instead of bailing out.
		if !p.isIdentifier() {
			return xbase.NewBool(false)
		}

		// If we have something like "(a:", then we must have a
		// type-annotated parameter in an arrow function expression.
		if p.nextToken() == token.Colon {
			return xbase.NewBool(true)
		}

		// This *could* be a parenthesized arrow function.
		// Return Unknown to let the caller know.
		return nil
	} else {
		if first != token.LessThan {
			panic("First must is less then token.")
		}

		// If we have "<" not followed by an identifier,
		// then this definitely is not an arrow function.
		if !p.isIdentifier() {
			return xbase.NewBool(false)
		}

		// This *could* be a parenthesized arrow function.
		return nil
	}
}

// parsePossibleParenthesizedArrowFunctionDeclarationHead speculatively parses
// an arrow-function head with ambiguity disallowed, so the caller can roll
// back when nil is returned.
func (p *Parser) parsePossibleParenthesizedArrowFunctionDeclarationHead() *ast.ArrowFunctionDeclaration {
	return p.parseParenthesizedArrowFunctionDeclarationHead(false)
}

// parseParenthesizedArrowFunctionDeclarationHead parses the head of a
// parenthesized arrow function (type parameters and parameter list), without
// the arrow or body. Returns nil when the head cannot be parsed — or, when
// allowAmbiguity is false, when the head is not unambiguously an arrow
// function (complete parameter list followed by '=>').
func (p *Parser) parseParenthesizedArrowFunctionDeclarationHead(allowAmbiguity bool) *ast.ArrowFunctionDeclaration {
	var node = new(ast.ArrowFunctionDeclaration)
	node.SetPos(p.startPos())

	// Arrow functions are never generators.
	//
	// If we're speculatively parsing a signature for a parenthesized arrow function, then
	// we have to have a complete parameter list.  Otherwise we might see something like
	// a => (b => c)
	// And think that "(b =>" was actually a parenthesized arrow function with a missing
	// close paren.

	node.TypeParameters = p.parseTypeParameters()
	node.Parameters = p.parseParameterList(!allowAmbiguity)

	// If we couldn't get parameters, we definitely could not parse out an arrow function.
	if node.Parameters == nil {
		return nil
	}

	// Parsing a signature isn't enough.
	// Parenthesized arrow signatures often look like other valid expressions.
	// For instance:
	//  - "(x = 10)" is an assignment expression parsed as a signature with a default parameter value.
	//  - "(x,y)" is a comma expression parsed as a signature with two parameters.
	//  - "a ? (b): c" will have "(b):" parsed as a signature with a return type annotation.
	//
	// So we need just a bit of lookahead to ensure that it can only be a signature.
	if !allowAmbiguity && p.token() != token.EqualsGreaterThan {
		// Returning undefined here will cause our caller to rewind to where we started from.
		return nil
	}

	return node
}

// parseArrowExpressionBody parses the body of an arrow function: a block when
// '{' follows, an expression otherwise — with recovery for a likely missing
// open brace.
func (p *Parser) parseArrowExpressionBody() ast.Node {
	if p.token() == token.OpenBrace {
		return p.parseFunctionBlock(false, nil)
	}

	if p.token() != token.Semicolon &&
		p.token() != token.FunctionKeyword &&
		p.token() != token.ClassKeyword &&
		p.isStartOfStatement() &&
		!p.isStartOfExpressionStatement() {
		// Check if we got a plain statement (i.e. no expression-statements, no function/class expressions/declarations)
		//
		// Here we try to recover from a potential error situation in the case where the
		// user meant to supply a block. For example, if the user wrote:
		//
		//  a =>
		//      let v = 0;
		//  }
		//
		// they may be missing an open brace.  Check to see if that's the case so we can
		// try to recover better.  If we don't do this, then the next close curly we see may end
		// up preemptively closing the containing construct.
		//
		// Note: even when 'ignoreMissingOpenBrace' is passed as true, parseBody will still error.
		return p.parseFunctionBlock(true, nil)
	}

	return p.parseAssignmentExpressionOrHigher()
}

// parseConditionalExpression parses the `? whenTrue : whenFalse` tail of a
// conditional expression, with leftOperand as the condition. When no '?'
// follows, leftOperand is returned unchanged.
func (p *Parser) parseConditionalExpression(leftOperand ast.Expression) ast.Expression {
	// leftOperand was produced by parseBinaryExpression (or higher).
	var question = p.gotToken(token.Question)
	if question == nil {
		return leftOperand
	}

	var cond = new(ast.ConditionalExpression)
	p.scope(cond, func() {
		cond.Condition = leftOperand
		cond.QuestionTok = question
		cond.WhenTrue = p.parseAssignmentExpressionOrHigher()
		cond.ColonTok = p.wantToken(token.Colon, false,
			diagnostic.M_0_expected, token.Colon.ToString())
		cond.WhenFalse = p.parseAssignmentExpressionOrHigher()
	}, leftOperand.Pos())
	return cond
}

// parseBinaryExpression parses a unary expression and then folds in any
// following binary operators whose precedence exceeds the given one.
func (p *Parser) parseBinaryExpression(precedence int) ast.Expression {
	return p.parseBinaryExpressionRest(precedence, p.parseUnaryExpression())
}

// parseBinaryExpressionRest folds binary operators (and `as` casts) whose
// precedence is strictly greater than precedence into leftOperand, returning
// the resulting expression. (`for true` replaced with idiomatic `for`;
// flagged by staticcheck S1006.)
func (p *Parser) parseBinaryExpressionRest(precedence int, leftOperand ast.Expression) ast.Expression {
	for {
		// We either have a binary operator here, or we're finished.  We call
		// reScanGreaterToken so that we merge token sequences like > and = into >=
		p.reScanGreaterToken()
		var newPrecedence = p.getBinaryOperatorPrecedence()

		// Only consume the operator when it binds tighter than the current
		// precedence; otherwise return the operand to the caller.
		// - For left associative operators, consume the operator, recursively
		//   call below, and parse a binaryExpression as the rightOperand of the
		//   caller if the new precedence is greater than the current one.
		//   For example:
		//      a - b - c
		//            ^token; leftOperand = b. Return b to the caller as a rightOperand
		//      a * b - c
		//            ^token; leftOperand = b. Return b to the caller as a rightOperand
		//      a - b * c
		//            ^token; leftOperand = b. Return b * c to the caller as a rightOperand
		if newPrecedence <= precedence {
			break
		}

		if p.token() == token.AsKeyword {
			// Make sure we *do* perform ASI for constructs like this:
			//    var x = foo
			//    as (Bar)
			// This should be parsed as an initialized variable, followed
			// by a function call to 'as' with the argument 'Bar'
			if p.scanner.HasPrecedingLineBreak() {
				break
			}
			p.nextToken()
			leftOperand = p.makeAsExpression(leftOperand, p.parseType())
			continue
		}

		leftOperand = p.makeBinaryExpression(leftOperand, p.parseToken(), p.parseBinaryExpression(newPrecedence))
	}

	return leftOperand
}

// isBinaryOperator reports whether the current token is a binary operator,
// i.e. it has a positive precedence in getBinaryOperatorPrecedence.
func (p *Parser) isBinaryOperator() bool {
	return p.getBinaryOperatorPrecedence() > 0
}

// getBinaryOperatorPrecedence returns the binding precedence of the current
// token as a binary operator, or -1 when it is not one. -1 is lower than
// every real precedence, which makes binary-expression parsing stop.
func (p *Parser) getBinaryOperatorPrecedence() int {
	switch p.token() {
	case token.BarBar:
		return 1
	case token.AmpersandAmpersand:
		return 2
	case token.Bar:
		return 3
	case token.Caret:
		return 4
	case token.Ampersand:
		return 5
	case token.EqualsEquals, token.ExclamationEquals:
		return 6
	case token.LessThan, token.GreaterThan, token.LessThanEquals,
		token.GreaterThanEquals, token.InstanceOfKeyword, token.AsKeyword:
		return 7
	case token.LessThanLessThan, token.GreaterThanGreaterThan,
		token.GreaterThanGreaterThanGreaterThan:
		return 8
	case token.Plus, token.Minus:
		return 9
	case token.Asterisk, token.Slash, token.Percent:
		return 10
	default:
		return -1
	}
}

// makeBinaryExpression builds a BinaryExpression node spanning from the left
// operand to the current position.
func (p *Parser) makeBinaryExpression(left ast.Expression, operator *ast.TokenNode, right ast.Expression) *ast.BinaryExpression {
	var expr = new(ast.BinaryExpression)
	p.scope(expr, func() {
		expr.Left = left
		expr.Operator = operator
		expr.Right = right
	}, left.Pos())
	return expr
}

// makeAsExpression builds an AsExpression (`expr as Type`) node spanning from
// the left operand to the current position.
func (p *Parser) makeAsExpression(left ast.Expression, right ast.Type) *ast.AsExpression {
	var expr = new(ast.AsExpression)
	expr.Expression = left
	expr.Type = right
	expr.SetPos(left.Pos())
	expr.SetEnd(p.startPos())
	return expr
}

// parsePrefixUnaryExpression parses `<op> operand`, where the current token
// is the prefix operator.
func (p *Parser) parsePrefixUnaryExpression() *ast.PrefixUnaryExpression {
	var expr = new(ast.PrefixUnaryExpression)
	p.scope(expr, func() {
		expr.Operator = p.parseToken()
		expr.Operand = p.parseSimpleUnaryExpression()
	})
	return expr
}

// parseUnaryExpression parses a unary expression or higher:
//
//	UnaryExpression:
//	    1) UpdateExpression
//	    2) + UpdateExpression
//	    3) - UpdateExpression
//	    4) ~ UpdateExpression
//	    5) ! UpdateExpression
func (p *Parser) parseUnaryExpression() ast.Expression {
	// When the current token is a simple-unary prefix operator (or void),
	// take the simple-unary route; otherwise go straight to the
	// increment-expression production:
	//
	//	UpdateExpression:
	//	    1) LeftHandSideExpression[?Yield]
	//	    2) LeftHandSideExpression[?Yield][no LineTerminator here]++
	//	    3) LeftHandSideExpression[?Yield][no LineTerminator here]--
	//	    4) ++UnaryExpression[?Yield]
	//	    5) --UnaryExpression[?Yield]
	if !p.isUpdateExpression() {
		return p.parseSimpleUnaryExpression()
	}
	return p.parseIncrementExpression()
}

// parseSimpleUnaryExpression parses a simple-unary expression or higher:
//
//	UnaryExpression:
//	    1) UpdateExpression
//	    2) + UnaryExpression
//	    3) - UnaryExpression
//	    4) ~ UnaryExpression
//	    5) ! UnaryExpression
//
// A leading +, -, ~ or ! produces a prefix unary expression; anything else is
// parsed as an increment expression.
func (p *Parser) parseSimpleUnaryExpression() ast.Expression {
	if t := p.token(); t == token.Plus || t == token.Minus || t == token.Tilde || t == token.Exclamation {
		return p.parsePrefixUnaryExpression()
	}
	return p.parseIncrementExpression()
}

// isUpdateExpression decides, inside parseUnaryExpression, whether to call
// parseIncrementExpression directly (true) or to go through
// parseSimpleUnaryExpression (false for the prefix operators +, -, ~, ! and
// the void keyword).
//
//	UpdateExpression:
//	    LeftHandSideExpression
//	    LeftHandSideExpression[no LineTerminator here]++
//	    LeftHandSideExpression[no LineTerminator here]--
//	    ++LeftHandSideExpression
//	    --LeftHandSideExpression
func (p *Parser) isUpdateExpression() bool {
	switch p.token() {
	case token.Plus, token.Minus, token.Tilde, token.Exclamation, token.VoidKeyword:
		return false
	}
	return true
}

// parseIncrementExpression parses an IncrementExpression (used instead of
// ES6's PostFixExpression):
//
//	IncrementExpression:
//	    1) LeftHandSideExpression
//	    2) LeftHandSideExpression [[no LineTerminator here]]++
//	    3) LeftHandSideExpression [[no LineTerminator here]]--
//	    4) ++LeftHandSideExpression
//	    5) --LeftHandSideExpression
//
// (2) and (3) produce PostfixUnaryExpression; (4) and (5) produce
// PrefixUnaryExpression.
func (p *Parser) parseIncrementExpression() ast.Expression {
	switch p.token() {
	case token.PlusPlus, token.MinusMinus:
		var prefix = new(ast.PrefixUnaryExpression)
		p.scope(prefix, func() {
			prefix.Operator = p.parseToken()
			prefix.Operand = p.parseLeftHandSideExpressionOrHigher()
		})
		return prefix
	}

	var operand = p.parseLeftHandSideExpressionOrHigher()
	assertMsg(ast.IsLeftHandSideExpression(operand), "Here expression must is left hand side expression.")

	// A postfix ++/-- must appear on the same line as its operand.
	if (p.token() == token.PlusPlus || p.token() == token.MinusMinus) && !p.scanner.HasPrecedingLineBreak() {
		var postfix = new(ast.PostfixUnaryExpression)
		p.scope(postfix, func() {
			postfix.Operand = operand
			postfix.Operator = p.parseToken()
		}, operand.Pos())
		return postfix
	}

	return operand
}

// parseLeftHandSideExpressionOrHigher parses a LeftHandSideExpression:
//
//	LeftHandSideExpression:          // See 11.2
//	    MemberExpression
//	    CallExpression
//
// CallExpression expands into:
//
//	CallExpression:
//	    MemberExpression Arguments
//	    CallExpression Arguments
//	    CallExpression[Expression]
//	    CallExpression.IdentifierName
//	    super ( ArgumentList opt )
//	    super.IdentifierName
//
// Both productions are left-recursive, so we must bottom out first: either a
// 'super' form or a MemberExpression, each of which either completes the
// LeftHandSideExpression or begins a CallExpression. We therefore parse a
// member expression (or higher) and then consume any call tail that follows.
func (p *Parser) parseLeftHandSideExpressionOrHigher() ast.Expression {
	var member = p.parseMemberExpressionOrHigher()
	return p.parseCallExpressionRest(member)
}

// parseMemberExpressionOrHigher parses a MemberExpression:
//
//	MemberExpression:                // See 11.2
//	    PrimaryExpression
//	    MemberExpression[Expression]
//	    MemberExpression.IdentifierName
//
// To simplify the grammar, the NewExpression productions are decomposed and
// ObjectCreationExpression / ArrowFunction are folded into PrimaryExpression.
// "new Foo()" is then ambiguous between an object creation and an invocation
// of "new Foo"; we resolve it (matching the original grammar) by always
// consuming an argument list as part of the *associated* object creation, so
// "new Foo()" is a single creation and any further argument lists become
// invocation expressions.
//
// MemberExpression is left-recursive, so we bottom out with a primary
// expression and then consume the member-access tail.
func (p *Parser) parseMemberExpressionOrHigher() ast.Expression {
	var primary = p.parsePrimaryExpression()
	return p.parseMemberExpressionRest(primary)
}

// parseThisExpression parses a bare `this` expression with no qualifier.
func (p *Parser) parseThisExpression() *ast.ThisExpression {
	return p.parseThisExpressionRest(p.startPos(), nil)
}

// parseThisExpressionRest builds a ThisExpression whose qualifier is left
// (possibly nil), consuming the `this` keyword.
func (p *Parser) parseThisExpressionRest(fullStart int, left ast.Expression) *ast.ThisExpression {
	var expr = new(ast.ThisExpression)
	expr.SetPos(fullStart)
	expr.Expression = left
	p.want(token.ThisKeyword)
	expr.SetEnd(p.startPos())
	return expr
}

// parseSuperExpression parses a bare `super` expression with no qualifier.
func (p *Parser) parseSuperExpression() *ast.SuperExpression {
	return p.parseSuperExpressionRest(p.startPos(), nil)
}

// parseSuperExpressionRest consumes the 'super' keyword at the current token
// and attaches the (possibly nil) qualifier expression on its left. 'super'
// is only meaningful as a call or member-access target, so anything else
// following it is diagnosed immediately.
func (p *Parser) parseSuperExpressionRest(fullStart int, left ast.Expression) *ast.SuperExpression {
	node := new(ast.SuperExpression)
	node.SetPos(fullStart)
	node.Expression = left
	p.want(token.SuperKeyword)
	node.SetEnd(p.startPos())
	next := p.token()
	if next != token.OpenParen && next != token.Dot {
		p.errorAtCurrentToken(diagnostic.M_super_must_be_followed_by_an_argument_list_or_member_access)
	}
	return node
}

// parseClassExpressionRest parses the 'class' keyword of a 'Type.class'
// expression; expr is the already-parsed entity name on the left.
func (p *Parser) parseClassExpressionRest(fullStart int, expr ast.Expression) *ast.ClassExpression {
	node := &ast.ClassExpression{}
	p.scope(node, func() {
		p.want(token.ClassKeyword)
		node.Type = expr
	}, fullStart)
	return node
}

// parseMemberExpressionRest repeatedly consumes member-access suffixes
// ('.name', '.new Type(...)', '.this', '.super', '.class', and '[index]')
// that follow an already-parsed expression, building the access chain left
// to right. It stops at the first token that is not a suffix or that begins
// on a new line.
func (p *Parser) parseMemberExpressionRest(expr ast.Expression) ast.Expression {
	for {
		// A suffix must start on the same line as the expression it extends.
		if p.scanner.HasPrecedingLineBreak() {
			break
		}

		var dotToken = p.gotToken(token.Dot)
		if dotToken != nil {
			switch p.token() {
			case token.NewKeyword:
				// Qualified object creation: expr.new Type(...)
				var call = p.parseConstructExpressionRest(expr.Pos(), expr)
				expr = p.parseNewExpressionRest(expr.Pos(), call)
				continue
			case token.ThisKeyword:
				expr = p.parseThisExpressionRest(expr.Pos(), expr)
				continue
			case token.SuperKeyword:
				expr = p.parseSuperExpressionRest(expr.Pos(), expr)
				continue
			case token.ClassKeyword:
				// 'Type.class' is only valid when the left side is an entity name;
				// otherwise fall through and treat 'class' as a plain selector.
				if ast.IsEntityName(expr) {
					expr = p.parseClassExpressionRest(expr.Pos(), expr)
					continue
				}
				fallthrough
			default:
				var node = new(ast.SelectorExpression)
				p.scope(node, func() {
					node.Expression = expr
					node.Name = p.parseRightSideOfDot()
				}, expr.Pos())
				expr = node
				continue
			}
		}

		if p.got(token.OpenBracket) {
			var node = new(ast.ElementAccessExpression)
			p.scope(node, func() {
				node.Expression = expr
				// An empty index 'a[]' is tolerated here — presumably diagnosed
				// by a later check; confirm.
				if p.token() != token.CloseBracket {
					node.ArgumentExpression = p.parseExpression()
				}
				p.want(token.CloseBracket)
			}, expr.Pos())

			expr = node
			continue
		}

		break
	}

	return expr
}

// parseCallExpressionRest consumes call suffixes (member accesses, generic
// invocations 'f<T>(...)', and plain argument lists) that follow an
// already-parsed expression.
func (p *Parser) parseCallExpressionRest(expr ast.Expression) ast.Expression {
	for {
		// A suffix must start on the same line as the expression it extends.
		if p.scanner.HasPrecedingLineBreak() {
			break
		}

		expr = p.parseMemberExpressionRest(expr)
		if p.token() == token.LessThan {
			// See if this is the start of a generic invocation.  If so, consume it and
			// keep checking for postfix expressions.  Otherwise, it's just a '<' that's
			// part of an arithmetic expression.  Break out so we consume it higher in the
			// stack.
			var typeArgs = p.parseTypeArgumentsInExpression()
			if typeArgs == nil {
				return expr
			}

			var callExpr = new(ast.CallExpression)
			p.scope(callExpr, func() {
				callExpr.Expression = expr
				callExpr.TypeArguments = typeArgs
				callExpr.Arguments = p.parseArgumentList()
			}, expr.Pos())
			expr = callExpr
			continue
		} else if p.token() == token.OpenParen {
			var callExpr = new(ast.CallExpression)
			p.scope(callExpr, func() {
				callExpr.Expression = expr
				callExpr.Arguments = p.parseArgumentList()
			}, expr.Pos())
			expr = callExpr
			continue
		}

		break
	}

	return expr
}

// parseArgumentList parses a parenthesized, comma-delimited argument list.
// Returns nil when the opening '(' is missing.
func (p *Parser) parseArgumentList() *ast.ExpressionList {
	if !p.want(token.OpenParen) {
		return nil
	}
	args := &ast.ExpressionList{}
	p.listScope(args, func() {
		p.parseDelimitedList(pcArgumentExpressions, func() {
			args.Add(p.parseArgumentExpression())
		}, false)
	})

	p.want(token.CloseParen)
	return args
}

// parseTypeArgumentsInExpression speculatively parses '<' typeArgs '>' in an
// expression position. It runs inside tryParse so the scanner rewinds when
// nil is returned: the list must close with '>' and be followed by a token
// that can legally follow a type argument list; otherwise the '<' is left
// for the caller to treat as a comparison operator.
func (p *Parser) parseTypeArgumentsInExpression() *ast.TypeList {
	var parse = func() *ast.TypeList {
		if !p.got(token.LessThan) {
			return nil
		}

		var list = new(ast.TypeList)
		p.parseDelimitedList(pcTypeArguments, func() {
			list.Add(p.parseTypeArgument())
		}, false)
		if !p.want(token.GreaterThan) {
			// Without the closing '>' it's definitely not a type argument list.
			return nil
		}

		// Only commit when the next token can legally follow a type argument
		// list; otherwise rewind and return nothing.
		// (The previous 'list != nil' guard was removed: list is freshly
		// allocated above and can never be nil.)
		if p.canFollowTypeArgumentsInExpression() {
			return list
		}
		return nil
	}

	return p.tryParse(func() interface{} {
		return parse()
	}).(*ast.TypeList)
}

// canFollowTypeArgumentsInExpression reports whether the current token may
// legally appear immediately after a '<...>' type argument list in an
// expression position. It is the disambiguation step between a generic
// invocation and a relational expression.
func (p *Parser) canFollowTypeArgumentsInExpression() bool {
	switch p.token() {
	case token.OpenParen, // foo<x>(
		// this case are the only case where this token can legally follow a type argument
		// list.  So we definitely want to treat this as a type arg list.

		token.Dot,                     // foo<x>.
		token.CloseParen,              // foo<x>)
		token.CloseBracket,            // foo<x>]
		token.Colon,                   // foo<x>:
		token.Semicolon,               // foo<x>;
		token.Question,                // foo<x>?
		token.EqualsEquals,            // foo<x> ==
		token.ExclamationEquals,       // foo<x> !=
		token.AmpersandAmpersand,      // foo<x> &&
		token.BarBar,                  // foo<x> ||
		token.Caret,                   // foo<x> ^
		token.Ampersand,               // foo<x> &
		token.Bar,                     // foo<x> |
		token.CloseBrace,              // foo<x> }
		token.EndOfFile:               // foo<x> <eof>
		// these cases can't legally follow a type arg list.  However, they're not legal
		// expressions either.  The user is probably in the middle of a generic type. So
		// treat it as such.
		return true

	case token.Comma, // foo<x>,
		token.OpenBrace: // foo<x> {
		// We don't want to treat these as type arguments.  Otherwise we'll parse this
		// as an invocation expression.  Instead, we want to parse out the expression
		// in isolation from the type arguments.
		// (Kept as an explicit case, distinct from default, for documentation.)
		return false
	default:
		// Anything else treat as an expression.
		return false
	}
}

// parsePrimaryExpression parses the non-left-recursive base of an expression:
// literals, 'this'/'super', parenthesized and array literals, 'new'
// expressions, or — failing all of those — an identifier.
func (p *Parser) parsePrimaryExpression() ast.Expression {
	switch p.token() {
	case token.IntLiteral, token.FloatLiteral, token.DoubleLiteral,
		token.LongLiteral, token.CharLiteral, token.StringLiteral,
		token.NullKeyword, token.TrueKeyword, token.FalseKeyword:
		return p.parseLiteralExpression()
	case token.ThisKeyword:
		return p.parseThisExpression()
	case token.SuperKeyword:
		return p.parseSuperExpression()
	case token.OpenParen:
		return p.parseParenthesizedExpression()
	case token.OpenBrace:
		return p.parseArrayLiteralExpression()
	case token.NewKeyword:
		return p.parseNewExpression()
	default:
		// Anything else must be an identifier; a diagnostic is reported if not.
		return p.parseIdentifier(diagnostic.M_Expression_expected)
	}
}

// parseParenthesizedExpression parses '(' expression ')'.
func (p *Parser) parseParenthesizedExpression() *ast.ParenthesizedExpression {
	node := &ast.ParenthesizedExpression{}
	p.scope(node, func() {
		p.want(token.OpenParen)
		node.Expression = p.parseExpression()
		p.want(token.CloseParen)
	})
	return node
}

// parseArgumentOrArrayLiteralElement parses one element of an argument list
// or array literal; a bare comma yields an omitted-expression placeholder.
func (p *Parser) parseArgumentOrArrayLiteralElement() ast.Expression {
	if p.token() == token.Comma {
		var expr = new(ast.OmittedExpression)
		expr.SetPos(p.startPos())
		// NOTE(review): the end position is never set here — presumably a
		// zero-width range is intended; confirm SetEnd is not required.
		return expr
	}
	// else-after-return removed (staticcheck-style guard clause).
	return p.parseAssignmentExpressionOrHigher()
}

// parseArgumentExpression parses a single call argument. It delegates to the
// shared argument/array-element parser so elided elements (bare commas) are
// handled identically in both contexts.
func (p *Parser) parseArgumentExpression() ast.Expression {
	return p.parseArgumentOrArrayLiteralElement()
}

// parseArrayLiteralExpression parses '{' element, element, ... '}'.
func (p *Parser) parseArrayLiteralExpression() *ast.ArrayLiteralExpression {
	node := &ast.ArrayLiteralExpression{}
	p.scope(node, func() {
		p.want(token.OpenBrace)
		elements := &ast.ExpressionList{}
		p.listScope(elements, func() {
			p.parseDelimitedList(pcArrayLiteralMembers, func() {
				elements.Add(p.parseArgumentOrArrayLiteralElement())
			}, false)
		})
		node.Elements = elements
		p.want(token.CloseBrace)
	})
	return node
}

// parseConstructExpressionRest parses 'new TypeName<TypeArgs>(args)' where
// scope is the (possibly nil) qualifier expression to the left of 'new'.
func (p *Parser) parseConstructExpressionRest(fullStart int, scope ast.Expression) ast.Expression {
	node := &ast.ConstructorExpression{}
	p.scope(node, func() {
		node.Scope = scope
		p.want(token.NewKeyword)
		node.TypeName = p.parseEntityName(nil)
		// Optional explicit type arguments.
		if p.token() == token.LessThan {
			node.TypeArguments = p.parseTypeArgumentList()
		}
		node.Arguments = p.parseArgumentList()
	}, fullStart)
	return node
}

// parseNewExpression parses a 'new' expression. Note: the leading keyword and
// type are parsed before any node is scoped, because the node kind depends on
// the parsed type.
//
// After 'new', the parsed type decides the shape:
//   - array/intrinsic/union/intersection types become a NewArrayExpression
//     with an optional '{...}' initializer;
//   - a TypeReference followed by '{' becomes an anonymous-class
//     NewObjectExpression (via QualifiedExpression);
//   - any other TypeReference becomes a ConstructorExpression with an
//     argument list.
func (p *Parser) parseNewExpression() ast.Expression {
	var fullStart = p.startPos()
	p.want(token.NewKeyword)
	var tpe = p.parseType()
	switch t := tpe.(type) {
	case *ast.ArrayType,
		*ast.IntrinsicType,
		*ast.UnionType,
		*ast.IntersectionType:
		var node = new(ast.NewArrayExpression)
		p.scope(node, func() {
			node.Type = tpe
			// Error report is placed in grammar check

			if p.token() == token.OpenBrace {
				node.Initializer = p.parseArrayLiteralExpression()
			}
		}, fullStart)
		return node

	case *ast.TypeReference:
		if p.token() == token.OpenBrace {
			// 'new Type { ... }': anonymous class body follows.
			var node = new(ast.QualifiedExpression)
			p.scope(node, func() {
				node.Type = t
			}, fullStart)
			return p.parseNewExpressionRest(fullStart, node)
		} else {
			// 'new Type(args)': plain constructor invocation.
			var node = new(ast.ConstructorExpression)
			p.scope(node, func() {
				node.TypeName = t.Name
				node.TypeArguments = t.TypeArguments
				node.Arguments = p.parseArgumentList()
			}, fullStart)
			return p.parseNewExpressionRest(fullStart, node)
		}

	default:
		// parseType only produces the kinds handled above.
		panic("Unknown type in parseNewExpression")
	}
}

// parseNewExpressionRest wraps an already-parsed creation target in a
// NewObjectExpression, attaching an anonymous class body when one follows
// (or is required for a qualified-expression target).
func (p *Parser) parseNewExpressionRest(fullStart int, tpe ast.Expression) *ast.NewObjectExpression {
	node := &ast.NewObjectExpression{}
	p.scope(node, func() {
		node.Type = tpe
		hasBody := p.token() == token.OpenBrace || ast.IsQualifiedExpression(tpe)
		if hasBody {
			node.Body = p.parseAnonymousClassDeclaration()
		}
	}, fullStart)
	return node
}

// STATEMENTS

// parseBlockStatement parses '{' statements '}'. When the opening brace is
// missing, diagnosticMessage (if non-nil) customizes the error; with
// ignoreMissingOpenBrace true the body is parsed anyway.
func (p *Parser) parseBlockStatement(ignoreMissingOpenBrace bool, diagnosticMessage *diagnostic.Message) *ast.BlockStatement {
	var node = new(ast.BlockStatement)
	p.scope(node, func() {
		// If not want, node is missing
		if p.parseExpected(token.OpenBrace, diagnosticMessage, true) || ignoreMissingOpenBrace {
			node.Statements = new(ast.StatementList)
			p.listScope(node.Statements, func() {
				p.parseList(pcBlockStatements, func() {
					node.Statements.Add(p.parseStatement())
				})
			})
			p.want(token.CloseBrace)
		}
	})

	return node
}

// parseFunctionBlock parses a function body. Currently identical to a block
// statement; kept as a separate entry point for call-site clarity.
func (p *Parser) parseFunctionBlock(ignoreMissingOpenBrace bool, diagnosticMessage *diagnostic.Message) *ast.BlockStatement {
	return p.parseBlockStatement(ignoreMissingOpenBrace, diagnosticMessage)
}

// initNode records bookkeeping for a freshly allocated AST node.
// (gofmt: 'p.nodeCount ++' is not valid formatting.)
func (p *Parser) initNode(node ast.Node) {
	p.nodeCount++
}

// finishNode is the hook invoked after a node's children have been parsed.
// Currently a no-op; kept so scope() has a symmetric begin/end pair.
func (p *Parser) finishNode(node ast.Node) {
}

// scope runs f to parse node's children, assigning node's source range via
// listScope and invoking the init/finish hooks around it. Optional pos
// values override the start (pos[0]) and end (pos[1]) positions.
// (gofmt: 'pos ... int' / 'pos ... )' normalized.)
func (p *Parser) scope(node ast.Node, f func(), pos ...int) {
	p.initNode(node)
	p.listScope(node, f, pos...)
	p.finishNode(node)
}

// listScope assigns node's source range around the execution of f. Optional
// positions may be supplied: pos[0] overrides the start and pos[1] the end;
// otherwise the scanner's current start position is used on each side.
// (gofmt: 'pos ... int' normalized.)
func (p *Parser) listScope(node ast.TextRange, f func(), pos ...int) {
	if len(pos) > 0 {
		node.SetPos(pos[0])
	} else {
		node.SetPos(p.startPos())
	}

	f()

	if len(pos) > 1 {
		node.SetEnd(pos[1])
	} else {
		node.SetEnd(p.startPos())
	}
}

// fillMissPos gives a missing node a zero-width range at the current
// scanner position.
func (p *Parser) fillMissPos(node ast.TextRange) {
	node.SetPos(p.startPos())
	node.SetEnd(p.startPos())
}

// parseEmptyStatement parses a lone ';'.
func (p *Parser) parseEmptyStatement() *ast.EmptyStatement {
	node := &ast.EmptyStatement{}
	p.scope(node, func() {
		p.want(token.Semicolon)
	})
	return node
}

// parseIfStatement parses 'if cond <block> [else <block-or-if>]'. The 'then'
// branch and a plain 'else' branch must be blocks; 'else if' chains re-enter
// the statement parser.
func (p *Parser) parseIfStatement() *ast.IfStatement {
	node := &ast.IfStatement{}
	p.scope(node, func() {
		p.want(token.IfKeyword)
		node.Condition = p.parseExpression()
		node.ThenStatement = p.parseBlockStatement(false, nil)
		if !p.got(token.ElseKeyword) {
			return
		}
		if p.token() == token.IfKeyword {
			node.ElseStatement = p.parseStatement()
		} else {
			node.ElseStatement = p.parseBlockStatement(false, nil)
		}
	})
	return node
}

// parseDoStatement parses 'do <block> while <condition>'.
// (gofmt: missing space after '=' fixed.)
func (p *Parser) parseDoStatement() *ast.DoStatement {
	var node = new(ast.DoStatement)
	p.scope(node, func() {
		p.want(token.DoKeyword)
		node.Body = p.parseBlockStatement(false, nil)
		p.want(token.WhileKeyword)
		node.Condition = p.parseExpression()
	})
	return node
}

// parseWhileStatement parses 'while <condition> <block>'.
func (p *Parser) parseWhileStatement() *ast.WhileStatement {
	node := &ast.WhileStatement{}
	p.scope(node, func() {
		p.want(token.WhileKeyword)
		node.Condition = p.parseExpression()
		node.Body = p.parseBlockStatement(false, nil)
	})
	return node
}

// parseForOrForOfStatement parses either a classic three-clause 'for'
// statement or a 'for <var-decl> of <expr>' statement. The initializer is
// parsed first; seeing 'of' after a variable declaration selects the for-of
// form.
func (p *Parser) parseForOrForOfStatement() ast.Statement {
	pos := p.getNodePos()
	p.want(token.ForKeyword)

	// Initializer is optional: 'for ; cond ; incr' is allowed.
	var initializer ast.Node
	if p.token() != token.Semicolon {
		if p.isStartOfVariableDeclaration() {
			initializer = p.parseVariableDeclaration()
		} else {
			initializer = p.parseCommaExpression()
		}
	}

	// NOTE(review): initializer may be nil here (empty init clause); assumes
	// ast.IsVariableDeclaration tolerates a nil node — confirm.
	if ast.IsVariableDeclaration(initializer) && p.got(token.OfKeyword) {
		var node = new(ast.ForOfStatement)
		p.scope(node, func() {
			// Multiple expressions or variables are allowed in the initialization list of
			// the for in statement, anyway, an error will be reported in the final grammar check.
			node.VariableDeclaration = initializer.(*ast.VariableDeclaration)
			node.Expression = p.parseExpression()
			node.Body = p.parseBlockStatement(false, nil)
		}, pos)
		return node
	} else {
		var node = new(ast.ForStatement)
		p.scope(node, func() {
			node.Initializer = initializer
			p.want(token.Semicolon)
			// Condition is optional; stop early at '{' so the body still parses.
			if p.token() != token.Semicolon && p.token() != token.OpenBrace {
				node.Condition = p.parseExpression()
			}
			p.want(token.Semicolon)
			// Incrementor is optional.
			if p.token() != token.OpenBrace {
				node.Incrementor = p.parseCommaExpression()
			}
			node.Body = p.parseBlockStatement(false, nil)
		})
		return node
	}
}

// parseBranchStatement parses a 'break' or 'continue' statement with an
// optional label. Callers must only invoke it on one of those two tokens.
// (gofmt: stray tab inside the panic call removed.)
func (p *Parser) parseBranchStatement() *ast.BranchStatement {
	if p.token() != token.BreakKeyword && p.token() != token.ContinueKeyword {
		panic(fmt.Sprintf("'parseBranchStmt' token(%s) not legal", p.token().ToString()))
	}

	var node = new(ast.BranchStatement)
	p.scope(node, func() {
		node.Token = p.token()
		p.nextToken()
		// Optional label, unless a semicolon can be inserted here.
		if !p.canParseSemicolon() {
			node.Label = p.parseIdentifier(nil)
		}
		p.parseSemicolon()
	})
	return node
}

// parseReturnStatement parses 'return [expression] ;' with automatic
// semicolon insertion.
func (p *Parser) parseReturnStatement() *ast.ReturnStatement {
	node := &ast.ReturnStatement{}
	p.scope(node, func() {
		p.want(token.ReturnKeyword)
		// A return value is present only when no semicolon can be inserted.
		if !p.canParseSemicolon() {
			node.Expression = p.parseExpression()
		}
		p.parseSemicolon()
	})
	return node
}

// parseCaseClause parses a 'case expr:' or 'default:' clause and its
// statement list. Callers must only invoke it on one of those two tokens.
// (gofmt: stray tab inside the panic call removed.)
func (p *Parser) parseCaseClause() *ast.CaseClause {
	if p.token() != token.CaseKeyword && p.token() != token.DefaultKeyword {
		panic(fmt.Sprintf("'parseCaseClause' token(%s) not legal", p.token().ToString()))
	}

	var node = new(ast.CaseClause)
	p.scope(node, func() {
		node.Token = p.parseToken()
		// Only 'case' carries a discriminating expression.
		if node.Token.Token == token.CaseKeyword {
			node.Expression = p.parseExpression()
		}
		p.want(token.Colon)

		node.Statements = new(ast.StatementList)
		p.listScope(node.Statements, func() {
			p.parseList(pcSwitchClauseStatements, func() {
				node.Statements.Add(p.parseStatement())
			})
		})
	})
	return node
}

// parseSwitchStatement parses 'switch <expr> { case... }'.
func (p *Parser) parseSwitchStatement() *ast.SwitchStatement {
	node := &ast.SwitchStatement{}
	p.scope(node, func() {
		p.want(token.SwitchKeyword)
		node.Expression = p.parseExpression()

		block := &ast.CaseBlock{}
		p.scope(block, func() {
			p.want(token.OpenBrace)
			clauses := &ast.NodeList{}
			p.listScope(clauses, func() {
				p.parseList(pcSwitchClauses, func() {
					clauses.Add(p.parseCaseClause())
				})
			})
			block.Clauses = clauses
			p.want(token.CloseBrace)
		})
		node.Body = block
	})
	return node
}

// parseThrowStatement parses 'throw <expr> ;'. The thrown expression must
// start on the same line as the keyword.
func (p *Parser) parseThrowStatement() *ast.ThrowStatement {
	node := &ast.ThrowStatement{}
	p.scope(node, func() {
		p.want(token.ThrowKeyword)
		if !p.scanner.HasPrecedingLineBreak() {
			node.Expression = p.parseExpression()
		}
		p.parseSemicolon()
	})
	return node
}

// TODO: Review for error recovery
// parseTryStatement parses 'try <block> [catch...]* [finally <block>]'. A
// try with no catch clauses must have a finally block; in that case the
// 'finally' keyword is demanded and its absence reported.
func (p *Parser) parseTryStatement() *ast.TryStatement {
	var node = new(ast.TryStatement)
	p.scope(node, func() {
		p.want(token.TryKeyword)
		node.TryBlock = p.parseBlockStatement(false, nil)
		if p.token() == token.CatchKeyword {
			var list = new(ast.NodeList)
			p.listScope(list, func() {
				p.parseList(pcCatchClauses, func() {
					list.Add(p.parseCatchClause())
				})
			})
			node.CatchClauses = list
		}

		// Without catch clauses, 'finally' is mandatory; with them, optional.
		if node.CatchClauses == nil || p.token() == token.FinallyKeyword {
			p.want(token.FinallyKeyword)
			node.FinallyBlock = p.parseBlockStatement(false, nil)
		}
	})

	return node
}

// parseCatchClause parses 'catch <annotations> <modifiers> name : Type
// <block>'. The caught variable is modeled as a variable declaration.
func (p *Parser) parseCatchClause() *ast.CatchClause {
	node := &ast.CatchClause{}
	p.scope(node, func() {
		p.want(token.CatchKeyword)

		decl := &ast.VariableDeclaration{}
		p.scope(decl, func() {
			decl.Annotations = p.parseAnnotations()
			decl.Modifiers = p.parseModifiers()
			decl.Name = p.parseIdentifier(nil)
			decl.Type = p.parseTypeAnnotation()
		})

		node.VariableDeclaration = decl
		node.Block = p.parseBlockStatement(false, nil)
	})
	return node
}

// parseExpressionOrLabeledStatement parses an expression statement, or — when
// the parsed expression turns out to be a bare identifier followed by ':' — a
// labeled statement. Parsing the expression first avoids a dedicated
// lookahead for labels.
func (p *Parser) parseExpressionOrLabeledStatement() ast.Statement {
	start := p.scanner.GetStartPos()
	expr := p.parseExpression()

	name, isIdent := expr.(*ast.Identifier)
	if isIdent && p.got(token.Colon) {
		label := new(ast.LabeledStatement)
		label.SetPos(start)
		label.Label = name
		label.Statement = p.parseStatement()
		label.SetEnd(p.startPos())
		p.addDocument(label)
		return label
	}

	stmt := new(ast.ExpressionStatement)
	stmt.SetPos(start)
	stmt.Expression = expr
	p.parseSemicolon()
	stmt.SetEnd(p.startPos())
	p.addDocument(stmt)
	return stmt
}

// nextTokenIsIdentifierOrKeywordOnSameLine advances one token and reports
// whether it is an identifier or keyword with no intervening line break.
func (p *Parser) nextTokenIsIdentifierOrKeywordOnSameLine() bool {
	p.nextToken()
	if p.scanner.HasPrecedingLineBreak() {
		return false
	}
	return scanner.TokenIsIdentifierOrKeyword(p.token())
}

// nextTokenIsFunctionKeywordOnSameLine advances one token and reports
// whether it is 'function' with no intervening line break.
func (p *Parser) nextTokenIsFunctionKeywordOnSameLine() bool {
	p.nextToken()
	if p.scanner.HasPrecedingLineBreak() {
		return false
	}
	return p.token() == token.FunctionKeyword
}

// nextTokenIsOpenBraceOnSameLine advances one token and reports whether it
// is '{' with no intervening line break.
func (p *Parser) nextTokenIsOpenBraceOnSameLine() bool {
	p.nextToken()
	if p.scanner.HasPrecedingLineBreak() {
		return false
	}
	return p.token() == token.OpenBrace
}

// nextTokenIsIdentifierOrKeywordOrNumberOnSameLine advances one token and
// reports whether it is an identifier, keyword, or integer literal with no
// intervening line break.
//
// Bug fix: '&&' binds tighter than '||' in Go, so the original expression
// 'A || B && C' applied the line-break check only to the literal case; the
// identifier/keyword case ignored line breaks, contradicting both the name
// and the sibling nextTokenIs...OnSameLine predicates. Parenthesized so the
// check covers both alternatives.
func (p *Parser) nextTokenIsIdentifierOrKeywordOrNumberOnSameLine() bool {
	p.nextToken()
	return (scanner.TokenIsIdentifierOrKeyword(p.token()) || p.token() == token.IntLiteral) &&
		!p.scanner.HasPrecedingLineBreak()
}

// isDeclaration scans forward (callers wrap it in lookahead) past modifier
// keywords to decide whether the tokens here begin a declaration.
// 'for true' replaced with idiomatic 'for' (staticcheck S1006); since every
// switch path returns or continues, the bare loop is a terminating statement
// and the unreachable trailing panic is dropped.
func (p *Parser) isDeclaration() bool {
	for {
		switch p.token() {
		case token.At,
			token.VarKeyword,
			token.ClassKeyword,
			token.EnumKeyword,
			token.InterfaceKeyword,
			token.AnnotationKeyword:
			return true

		case token.AbstractKeyword,
			token.PrivateKeyword,
			token.ProtectedKeyword,
			token.PublicKeyword:
			p.nextToken()
			// ASI takes effect for this modifier.
			if p.scanner.HasPrecedingLineBreak() {
				return false
			}
			continue
		case token.StaticKeyword,
			token.FinalKeyword:
			p.nextToken()
			continue
		default:
			return false
		}
	}
}

// isTopLevelDeclaration scans forward (callers wrap it in lookahead) past
// modifier keywords to decide whether the tokens here begin a top-level
// declaration (no 'var' at top level).
// 'for true' replaced with idiomatic 'for' (staticcheck S1006); the
// unreachable trailing panic is dropped since the bare loop terminates.
func (p *Parser) isTopLevelDeclaration() bool {
	for {
		switch p.token() {
		case token.At,
			token.ClassKeyword,
			token.EnumKeyword,
			token.AnnotationKeyword,
			token.InterfaceKeyword:
			return true

		case token.AbstractKeyword,
			token.PrivateKeyword,
			token.ProtectedKeyword,
			token.PublicKeyword,
			token.FinalKeyword:
			p.nextToken()
			// ASI takes effect for this modifier.
			if p.scanner.HasPrecedingLineBreak() {
				return false
			}
			continue
		case token.StaticKeyword:
			p.nextToken()
			continue
		default:
			return false
		}
	}
}

// isStartOfTopLevelDeclaration looks ahead (without consuming tokens) to see
// whether a top-level declaration starts here. Uses lookAheadBool for
// consistency with isStartOfVariableDeclaration instead of the
// lookAhead(...).(bool) closure-and-assert pattern.
func (p *Parser) isStartOfTopLevelDeclaration() bool {
	return p.lookAheadBool(p.isTopLevelDeclaration)
}

// isStartOfDeclaration looks ahead (without consuming tokens) to see whether
// a declaration starts here. Uses lookAheadBool for consistency with
// isStartOfVariableDeclaration instead of the lookAhead(...).(bool)
// closure-and-assert pattern.
func (p *Parser) isStartOfDeclaration() bool {
	return p.lookAheadBool(p.isDeclaration)
}

// isStartOfStatement reports whether the current token can begin a
// statement. It is intentionally permissive: some tokens (catch/finally)
// are accepted so the statement parser can produce a better error later.
func (p *Parser) isStartOfStatement() bool {
	switch p.token() {
	case token.At,
		token.Semicolon,
		token.OpenBrace,
		token.VarKeyword,
		token.ClassKeyword,
		token.EnumKeyword,
		token.IfKeyword,
		token.DoKeyword,
		token.WhileKeyword,
		token.ForKeyword,
		token.ContinueKeyword,
		token.BreakKeyword,
		token.ReturnKeyword,
		token.SwitchKeyword,
		token.ThrowKeyword,
		token.TryKeyword,
		// 'catch' and 'finally' do not actually indicate that the code is part of a statement,
		// however, we say they are here so that we may gracefully parse them and error later.
		token.CatchKeyword,
		token.FinallyKeyword:
		return true

	case token.InterfaceKeyword:
		// When these don't start a declaration, they're an identifier in an expression statement
		return true

	case token.PublicKeyword,
		token.PrivateKeyword,
		token.ProtectedKeyword,
		token.StaticKeyword:
		// When these don't start a declaration, they may be the start of a class member if an identifier
		// immediately follows. Otherwise they're an identifier in an expression statement.
		return p.isStartOfDeclaration() || !p.lookAheadBool(p.nextTokenIsIdentifierOrKeywordOnSameLine)

	default:
		return p.isStartOfExpression()
	}
}

// nextTokenIsIdentifierOrStartOfDestructuring advances one token and reports
// whether it is an identifier or a destructuring opener ('{' or '[').
func (p *Parser) nextTokenIsIdentifierOrStartOfDestructuring() bool {
	p.nextToken()
	if p.isIdentifier() {
		return true
	}
	t := p.token()
	return t == token.OpenBrace || t == token.OpenBracket
}

// parseStatement parses one statement, dispatching on the current token.
// Tokens that could begin either a declaration or an expression use
// lookahead; everything unmatched falls through to
// parseExpressionOrLabeledStatement. (gofmt: '(false,nil)' spacing fixed.)
func (p *Parser) parseStatement() ast.Statement {
	switch p.token() {
	case token.Semicolon:
		return p.parseEmptyStatement()
	case token.OpenBrace:
		return p.parseBlockStatement(false, nil)
	case token.IfKeyword:
		return p.parseIfStatement()
	case token.DoKeyword:
		return p.parseDoStatement()
	case token.WhileKeyword:
		return p.parseWhileStatement()
	case token.ForKeyword:
		return p.parseForOrForOfStatement()
	case token.ContinueKeyword,
		token.BreakKeyword:
		return p.parseBranchStatement()
	case token.ReturnKeyword:
		return p.parseReturnStatement()
	case token.SwitchKeyword:
		return p.parseSwitchStatement()
	case token.ThrowKeyword:
		return p.parseThrowStatement()
	case token.TryKeyword,
		token.CatchKeyword,
		token.FinallyKeyword:
		// Stray catch/finally are routed into the try parser for a better error.
		return p.parseTryStatement()
	case token.At,
		token.VarKeyword,
		token.ClassKeyword,
		token.InterfaceKeyword,
		token.EnumKeyword,
		token.AnnotationKeyword,
		token.PrivateKeyword,
		token.ProtectedKeyword,
		token.PublicKeyword,
		token.AbstractKeyword,
		token.StaticKeyword,
		token.FinalKeyword:
		if p.isStartOfDeclaration() {
			var node = new(ast.DeclarationStatement)
			p.scope(node, func() {
				node.Declaration = p.parseDeclaration()
			})
			return node
		}
	}
	return p.parseExpressionOrLabeledStatement()
}

// parseTopLevelDeclaration parses a class/interface/enum/annotation
// declaration, including any leading annotations and modifiers. When no
// declaration keyword follows but annotations/modifiers were consumed, a
// BadDeclaration wrapping them is produced and an error reported.
// NOTE(review): if neither case applies, result stays nil — callers
// presumably tolerate a nil Declaration; confirm.
func (p *Parser) parseTopLevelDeclaration() ast.Declaration {
	fullStart := p.getNodePos()
	annotations := p.parseAnnotations()
	modifiers := p.parseModifiers()

	var result ast.Declaration
	switch p.token() {
	case token.ClassKeyword:
		result = p.parseClassDeclaration(fullStart, annotations, modifiers)
	case token.InterfaceKeyword:
		result = p.parseInterfaceDeclaration(fullStart, annotations, modifiers)
	case token.EnumKeyword:
		result = p.parseEnumDeclaration(fullStart, annotations, modifiers)
	case token.AnnotationKeyword:
		result = p.parseAnnotationDeclaration(fullStart, annotations, modifiers)
	default:
		if annotations != nil || modifiers != nil {
			var node = new(ast.BadDeclaration)
			p.scope(node, func() {
				node.Annotations = annotations
				node.Modifiers = modifiers
			}, fullStart)
			p.errorAtCurrentToken(diagnostic.M_Declaration_expected)

			result = node
		}
	}

	return result
}

// parseDeclaration parses a statement-level declaration (var, class,
// interface, enum, or annotation), including leading annotations and
// modifiers. Made consistent with parseTopLevelDeclaration: the annotation
// case assigns 'result' instead of returning early, and the BadDeclaration
// fallback goes through p.scope so it gets the same initNode/finishNode
// bookkeeping (the manual SetPos/SetEnd version skipped nodeCount).
// NOTE(review): result stays nil when nothing matches and no
// annotations/modifiers were seen — confirm callers tolerate that.
func (p *Parser) parseDeclaration() ast.Declaration {
	fullStart := p.getNodePos()
	annotations := p.parseAnnotations()
	modifiers := p.parseModifiers()

	var result ast.Declaration
	switch p.token() {
	case token.VarKeyword:
		result = p.parseVariableDeclarationRest(fullStart, annotations, modifiers)
	case token.ClassKeyword:
		result = p.parseClassDeclaration(fullStart, annotations, modifiers)
	case token.InterfaceKeyword:
		result = p.parseInterfaceDeclaration(fullStart, annotations, modifiers)
	case token.EnumKeyword:
		result = p.parseEnumDeclaration(fullStart, annotations, modifiers)
	case token.AnnotationKeyword:
		result = p.parseAnnotationDeclaration(fullStart, annotations, modifiers)
	default:
		if annotations != nil || modifiers != nil {
			var node = new(ast.BadDeclaration)
			p.scope(node, func() {
				node.Annotations = annotations
				node.Modifiers = modifiers
			}, fullStart)
			p.errorAtCurrentToken(diagnostic.M_Declaration_expected)

			result = node
		}
	}

	return result
}

// nextTokenIsIdentifierOrStringLiteralOnSameLine advances one token and
// reports whether it is an identifier or string literal with no intervening
// line break.
//
// Bug fix: the original '!A && B || C' grouped as '(!A && B) || C', so a
// string literal satisfied the predicate even after a line break,
// contradicting the name and the sibling ...OnSameLine predicates.
// Parenthesized so the line-break check covers both alternatives.
func (p *Parser) nextTokenIsIdentifierOrStringLiteralOnSameLine() bool {
	p.nextToken()
	return !p.scanner.HasPrecedingLineBreak() &&
		(p.isIdentifier() || p.token() == token.StringLiteral)
}

// parseFunctionBlockOrSemicolon parses a function body, or accepts a bare
// semicolon (returning nil) for body-less declarations. With allowEmpty set,
// a missing body is tolerated without a semicolon.
//
// Bug fix: the outer condition previously repeated p.canParseSemicolon(), so
// the inner semicolon branch always fired first and the allowEmpty branch
// was dead code. The duplicate test is removed so allowEmpty is reachable
// when no semicolon can be inserted.
func (p *Parser) parseFunctionBlockOrSemicolon(allowEmpty bool, diagnosticMessage *diagnostic.Message) *ast.BlockStatement {
	if p.token() != token.OpenBrace {
		if p.canParseSemicolon() {
			p.parseSemicolon()
			return nil
		}

		if allowEmpty {
			return nil
		}
	}

	return p.parseFunctionBlock(false, diagnosticMessage)
}

// DECLARATIONS

// parserPackageDeclaration parses an optional 'package <name>' declaration
// (with leading annotations and modifiers); returns nil when the 'package'
// keyword is absent. (Name kept as-is for callers, despite the
// 'parser'/'parse' typo.)
func (p *Parser) parserPackageDeclaration() *ast.PackageDeclaration {
	fullStart := p.getNodePos()
	annotations := p.parseAnnotations()
	modifiers := p.parseModifiers()
	if !p.got(token.PackageKeyword) {
		return nil
	}

	node := &ast.PackageDeclaration{}
	p.scope(node, func() {
		node.Annotations = annotations
		node.Modifiers = modifiers
		node.Name = p.parseEntityName(nil)
	}, fullStart)
	return node
}

// parseImportDeclaration parses 'import a.b.c', 'import a.b.*', or a dotted
// path whose final segment becomes the import target. Path is the qualified
// prefix; Target is the last name or the '*' token.
func (p *Parser) parseImportDeclaration() *ast.ImportDeclaration {
	var node = new(ast.ImportDeclaration)
	p.scope(node, func() {
		p.want(token.ImportKeyword)
		var path ast.Expression
		var target ast.Node
		path = p.parseIdentifier(nil)
		for p.got(token.Dot) {
			// 'a.b.*' — wildcard import; the '*' token is the target.
			if p.token() == token.Asterisk {
				target = p.parseToken()
				break
			}

			// Last segment (no further dot) is the import target, not part
			// of the qualified path.
			var right = p.parseRightSideOfDot()
			if p.token() != token.Dot {
				target = right
				break
			}

			// Intermediate segment: fold into the qualified-name chain.
			var name = new(ast.QualifiedName)
			p.scope(name, func() {
				name.Left = path
				name.Right = right
			}, path.Pos())
			path = name
		}

		node.Path = path
		node.Target = target
	})
	return node
}

// isVariableDeclaration scans forward (callers wrap it in lookahead) past
// 'static'/'final' modifiers to decide whether a variable declaration
// starts here. 'for true' replaced with idiomatic 'for' (staticcheck S1006);
// the unreachable trailing panic is dropped since the bare loop terminates.
func (p *Parser) isVariableDeclaration() bool {
	for {
		switch p.token() {
		case token.At,
			token.VarKeyword:
			return true

		case token.StaticKeyword,
			token.FinalKeyword:
			p.nextToken()
			continue
		default:
			return false
		}
	}
}

// isStartOfVariableDeclaration looks ahead (without consuming tokens) to see
// whether a variable declaration starts here.
func (p *Parser) isStartOfVariableDeclaration() bool {
	return p.lookAheadBool(p.isVariableDeclaration)
}

// parseVariableDeclaration parses leading annotations and modifiers, then
// the 'var ...' declaration proper.
func (p *Parser) parseVariableDeclaration() *ast.VariableDeclaration {
	fullStart := p.getNodePos()
	annotations := p.parseAnnotations()
	modifiers := p.parseModifiers()
	return p.parseVariableDeclarationRest(fullStart, annotations, modifiers)
}

// parseVariableDeclarationRest parses 'var name [: Type] [= init]' after
// annotations/modifiers have been consumed.
func (p *Parser) parseVariableDeclarationRest(fullStart int, annotations *ast.NodeList, modifiers *ast.NodeList) *ast.VariableDeclaration {
	var node = new(ast.VariableDeclaration)
	p.scope(node, func() {
		node.Annotations = annotations
		node.Modifiers = modifiers
		p.want(token.VarKeyword)
		node.Name = p.parseIdentifier(nil)
		node.Type = p.parseTypeAnnotation()
		// Skip the initializer when 'of' follows — this declaration is the
		// header of a for-of statement and the expression belongs to it.
		if p.token() != token.OfKeyword {
			node.Initializer = p.parseInitializer()
		}
	}, fullStart)
	return node
}

// parseConstructorDeclaration parses a 'constructor(...) {...}' class member
// after annotations/modifiers have been consumed.
func (p *Parser) parseConstructorDeclaration(fullStart int, annotations *ast.NodeList, modifiers *ast.NodeList) *ast.ConstructorDeclaration {
	node := &ast.ConstructorDeclaration{}
	p.scope(node, func() {
		node.Annotations = annotations
		node.Modifiers = modifiers
		p.want(token.ConstructorKeyword)
		p.fillSignature(false, node)
		node.Body = p.parseFunctionBlockOrSemicolon(false, nil)
	}, fullStart)
	p.addDocument(node)
	return node
}

// parseMethodDeclaration parses a 'function name(...) ...' member after
// annotations/modifiers have been consumed. A 'default <expr>' tail (as in
// annotation members) replaces the body; otherwise a body or semicolon is
// parsed, with diagnosticMessage customizing the missing-brace error.
func (p *Parser) parseMethodDeclaration(fullStart int, annotations *ast.NodeList, modifiers *ast.NodeList, diagnosticMessage *diagnostic.Message) *ast.MethodDeclaration {
	var node = new(ast.MethodDeclaration)
	p.scope(node, func() {
		node.Annotations = annotations
		node.Modifiers = modifiers
		p.want(token.FunctionKeyword)
		node.Name = p.parseIdentifier(nil)
		p.fillSignature(false, node)
		// Token is default keyword and on same line
		if p.token() == token.DefaultKeyword && !p.scanner.HasPrecedingLineBreak() {
			node.Default = p.parseToken()
			node.Value = p.parseExpression()
		} else {
			node.Body = p.parseFunctionBlockOrSemicolon(true, diagnosticMessage)
		}
	}, fullStart)
	p.addDocument(node)
	return node
}

// parseFieldDeclaration parses a 'var name [: Type] [= init] ;' class member
// after annotations/modifiers have been consumed.
func (p *Parser) parseFieldDeclaration(fullStart int, annotations *ast.NodeList, modifiers *ast.NodeList) *ast.FieldDeclaration {
	var node = new(ast.FieldDeclaration)
	p.scope(node, func() {
		node.Annotations = annotations
		node.Modifiers = modifiers
		p.want(token.VarKeyword)
		node.Name = p.parseIdentifier(nil)
		node.Type = p.parseTypeAnnotation()
		// Skip the initializer when 'of' follows — mirrors
		// parseVariableDeclarationRest; misuse is diagnosed later.
		if p.token() != token.OfKeyword {
			node.Initializer = p.parseInitializer()
		}
		p.parseSemicolon()
	}, fullStart)
	p.addDocument(node)
	return node
}

// parseAnnotations parses a run of '@Type(args)' annotations. Returns nil
// when no annotation is present; the list is created lazily on the first
// '@'. 'for true' replaced with idiomatic 'for' (staticcheck S1006).
func (p *Parser) parseAnnotations() *ast.NodeList {
	var annotations *ast.NodeList
	for {
		var annotationStart = p.getNodePos()
		if !p.got(token.At) {
			break
		}

		var annotation = new(ast.Annotation)
		annotation.Type = p.parseTypeReferenceRest(p.startPos(), nil)
		// Arguments are optional: '@Foo' and '@Foo(...)' are both legal.
		if p.token() == token.OpenParen {
			annotation.Arguments = p.parseArgumentList()
		}

		if annotations == nil {
			annotations = new(ast.NodeList)
			annotations.SetPos(annotationStart)
		}
		annotations.Add(annotation)
	}

	if annotations != nil {
		annotations.SetEnd(p.getNodeEnd())
	}
	return annotations
}

// parseModifiers parses a run of contextual modifier keywords ('public',
// 'static', ...). Returns nil when no modifier is present; the list is
// created lazily on the first modifier. 'for true' replaced with idiomatic
// 'for' (staticcheck S1006); stray double blank line removed.
func (p *Parser) parseModifiers() *ast.NodeList {
	var modifiers *ast.NodeList
	for {
		var modifierStart = p.scanner.GetStartPos()
		var modifierKind = p.token()
		if !p.parseAnyContextualModifier() {
			break
		}

		var modifier = new(ast.Modifier)
		modifier.SetPos(modifierStart)
		modifier.Token = modifierKind
		modifier.SetEnd(p.startPos())

		if modifiers == nil {
			modifiers = new(ast.NodeList)
			modifiers.SetPos(modifierStart)
		}
		modifiers.Add(modifier)
	}
	if modifiers != nil {
		modifiers.SetEnd(p.startPos())
	}
	return modifiers
}

// parseTypeElement parses one member of an interface or type body:
// nested type declarations, fields, or methods. Unrecognized tokens after
// annotations/modifiers produce a BadDeclaration; with nothing consumed it
// panics, since the caller's start-check should have prevented the call.
func (p *Parser) parseTypeElement() ast.Declaration {
	var fullStart = p.getNodePos()
	var annotations = p.parseAnnotations()
	var modifiers = p.parseModifiers()

	switch p.token() {
	case token.ClassKeyword:
		return p.parseClassDeclaration(fullStart, annotations, modifiers)
	case token.InterfaceKeyword:
		return p.parseInterfaceDeclaration(fullStart, annotations, modifiers)
	case token.EnumKeyword:
		return p.parseEnumDeclaration(fullStart, annotations, modifiers)
	case token.AnnotationKeyword:
		return p.parseAnnotationDeclaration(fullStart, annotations, modifiers)
	case token.VarKeyword:
		return p.parseFieldDeclaration(fullStart, annotations, modifiers)
	case token.FunctionKeyword:
		return p.parseMethodDeclaration(fullStart, annotations, modifiers, diagnostic.M_or_expected)
	}

	if annotations != nil || modifiers != nil {
		var node = new(ast.BadDeclaration)
		p.scope(node, func() {
			node.Annotations = annotations
			node.Modifiers = modifiers
		}, fullStart)
		p.errorAtCurrentToken(diagnostic.M_Declaration_expected)
		return node
	}

	// 'isClassMemberStart' should have hinted not to attempt parsing.
	panic("Should not have attempted to parse type member declaration.")
}

// parseClassElement parses one member of a class body: nested type
// declarations, constructors, fields, or methods. Same recovery strategy as
// parseTypeElement, plus the constructor case.
func (p *Parser) parseClassElement() ast.Declaration {
	var fullStart = p.getNodePos()
	var annotations = p.parseAnnotations()
	var modifiers = p.parseModifiers()

	switch p.token() {
	case token.ClassKeyword:
		return p.parseClassDeclaration(fullStart, annotations, modifiers)
	case token.InterfaceKeyword:
		return p.parseInterfaceDeclaration(fullStart, annotations, modifiers)
	case token.EnumKeyword:
		return p.parseEnumDeclaration(fullStart, annotations, modifiers)
	case token.AnnotationKeyword:
		return p.parseAnnotationDeclaration(fullStart, annotations, modifiers)
	case token.ConstructorKeyword:
		return p.parseConstructorDeclaration(fullStart, annotations, modifiers)
	case token.VarKeyword:
		return p.parseFieldDeclaration(fullStart, annotations, modifiers)
	case token.FunctionKeyword:
		return p.parseMethodDeclaration(fullStart, annotations, modifiers, diagnostic.M_or_expected)
	}

	if annotations != nil || modifiers != nil {
		var node = new(ast.BadDeclaration)
		p.scope(node, func() {
			node.Annotations = annotations
			node.Modifiers = modifiers
		}, fullStart)
		p.errorAtCurrentToken(diagnostic.M_Declaration_expected)
		return node
	}

	// 'isClassMemberStart' should have hinted not to attempt parsing.
	panic("Should not have attempted to parse class member declaration.")
}

// parseAnnotationDeclaration parses an 'annotation' type declaration:
// the keyword, a name, optional type parameters and heritage clauses,
// and a brace-delimited member list. Any preceding annotations and
// modifiers are attached to the resulting node, and an attached doc
// comment (if any) is recorded via addDocument.
func (p *Parser) parseAnnotationDeclaration(fullStart int, annotations *ast.NodeList, modifiers *ast.NodeList) *ast.AnnotationDeclaration {
	decl := new(ast.AnnotationDeclaration)
	p.scope(decl, func() {
		decl.Annotations = annotations
		decl.Modifiers = modifiers
		p.want(token.AnnotationKeyword)
		decl.Name = p.parseIdentifier(nil)
		decl.TypeParameters = p.parseTypeParameters()
		decl.HeritageClauses = p.parseHeritageClauses()
		if !p.want(token.OpenBrace) {
			return // no body without an opening brace
		}
		decl.Members = p.parseTypeMembers()
		p.want(token.CloseBrace)
	}, fullStart)
	p.addDocument(decl)
	return decl
}

// parseClassDeclaration parses a 'class' declaration: the keyword, a
// name, optional type parameters and heritage clauses, and a
// brace-delimited member list. Any preceding annotations and modifiers
// are attached to the resulting node, and an attached doc comment (if
// any) is recorded via addDocument.
func (p *Parser) parseClassDeclaration(fullStart int, annotations *ast.NodeList, modifiers *ast.NodeList) *ast.ClassDeclaration {
	decl := new(ast.ClassDeclaration)
	p.scope(decl, func() {
		decl.Annotations = annotations
		decl.Modifiers = modifiers
		p.want(token.ClassKeyword)
		decl.Name = p.parseIdentifier(nil)
		decl.TypeParameters = p.parseTypeParameters()
		decl.HeritageClauses = p.parseHeritageClauses()
		if !p.want(token.OpenBrace) {
			return // no body without an opening brace
		}
		decl.Members = p.parseClassMembers()
		p.want(token.CloseBrace)
	}, fullStart)
	p.addDocument(decl)
	return decl
}

// isImplementsClause reports whether the current token starts an
// 'implements' clause: the keyword followed (via lookahead) by an
// identifier or keyword that can begin a type name.
func (p *Parser) isImplementsClause() bool {
	if p.token() != token.ImplementsKeyword {
		return false
	}
	return p.lookAheadBool(p.nextTokenIsIdentifierOrKeyword)
}

// parseHeritageClauses parses the sequence of extends/implements clauses
// following a type name, or returns nil when the current token does not
// begin a heritage clause.
func (p *Parser) parseHeritageClauses() *ast.NodeList {
	if !p.isHeritageClause() {
		return nil
	}

	clauses := new(ast.NodeList)
	p.listScope(clauses, func() {
		p.parseList(pcHeritageClauses, func() {
			clauses.Add(p.parseHeritageClause())
		})
	})
	return clauses
}

// parseHeritageClause parses a single 'extends' or 'implements' clause
// with its comma-delimited list of type references. Returns nil when the
// current token is neither keyword.
func (p *Parser) parseHeritageClause() *ast.HeritageClause {
	keyword := p.token()
	if keyword != token.ExtendsKeyword && keyword != token.ImplementsKeyword {
		return nil
	}

	clause := new(ast.HeritageClause)
	p.scope(clause, func() {
		clause.Token = keyword
		p.nextToken()

		types := new(ast.TypeList)
		p.listScope(types, func() {
			p.parseDelimitedList(pcHeritageClauseElement, func() {
				types.Add(p.parseTypeReference())
			}, false)
		})
		clause.Types = types
	})
	return clause
}

// isHeritageClause reports whether the current token begins a heritage
// clause ('extends' or 'implements').
func (p *Parser) isHeritageClause() bool {
	switch p.token() {
	case token.ExtendsKeyword, token.ImplementsKeyword:
		return true
	default:
		return false
	}
}

// parseTypeMembers parses the member declarations of an interface or
// annotation body (one parseTypeElement per member) into a DeclarationList.
func (p *Parser) parseTypeMembers() *ast.DeclarationList {
	members := new(ast.DeclarationList)
	p.listScope(members, func() {
		p.parseList(pcTypeMembers, func() {
			members.Add(p.parseTypeElement())
		})
	})
	return members
}

// parseClassMembers parses the member declarations of a class body (one
// parseClassElement per member) into a DeclarationList.
func (p *Parser) parseClassMembers() *ast.DeclarationList {
	members := new(ast.DeclarationList)
	p.listScope(members, func() {
		p.parseList(pcClassMembers, func() {
			members.Add(p.parseClassElement())
		})
	})
	return members
}

// parseInterfaceDeclaration parses an 'interface' declaration: the
// keyword, a name, optional type parameters and heritage clauses, and a
// brace-delimited member list. Any preceding annotations and modifiers
// are attached to the resulting node, and an attached doc comment (if
// any) is recorded via addDocument.
func (p *Parser) parseInterfaceDeclaration(fullStart int, annotations *ast.NodeList, modifiers *ast.NodeList) *ast.InterfaceDeclaration {
	decl := new(ast.InterfaceDeclaration)
	p.scope(decl, func() {
		decl.Annotations = annotations
		decl.Modifiers = modifiers
		p.want(token.InterfaceKeyword)
		decl.Name = p.parseIdentifier(nil)
		decl.TypeParameters = p.parseTypeParameters()
		decl.HeritageClauses = p.parseHeritageClauses()
		if !p.want(token.OpenBrace) {
			return // no body without an opening brace
		}
		decl.Members = p.parseTypeMembers()
		p.want(token.CloseBrace)
	}, fullStart)
	p.addDocument(decl)
	return decl
}

// parseEnumItemDeclaration parses a single enum item: an identifier,
// optionally followed by a parenthesized argument list and/or a
// brace-delimited anonymous class body.
// NOTE(review): the previous comment here described ambient enums and
// integer-literal initializers, which this code does not parse; it
// appears to have been copied from another parser and has been removed.
func (p *Parser) parseEnumItemDeclaration() *ast.EnumItemDeclaration {
	item := new(ast.EnumItemDeclaration)
	p.scope(item, func() {
		item.Name = p.parseIdentifier(nil)
		if p.token() == token.OpenParen {
			item.Arguments = p.parseArgumentList()
		}
		if p.token() == token.OpenBrace {
			item.Body = p.parseAnonymousClassDeclaration()
		}
	})
	return item
}

// parseEnumItemDeclarations parses the comma-delimited list of enum items
// at the start of an enum body into a DeclarationList.
func (p *Parser) parseEnumItemDeclarations() *ast.DeclarationList {
	items := new(ast.DeclarationList)
	p.listScope(items, func() {
		p.parseDelimitedList(pcEnumMembers, func() {
			items.Add(p.parseEnumItemDeclaration())
		}, false)
	})
	return items
}

// parseEnumDeclaration parses an 'enum' declaration: the keyword, a name,
// optional type parameters and heritage clauses, then a body consisting
// of the enum item list, optionally followed by a ';' and ordinary class
// members. Any preceding annotations and modifiers are attached to the
// resulting node, and an attached doc comment (if any) is recorded via
// addDocument.
func (p *Parser) parseEnumDeclaration(fullStart int, annotations *ast.NodeList, modifiers *ast.NodeList) *ast.EnumDeclaration {
	decl := new(ast.EnumDeclaration)
	p.scope(decl, func() {
		decl.Annotations = annotations
		decl.Modifiers = modifiers
		p.want(token.EnumKeyword)
		decl.Name = p.parseIdentifier(nil)
		decl.TypeParameters = p.parseTypeParameters()
		decl.HeritageClauses = p.parseHeritageClauses()
		if !p.want(token.OpenBrace) {
			return // no body without an opening brace
		}
		decl.EnumItems = p.parseEnumItemDeclarations()
		if p.token() != token.CloseBrace {
			// A ';' separates the item list from the class-member section.
			p.want(token.Semicolon)
			decl.Members = p.parseClassMembers()
		}
		p.want(token.CloseBrace)
	}, fullStart)
	p.addDocument(decl)
	return decl
}