lexer grammar LineLexer;
options {
language=CSharp3;
}/* 
Possible Antlr bug
Gated semantic predicates not hoisted into lexer's mTokens prediction DFA?
Before I moved the comment and String stuff into different grammars, I had to
have backtracking turned on, otherwise their gated predicates would not get hoisted
into the mTokens() prediction DFA correctly.




$type = xxx;

Jim


> Subject: [antlr-interest] Heterogeneous token types
>
> In Antlr2 there was a $setToken function that could be used to return
> tokens with different types from different lexical rules.
>
> Is this not possible in Antlr3 anymore?
>
> List: http://www.antlr.org/mailman/listinfo/antlr-interest
> Unsubscribe: http://www.antlr.org/mailman/options/antlr-interest/your-
> email-address 
*/

// Lexer state exposed to the driver (emitted as members of the generated C# lexer class).
@members{
// Set by MultiLineComment when a /* ... comment is cut off by end of line,
// telling the caller that the next line continues inside the comment.
public bool insideMultiLineComment{ get; set; }

// Running count of hex digits seen inside an x"..." literal; the
// HexadecimalString predicate requires this to be even when the literal closes.
public int numHexDigits { get; set; }
}

/* bug in dmd's lexer:
input String: "#line 0 _" will cause lexer to loop forever. If the integer is followed by an '_',
but the '_' is not __FILE__, then it will loop forever.
*/

/* Walter's web grammar is wrong:
End of Line is not a member of end of file, except for line comments e.g. '//foo'
Decimal float: Floating points that begin with . must be followed by a digit, not an underscore e.g. these are wrong ._1 or ._ but the web grammar says they are right
Decimal float- if it begins with a 0, it must have a '.' before the 'e'. Numbers like 0e0 are not floats. It is a 0 followed by something else (not sure what, 0 followed by Identifier?)
Binary: a 0 or 1 is required somewhere after the '0b', it cannot be followed solely by underscores
Hexadecimal- same as binary, can't have '0x' or '0x_'
Exponent- underscore cannot immediately follow 'e' or 'e+', there must be a real digit. This applies to both decimal and Hexadecimal exponents
*/


//why do Hexadecimal floats always require exponents?

/* TODO
stop lexing if we run into end of file. I think I'll leave this to the parser to figure out.
String literals should be collapsed into a single String literal token
*/

//Tokens

/*TODO when I added this, I got all kinds of problems. It seems the interaction of Keyword and UNIVERSAL ALPHA that
resides in Identifier causes problems. UNIVERSAL ALPHA is probably just too big for ANTLR. Temporary solution is to
disable UNIVERSAL ALPHA.
*/
//must come before Identifier
// All D keywords collapsed into one token type; listed before Identifier so a
// whole-word keyword match wins over the identifier rule (see note above).
Keyword	:
	'abstract' | 'alias' | 'align' | 'asm' | 'assert' | 'auto'
	| 'body' | 'bool' | 'break' | 'byte'
	| 'case' | 'cast' | 'catch' | 'cdouble' | 'cent' | 'cfloat' | 'char' | 'class' | 'const' | 'continue' | 'creal'
	| 'dchar' | 'debug' | 'default' | 'delegate' | 'delete' | 'deprecated' | 'do' | 'double'
	| 'else' | 'enum' | 'export' | 'extern'
	| 'false' | 'final' | 'finally' | 'float' | 'for' | 'foreach' | 'foreach_reverse' | 'function'
	| 'goto'
	| 'idouble' | 'if' | 'ifloat' | 'import' | 'in' | 'inout' | 'int' | 'interface' | 'invariant' | 'ireal' | 'is'
	| 'lazy' | 'long'
	| 'macro' | 'mixin' | 'module'
	| 'new' | 'null'
	| 'out' | 'override'
	| 'package' | 'pragma' | 'private' | 'protected' | 'public'
	| 'real' | 'ref' | 'return'
	| 'scope' | 'short' | 'static' | 'struct' | 'super' | 'switch' | 'synchronized'
	| 'template' | 'this' | 'throw' | 'true' | 'try' | 'typedef' | 'typeid' | 'typeof'
	| 'ubyte' | 'ucent' | 'uint' | 'ulong' | 'union' | 'unittest' | 'ushort'
	| 'version' | 'void' | 'volatile'
	| 'wchar' | 'while' | 'with'
 ;

//must appear before Identifier 
//TODO these get translated during lexing
// D special tokens that the compiler replaces with their values.
// FIX: was '__Line__' — D is case-sensitive and the real token is '__LINE__',
// so the mis-cased literal could never match actual D source.
SpecialToken
	:	'__FILE__' | '__LINE__' | '__DATE__' | '__TIME__' | '__TIMESTAMP__' | '__VENDOR__' | '__VERSION__'
	;

// ASCII-only identifiers; UniversalAlpha support is intentionally disabled
// (see notes above — the huge Unicode rule caused ANTLR problems).
Identifier	
	:	('a'..'z'|'A'..'Z'|'_') ('a'..'z'|'A'..'Z'|'0'..'9'|'_')*
	;
//Very long/big rules kill Antlr lexer?
//TODO would be nice if I could enable this. I think it's a bug in ANTLR, and UniversalAlpha is too big of a rule.
//	:	('a'..'z'|'A'..'Z'|'_'|UniversalAlpha) ('a'..'z'|'A'..'Z'|'0'..'9'|'_'|UniversalAlpha)*

// Complete r"..." wysiwyg string on one line (no escapes recognized inside).
WysiwygString
	:	'r"' WysiwygCharacter* '"' StringPostfix?
	;

// r"... cut off by end of line; the driver is expected to stitch the
// continuation together across lines (this is a line-at-a-time lexer).
StartWysiwygString
	:	'r"' WysiwygCharacter* EndOfLineFragment
	;

// Complete `...` wysiwyg string on one line.
AlternateWysiwygString
	:	'`' AlternateWysiwygCharacter* '`' StringPostfix?
	;

// `... cut off by end of line.
StartAlternateWysiwygString
	:	'`' AlternateWysiwygCharacter* EndOfLineFragment
	;

// Complete "..." string on one line; escapes are handled by DoubleQuotedCharacter.
DoubleQuotedString
	:	'"' DoubleQuotedCharacter* '"' StringPostfix?
	;

// "... cut off by end of line.
StartDoubleQuotedString
	:	'"' DoubleQuotedCharacter* EndOfLineFragment
	;
	
//TODO do this later. I have to pass in how many digits have been found so far to the End hexString rule, what a pain.
//the semantic predicate checks for an even number of hex digits (possibly none at all)
// x"..." hex string literal, complete on one line. The trailing gated
// predicate rejects an odd number of hex digits (\% is ANTLR's action-escape
// for '%'). numHexDigits lives in @members so the count survives the rule.
HexadecimalString
@init{numHexDigits = 0;}
	:	'x"' (HexadecimalDigit {++numHexDigits;} | WhiteSpace)* '"' StringPostfix? {numHexDigits \% 2==0}?
	;

// x"... cut off by end of line; digit-count check deferred (see TODO above).
StartHexadecimalString
@init{numHexDigits = 0;}
	:	'x"' (HexadecimalDigit {++numHexDigits;} | WhiteSpace)* EndOfLineFragment
	;

// Single-quoted character literal, e.g. 'a' or '\n' (escapes via SingleQuotedCharacter).
CharacterLiteral
	:	'\'' SingleQuotedCharacter '\''
	;

/*
The complication is to match DMD's lexer, which classifies 0x0i as a hex float that's missing
its exponent, and thus is in error.
It used to be
Integer IntegerSuffix?
*/
// The gated predicate on the Hexadecimal alternative rejects a hex integer
// immediately followed by 'i', so e.g. 0x0i can be diagnosed the way DMD does:
// as a hex float missing its exponent (see the comment above).
IntegerLiteral 
	:	Integer IntegerSuffix
	|	IntegerLessHexadecimal
	|	Hexadecimal	{input.LA(1) != 'i'}?
	;

// Three shapes: a true decimal/hex float with optional suffix; a decimal
// integer made a float purely by an f/F suffix; and a decimal integer made
// imaginary by the 'i' suffix (with optional 'L' before it).
FloatLiteral
	:	(DecimalFloat|HexadecimalFloat) FloatSuffixAny?
	|	Decimal FloatSuffix ImaginarySuffix?
	|	DecimalDigit DecimalDigitUnderscore* RealSuffix? ImaginarySuffix
	;

/*
I'm not quite sure how to draw the line between operators and other 'punctuation'. So my categorization
may seem arbitrary
*/
// One token type for every operator; the parser inspects the text.
// FIX: the OR family ('|', '|=', '||') was missing entirely even though D has
// bitwise and logical OR, and no other rule in this grammar matched '|' —
// input containing '|' would have been a lexer error.
Operator
	:	'/' | '/=' | 
	'&' | '&=' | '&&' | 
	'|' | '|=' | '||' | 
	'-' | '-=' | '--' | 
	'+' | '+=' | '++' | 
	'<' | '<=' | '<<' | '<<=' | '<>' | '<>=' | 
	'>' | '>=' | '>>=' | '>>>=' | '>>' | '>>>' | 
	'!' | '!=' | '!==' | // !== is only for d version==1
	'!<>' | '!<>=' | '!<' | '!<=' | '!>' | '!>=' | 
	'=' | '==' | '===' | // === is only for d version==1
	'*' | '*=' | 
	'%' | '%=' | 
	'^' | '^=' | 
	'~' | '~='
	;

// Non-operator punctuation; like Operator, one token type for all of them.
Punctuation
	:	'.' | '..' | '...' | 
	'(' | ')' | 
	'[' | ']' | 
	'{' | '}' | 
	'?' | 
	',' | 
	';' | 
	':' | 
	'$'
	;

//Tokens that get hidden
// End-of-line token, routed to the hidden channel.
EndOfLine
	:	EndOfLineFragment {$channel=Hidden;}
	;

//If you have escape sequence as a String literal (deprecated behavior), this must 
//go before String literal so that it doesn't become an escape sequence.
WhiteSpace
	:	('\u0020' | '\u0009' | '\u000B' | '\u000C') {$channel=Hidden;}
	;

/*
A word about nesting block comments (NBC) - any /+ encountered Starts a NBC, or increases
the nesting level.
If an NBC is wholly contained within a line, it will be matched by rule SingularNestingBlockComment.
It does not increase or decrease the nesting level (and is not inside an enclosing NBC?).

We also have rule StartNestingBlockComment. It signifies that we have encountered the Start
of an NBC, any number of nested NBCs, but not this NBC's matching terminator, before encountering 
End of Line (which we consume), or /+ , which is the Start of a different NBC (which we don't consume).
The aforementioned nested NBCs MUST be matching pairs of /+ and +/ . Essentially, we ignore
the nested NBCs, because they do not increase our nesting count before the line is terminated or 
the Start of another NBC is found. It signifies that the caller should increment the nesting level.

Finally there is rule ENDNestingBlockComment. It is the conceptual opposite of the Start rule.
It means we have encountered any number of nested NBCs, followed by a single +/ terminator, and
have not encountered End of Line. It signifies that the caller should decrement the nesting level.

Ok. all this fanciness failed. The Startnestingblockcomment and singularnestingblockcomment rules
are (i think) correct. The problem is that from the mTokens rule, backtracking is needed to decide
which one to use. I don't want to set the backtracking flag at the global level. I can't make a single
rule that encloses Start & singular and then turn on backtracking, because that would mark the 
token type as ENCLOSINGRULE instead of Start or singular.
I think an easier approach would be to just have StartNBC and EndNBC rules, not worry about singularNBC.
Then there are no common Starting Characters, so no backtracking is needed.

The fancy rules -
StartNestingBlockComment
	:	'/+'
			((SingularNestingBlockComment)=> SingularNestingBlockComment
			| ('/' ~'+')=> '/'
			| ~('/' | EndOfLineCharacter | EndOfFileFragment))* 
		(EndOfLineFragment | ('/+')=> )
	;
SingularNestingBlockComment
	:	'/+' 
			((SingularNestingBlockComment)=> SingularNestingBlockComment
			| ('/' ~'+')=> '/'
			| ('+' ~'/')=> '+'
			| ~('/' | '+' | EndOfLineCharacter | EndOfFileFragment))* 
		'+/'
	;
*/

/*
If terminated by an End of Line, mark that we're inside a multiline comment
*/
// Either a complete /*...*/ comment on one line, or the head of one that runs
// past end of line — in that case insideMultiLineComment is set so the driver
// knows the next line continues the comment. Always hidden.
MultiLineComment
	:	'/*' (('*' ~'/')=> '*' 
			| ~('*' | EndOfLineFragment | EndOfFileFragment))* 
		('*/'
			| EndOfLineFragment {insideMultiLineComment = true;}) {$channel=Hidden;}
	;
	
//terminated by End of Line, or the Start of another NBC (not consumed)
// Head of a /+ nesting block comment: consumes up to end of line (consumed) or
// the start of another nested /+ (not consumed — the empty gated alternative
// stops without eating it). The driver tracks the nesting level.
StartNestingBlockComment
	:	'/+'
			(('/' ~'+')=> '/'
			| ('+' ~'/')=> '+'
			| ~('/' | '+' | EndOfLineCharacter | EndOfFileFragment))* 
		(EndOfLineFragment | ('/+')=> ) {$channel=Hidden;}
	;

//just the // comment
//don't mess with this. it took a while to find a format ANTLR liked. Other forms of the same rule (or at least I think they were the same)
//pissed ANTLR off, and the DFA it produced would loop forever.
// '//' comment running to end of line, or to end of input — the predicate
// accepts EOF (-1) and the NUL/SUB end-of-file characters without consuming them.
LineComment
	:	'//' ~(EndOfLineCharacter | EndOfFileFragment)* (EndOfLineFragment | {input.LA(1) == -1 || input.LA(1) == '\x00' || input.LA(1) == '\x1A'}?) {$channel=Hidden;}
	;

//should this be hidden? I guess not...
// Explicit token for D's end-of-file characters (NUL or SUB); not hidden.
EndOfFile
	:	EndOfFileFragment
	;

//TODO eventually I should support this being broken across multiple lines. too much work for now.
//TODO what do I do with this? Walter's lexer changes the current line number and file name
/*This sets the source line number to Integer, and optionally the source file name to Filespec, beginning with the next line of source text. 
The source file and line number is used for printing error messages and for mapping generated code back to the source for the symbolic debugging output. 
*/
// '#line' directive: line number plus optional file spec, terminated by end of
// line or (via the same EOF predicate as LineComment) end of input.
// NOTE(review): per the TODO above, continuation across lines is not supported.
SpecialTokenSequence
	:	'#line' (WhiteSpace | EndOfLineFragment)+ SpecialTokenInteger WhiteSpace* FileSpec? (EndOfLineFragment | {input.LA(1) == -1 || input.LA(1) == '\x00' || input.LA(1) == '\x1A'}?)
	;

//End of tokens, Start of fragments

//apparently unsigned integers are not allowed in special token sequence #line
// Integer argument of #line; only an optional 'L' suffix (no 'u' — unsigned
// integers are not allowed here, per the comment above).
fragment
SpecialTokenInteger
	:	Integer 'L'?
	;

// Quoted filename in #line; non-greedy so it stops at the first closing quote.
fragment
FileSpec:	'"' (options {greedy=false;} : ~(EndOfLineCharacter | EndOfFileFragment))* '"'
	;

//fragments for String literals
// Any character allowed inside r"..." — everything but the closing quote,
// end of line, and end of file. No escapes.
fragment
WysiwygCharacter
	:	~('"' | EndOfLineCharacter | EndOfFileFragment)
	;

// Same as WysiwygCharacter but for `...` strings.
fragment
AlternateWysiwygCharacter
	:	~('`' | EndOfLineCharacter | EndOfFileFragment)
	;

// Characters inside "..." — either a recognized escape or any plain character.
fragment
DoubleQuotedCharacter
	:	EscapeSequence
	|	~('"' | EndOfLineCharacter | EndOfFileFragment)
	;
	
// Optional string-type suffix: c (char), w (wchar), d (dchar).
fragment
StringPostfix
	:	'c'|'w'|'d'
	;

/*
Visual Studio eats up (doesn't pass to me) the normal end of line Strings:
'\n' '\r' and '\r\n'. So I manually append a '\n' to all input Strings. So
don't bother looking for '\r' in the input.
*/
// Thin wrapper over EndOfLineCharacter kept as a separate fragment
// (see TODO near EndOfFileFragment questioning whether it is needed).
fragment
EndOfLineFragment
	:	EndOfLineCharacter
	;

//this is just end of line without \r\n, since that messes stuff up e.g. ~(EndOfLineFragment) causes problems
fragment
EndOfLineCharacter
	:	'\n' | '\u2029' | '\u2028'
	;

//TODO transform end of file to a backslash? thats what walter does, I think
//TODO do appropriate transformations to escape sequences found here/where the fragment is used
//EscapeSequence used to be a valid String literal on its own (not inside quotes or anything). That is now deprecated.
// Backslash escapes: named characters (plus EOF chars), \xHH, 1-3 octal
// digits, \uHHHH, and \UHHHHHHHH. Named-entity escapes (\&name;) are disabled.
fragment
EscapeSequence
	:	'\\' ('\''|'"'|'?'|'\\'|'a'|'b'|'f'|'n'|'r'|'t'|'v'|EndOfFileFragment)
	|	'\\x' HexadecimalDigit HexadecimalDigit
	|	'\\' OctalDigit OctalDigit OctalDigit
	|	'\\' OctalDigit OctalDigit
	|	'\\' OctalDigit
	|	'\\u' HexadecimalDigit HexadecimalDigit HexadecimalDigit HexadecimalDigit
	|	'\\U' HexadecimalDigit HexadecimalDigit HexadecimalDigit HexadecimalDigit HexadecimalDigit HexadecimalDigit HexadecimalDigit HexadecimalDigit
	//|	'\\&' NamedCharacterEntity ';'
	;



//TODO like EndOfLine fragment, is this needed?
// D's end-of-file markers: NUL or SUB (Ctrl-Z).
fragment
EndOfFileFragment
	:	('\u0000' | '\u001A')
	;

// Character inside '...': an escape, or anything but the closing quote and
// end of line. Note EndOfFileFragment is NOT excluded here, unlike the
// string-character fragments above.
//TODO translate escape sequence
fragment
SingleQuotedCharacter
	:	EscapeSequence
	|	~('\'' | EndOfLineCharacter)
	;

	
//fragments for integer literal
// Any integer form: decimal, binary, octal, or hexadecimal.
fragment
Integer	:	Decimal
	|	Binary
	|	Octal
	|	Hexadecimal
	;

// Integer forms minus Hexadecimal, which IntegerLiteral treats specially
// (a hex integer followed by 'i' is rejected there).
fragment
IntegerLessHexadecimal
	:	Decimal
	|	Binary
	|	Octal
	;

//I think theres a bug in ANTLR. I can't state the alternatives like so:
// (IntegerSuffix ~IntegerSuffixCharacter)=>	'L' ('u'|'U')?
// (IntegerSuffix ~IntegerSuffixCharacter)=>	('u'|'U') 'L'?
//or even like this:
// ('L' ('u'|'U')? ~IntegerSuffixCharacter)=>	'L' ('u'|'U')?
// (('u'|'U') 'L'? ~IntegerSuffixCharacter)=>	('u'|'U') 'L'?

//Matches: uL UL Lu Lu L u U.   A 'u', 'U', or 'L' immediately following is not allowed.
//e.g. uLU is bad. 
// Matches uL UL Lu LU L u U. The gated predicates forbid a further suffix
// character immediately after (e.g. uLU is rejected); longer alternatives
// come first so 'Lu' is preferred over bare 'L'.
fragment
IntegerSuffix
	:	'L' ('u'|'U')	{input.LA(1) != 'L' && input.LA(1) != 'u' && input.LA(1) != 'U'}?
	|	('u'|'U') 'L'	{input.LA(1) != 'L' && input.LA(1) != 'u' && input.LA(1) != 'U'}?
	|	'L'		{input.LA(1) != 'L'}?
	|	('u'|'U')	{input.LA(1) != 'u' && input.LA(1) != 'U'}?
	;

// Decimal integer: a lone '0', or a nonzero digit followed by digits/underscores
// ('01' is therefore octal, not decimal).
fragment
Decimal	:	'0'
	|	('1'..'9') DecimalDigitUnderscore*
	;

// Binary: 0b/0B, then at least one real 0/1 (leading underscores alone are
// not enough — see the grammar-errata comment near the top of the file).
fragment
Binary	:	'0' ('b' | 'B') '_'* '0'..'1' BinaryDigit*
	;

//TODO if we get a nonoctal digit, 8 or 9, warn that we were expecting an octal here
fragment
Octal	:	'0' (OctalDigit | '_')+
	;

fragment
OctalDigit
	:	'0'..'7'
	;

// Hexadecimal: 0x/0X, then at least one real hex digit ('0x_' alone is invalid).
fragment
Hexadecimal
	:	HexadecimalPrefix '_'* HexadecimalDigit HexadecimalDigitUnderscore*
	;

// Single decimal digit.
fragment
DecimalDigit
	:	'0'..'9'
	;

// Decimal digit or the '_' separator D allows inside numbers.
fragment
DecimalDigitUnderscore
	:	DecimalDigit | '_'
	;

// Binary digit or '_' separator.
fragment
BinaryDigit
	:	'_' | '0'..'1'
	;

// Single hex digit, either case.
fragment
HexadecimalDigit
	:	'0'..'9'|'a'..'f'|'A'..'F'
	;
	
// Hex digit or '_' separator.
fragment
HexadecimalDigitUnderscore
	:	HexadecimalDigit|'_'
	;

//fragments for float literal
//Contrary to what it says on the webpage, I believe based on looking at Walter's lexer, that
//something like 1.e1 is allowed. The DecimalDigit after the . is optional, if there were digits before the .

// e/E exponent; the first character after e/E (or the sign) must be a real
// digit, never '_' (see the grammar-errata comment near the top of the file).
fragment
DecimalExponent
	:	('e'|'E')('+'|'-')? DecimalDigit DecimalDigitUnderscore*
	;
// Decimal float: digits '.' digits?, '.' digits, or (nonzero-leading) digits
// with a mandatory exponent — so 0e0 is deliberately NOT a float here.
fragment
DecimalFloat
	:	DecimalDigit DecimalDigitUnderscore* '.' DecimalDigitUnderscore* DecimalExponent?
	|	'.' DecimalDigit DecimalDigitUnderscore* DecimalExponent?
	|	'1'..'9' DecimalDigitUnderscore* DecimalExponent
	;


// Hex float: the p-exponent is mandatory in both alternatives
// (see the question about this in the comments above).
fragment
HexadecimalFloat
	:	HexadecimalPrefix HexadecimalDigitUnderscore* '.' HexadecimalDigitUnderscore* HexadecimalExponent
	|	HexadecimalPrefix HexadecimalDigitUnderscore* HexadecimalExponent
	;

fragment
HexadecimalPrefix
	:	'0' ('x'|'X')
	;

// p/P exponent for hex floats; exponent digits are decimal, first one real.
fragment
HexadecimalExponent
	:	('p'|'P') ('+'|'-')? DecimalDigit DecimalDigitUnderscore*
	;

// Any legal combination of float suffixes: f/F or L, each optionally
// followed by 'i', or a bare 'i'.
fragment
FloatSuffixAny
	:	FloatSuffix ImaginarySuffix?
	|	RealSuffix ImaginarySuffix?
	|	ImaginarySuffix
	;

fragment
FloatSuffix
	:	'f'|'F'
	;

fragment
RealSuffix
	:	'L'
	;

fragment
ImaginarySuffix
	:	'i'
	;
	
	
//TODO can I put these in a separate file so they dont slow things down? yes: import other_lexer
//really long fragments
//fragment
//UniversalAlpha
//	:	'\u00AA'..'\u00AA' | '\u00B5'..'\u00B5' | '\u00B7'..'\u00B7' | '\u00BA'..'\u00BA' | '\u00C0'..'\u00D6' | '\u00D8'..'\u00F6' | '\u00F8'..'\u01F5' | '\u01FA'..'\u0217' | '\u0250'..'\u02A8' | '\u02B0'..'\u02B8' | '\u02BB'..'\u02BB' | '\u02BD'..'\u02C1' | '\u02D0'..'\u02D1' | '\u02E0'..'\u02E4' | '\u037A'..'\u037A' | '\u0386'..'\u0386' | '\u0388'..'\u038A' | '\u038C'..'\u038C' | '\u038E'..'\u03A1' | '\u03A3'..'\u03CE' | '\u03D0'..'\u03D6' | '\u03DA'..'\u03DA' | '\u03DC'..'\u03DC' | '\u03DE'..'\u03DE' | '\u03E0'..'\u03E0' | '\u03E2'..'\u03F3' | '\u0401'..'\u040C' | '\u040E'..'\u044F' | '\u0451'..'\u045C' | '\u045E'..'\u0481' | '\u0490'..'\u04C4' | '\u04C7'..'\u04C8' | '\u04CB'..'\u04CC' | '\u04D0'..'\u04EB' | '\u04EE'..'\u04F5' | '\u04F8'..'\u04F9' | '\u0531'..'\u0556' | '\u0559'..'\u0559' | '\u0561'..'\u0587' | '\u05B0'..'\u05B9' | '\u05BB'..'\u05BD' | '\u05BF'..'\u05BF' | '\u05C1'..'\u05C2' | '\u05D0'..'\u05EA' | '\u05F0'..'\u05F2' | '\u0621'..'\u063A' | '\u0640'..'\u0652' | '\u0660'..'\u0669' | '\u0670'..'\u06B7' | '\u06BA'..'\u06BE' | '\u06C0'..'\u06CE' | '\u06D0'..'\u06DC' | '\u06E5'..'\u06E8' | '\u06EA'..'\u06ED' | '\u06F0'..'\u06F9' | '\u0901'..'\u0903' | '\u0905'..'\u0939' | '\u093D'..'\u093D' | '\u093E'..'\u094D' | '\u0950'..'\u0952' | '\u0958'..'\u0963' | '\u0966'..'\u096F' | '\u0981'..'\u0983' | '\u0985'..'\u098C' | '\u098F'..'\u0990' | '\u0993'..'\u09A8' | '\u09AA'..'\u09B0' | '\u09B2'..'\u09B2' | '\u09B6'..'\u09B9' | '\u09BE'..'\u09C4' | '\u09C7'..'\u09C8' | '\u09CB'..'\u09CD' | '\u09DC'..'\u09DD' | '\u09DF'..'\u09E3' | '\u09E6'..'\u09EF' | '\u09F0'..'\u09F1' | '\u0A02'..'\u0A02' | '\u0A05'..'\u0A0A' | '\u0A0F'..'\u0A10' | '\u0A13'..'\u0A28' | '\u0A2A'..'\u0A30' | '\u0A32'..'\u0A33' | '\u0A35'..'\u0A36' | '\u0A38'..'\u0A39' | '\u0A3E'..'\u0A42' | '\u0A47'..'\u0A48' | '\u0A4B'..'\u0A4D' | '\u0A59'..'\u0A5C' | '\u0A5E'..'\u0A5E' | '\u0A66'..'\u0A6F' | '\u0A74'..'\u0A74' | '\u0A81'..'\u0A83' | '\u0A85'..'\u0A8B' | '\u0A8D'..'\u0A8D' | '\u0A8F'..'\u0A91' | 
'\u0A93'..'\u0AA8' | '\u0AAA'..'\u0AB0' | '\u0AB2'..'\u0AB3' | '\u0AB5'..'\u0AB9' | '\u0ABD'..'\u0AC5' | '\u0AC7'..'\u0AC9' | '\u0ACB'..'\u0ACD' | '\u0AD0'..'\u0AD0' | '\u0AE0'..'\u0AE0' | '\u0AE6'..'\u0AEF' | '\u0B01'..'\u0B03' | '\u0B05'..'\u0B0C' | '\u0B0F'..'\u0B10' | '\u0B13'..'\u0B28' | '\u0B2A'..'\u0B30' | '\u0B32'..'\u0B33' | '\u0B36'..'\u0B39' | '\u0B3D'..'\u0B3D' | '\u0B3E'..'\u0B43' | '\u0B47'..'\u0B48' | '\u0B4B'..'\u0B4D' | '\u0B5C'..'\u0B5D' | '\u0B5F'..'\u0B61' | '\u0B66'..'\u0B6F' | '\u0B82'..'\u0B83' | '\u0B85'..'\u0B8A' | '\u0B8E'..'\u0B90' | '\u0B92'..'\u0B95' | '\u0B99'..'\u0B9A' | '\u0B9C'..'\u0B9C' | '\u0B9E'..'\u0B9F' | '\u0BA3'..'\u0BA4' | '\u0BA8'..'\u0BAA' | '\u0BAE'..'\u0BB5' | '\u0BB7'..'\u0BB9' | '\u0BBE'..'\u0BC2' | '\u0BC6'..'\u0BC8' | '\u0BCA'..'\u0BCD' | '\u0BE7'..'\u0BEF' | '\u0C01'..'\u0C03' | '\u0C05'..'\u0C0C' | '\u0C0E'..'\u0C10' | '\u0C12'..'\u0C28' | '\u0C2A'..'\u0C33' | '\u0C35'..'\u0C39' | '\u0C3E'..'\u0C44' | '\u0C46'..'\u0C48' | '\u0C4A'..'\u0C4D' | '\u0C60'..'\u0C61' | '\u0C66'..'\u0C6F' | '\u0C82'..'\u0C83' | '\u0C85'..'\u0C8C' | '\u0C8E'..'\u0C90' | '\u0C92'..'\u0CA8' | '\u0CAA'..'\u0CB3' | '\u0CB5'..'\u0CB9' | '\u0CBE'..'\u0CC4' | '\u0CC6'..'\u0CC8' | '\u0CCA'..'\u0CCD' | '\u0CDE'..'\u0CDE' | '\u0CE0'..'\u0CE1' | '\u0CE6'..'\u0CEF' | '\u0D02'..'\u0D03' | '\u0D05'..'\u0D0C' | '\u0D0E'..'\u0D10' | '\u0D12'..'\u0D28' | '\u0D2A'..'\u0D39' | '\u0D3E'..'\u0D43' | '\u0D46'..'\u0D48' | '\u0D4A'..'\u0D4D' | '\u0D60'..'\u0D61' | '\u0D66'..'\u0D6F' | '\u0E01'..'\u0E3A' | '\u0E40'..'\u0E5B' | '\u0E50'..'\u0E59' | '\u0E81'..'\u0E82' | '\u0E84'..'\u0E84' | '\u0E87'..'\u0E88' | '\u0E8A'..'\u0E8A' | '\u0E8D'..'\u0E8D' | '\u0E94'..'\u0E97' | '\u0E99'..'\u0E9F' | '\u0EA1'..'\u0EA3' | '\u0EA5'..'\u0EA5' | '\u0EA7'..'\u0EA7' | '\u0EAA'..'\u0EAB' | '\u0EAD'..'\u0EAE' | '\u0EB0'..'\u0EB9' | '\u0EBB'..'\u0EBD' | '\u0EC0'..'\u0EC4' | '\u0EC6'..'\u0EC6' | '\u0EC8'..'\u0ECD' | '\u0ED0'..'\u0ED9' | '\u0EDC'..'\u0EDD' | '\u0F00'..'\u0F00' | 
'\u0F18'..'\u0F19' | '\u0F20'..'\u0F33' | '\u0F35'..'\u0F35' | '\u0F37'..'\u0F37' | '\u0F39'..'\u0F39' | '\u0F3E'..'\u0F47' | '\u0F49'..'\u0F69' | '\u0F71'..'\u0F84' | '\u0F86'..'\u0F8B' | '\u0F90'..'\u0F95' | '\u0F97'..'\u0F97' | '\u0F99'..'\u0FAD' | '\u0FB1'..'\u0FB7' | '\u0FB9'..'\u0FB9' | '\u10A0'..'\u10C5' | '\u10D0'..'\u10F6' | '\u1E00'..'\u1E9B' | '\u1EA0'..'\u1EF9' | '\u1F00'..'\u1F15' | '\u1F18'..'\u1F1D' | '\u1F20'..'\u1F45' | '\u1F48'..'\u1F4D' | '\u1F50'..'\u1F57' | '\u1F59'..'\u1F59' | '\u1F5B'..'\u1F5B' | '\u1F5D'..'\u1F5D' | '\u1F5F'..'\u1F7D' | '\u1F80'..'\u1FB4' | '\u1FB6'..'\u1FBC' | '\u1FBE'..'\u1FBE' | '\u1FC2'..'\u1FC4' | '\u1FC6'..'\u1FCC' | '\u1FD0'..'\u1FD3' | '\u1FD6'..'\u1FDB' | '\u1FE0'..'\u1FEC' | '\u1FF2'..'\u1FF4' | '\u1FF6'..'\u1FFC' | '\u203F'..'\u2040' | '\u207F'..'\u207F' | '\u2102'..'\u2102' | '\u2107'..'\u2107' | '\u210A'..'\u2113' | '\u2115'..'\u2115' | '\u2118'..'\u211D' | '\u2124'..'\u2124' | '\u2126'..'\u2126' | '\u2128'..'\u2128' | '\u212A'..'\u2131' | '\u2133'..'\u2138' | '\u2160'..'\u2182' | '\u3005'..'\u3007' | '\u3021'..'\u3029' | '\u3041'..'\u3093' | '\u309B'..'\u309C' | '\u30A1'..'\u30F6' | '\u30FB'..'\u30FC' | '\u3105'..'\u312C' | '\u4E00'..'\u9FA5' | '\uAC00'..'\uD7A3'
//	;
	
//fragment
//NamedCharacterEntity
//	:	'quot' | 'amp' | 'lt' | 'gt' | 'OElig' | 'oelig' | 'Scaron' | 'scaron' | 'Yuml' | 'circ' | 'tilde' | 'ensp' | 'emsp' | 'thinsp' | 'zwnj' | 'zwj' | 'lrm' | 'rlm' | 'ndash' | 'mdash' | 'lsquo' | 'rsquo' | 'sbquo' | 'ldquo' | 'rdquo' | 'bdquo' | 'dagger' | 'Dagger' | 'permil' | 'lsaquo' | 'rsaquo' | 'euro' | 'nbsp' | 'iexcl' | 'cent' | 'pound' | 'curren' | 'yen' | 'brvbar' | 'sect' | 'uml' | 'copy' | 'ordf' | 'laquo' | 'not' | 'shy' | 'reg' | 'macr' | 'deg' | 'plusmn' | 'sup2' | 'sup3' | 'acute' | 'micro' | 'para' | 'middot' | 'cedil' | 'sup1' | 'ordm' | 'raquo' | 'frac14' | 'frac12' | 'frac34' | 'iquest' | 'Agrave' | 'Aacute' | 'Acirc' | 'Atilde' | 'Auml' | 'Aring' | 'AElig' | 'Ccedil' | 'Egrave' | 'Eacute' | 'Ecirc' | 'Euml' | 'Igrave' | 'Iacute' | 'Icirc' | 'Iuml' | 'ETH' | 'Ntilde' | 'Ograve' | 'Oacute' | 'Ocirc' | 'Otilde' | 'Ouml' | 'times' | 'Oslash' | 'Ugrave' | 'Uacute' | 'Ucirc' | 'Uuml' | 'Yacute' | 'THORN' | 'szlig' | 'agrave' | 'aacute' | 'acirc' | 'atilde' | 'auml' | 'aring' | 'aelig' | 'ccedil' | 'egrave' | 'eacute' | 'ecirc' | 'euml' | 'igrave' | 'iacute' | 'icirc' | 'iuml' | 'eth' | 'ntilde' | 'ograve' | 'oacute' | 'ocirc' | 'otilde' | 'ouml' | 'divide' | 'oslash' | 'ugrave' | 'uacute' | 'ucirc' | 'uuml' | 'yacute' | 'thorn' | 'yuml' | 'fnof' | 'Alpha' | 'Beta' | 'Gamma' | 'Delta' | 'Epsilon' | 'Zeta' | 'Eta' | 'Theta' | 'Iota' | 'Kappa' | 'Lambda' | 'Mu' | 'Nu' | 'Xi' | 'Omicron' | 'Pi' | 'Rho' | 'Sigma' | 'Tau' | 'Upsilon' | 'Phi' | 'Chi' | 'Psi' | 'Omega' | 'alpha' | 'beta' | 'gamma' | 'delta' | 'epsilon' | 'zeta' | 'eta' | 'theta' | 'iota' | 'kappa' | 'lambda' | 'mu' | 'nu' | 'xi' | 'omicron' | 'pi' | 'rho' | 'sigmaf' | 'sigma' | 'tau' | 'upsilon' | 'phi' | 'chi' | 'psi' | 'omega' | 'thetasym' | 'upsih' | 'piv' | 'bull' | 'hellip' | 'prime' | 'Prime' | 'oline' | 'frasl' | 'weierp' | 'image' | 'real' | 'trade' | 'alefsym' | 'larr' | 'uarr' | 'rarr' | 'darr' | 'harr' | 'crarr' | 'lArr' | 'uArr' | 'rArr' | 'dArr' | 'hArr' | 'forall' | 'part' | 
'exist' | 'empty' | 'nabla' | 'isin' | 'notin' | 'ni' | 'prod' | 'sum' | 'minus' | 'lowast' | 'radic' | 'prop' | 'infin' | 'ang' | 'and' | 'or' | 'cap' | 'cup' | 'int' | 'there4' | 'sim' | 'cong' | 'asymp' | 'ne' | 'equiv' | 'le' | 'ge' | 'sub' | 'sup' | 'nsub' | 'sube' | 'supe' | 'oplus' | 'otimes' | 'perp' | 'sdot' | 'lceil' | 'rceil' | 'lfloor' | 'rfloor' | 'lang' | 'rang' | 'loz' | 'spades' | 'clubs' | 'hearts' | 'diams'
//	;


//TODO what about doc comments? e.g. /** blah blah
//testing subversion. adding this line to mimic lexer to see it get added to line lexer.
