<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>PHP Lexer Prototype</title>

<script type="text/javascript">

// A lexer context: a region delimited by a start rule and an end rule,
// with its own nested rule set (e.g. a string or a multi-line comment).
//
// name      - context name used when emitting tokens
// startRule - RegExp literal (WITHOUT modifiers) or string pattern opening the context
// endRule   - RegExp literal (WITHOUT modifiers) or string pattern closing the context
// rules     - array of Lexer_Token / Lexer_Context / Lexer_Keywords active inside
//
// Also counts the capturing parentheses of the start rule so the parser can
// skip over them when walking a combined regexp's match array.
function Lexer_Context( name, startRule, endRule, rules ) 
{
	this.name = name;
	
	// Fix: the old check was `typeof(startRule) == 'function'`, which only
	// held for regexps in very old engines; modern JS reports 'object', so a
	// regex literal fell into the string branch and later broke. Use
	// `instanceof RegExp` and the .source property instead of slicing
	// String(re) by hand.
	if (startRule instanceof RegExp)
		this.re = startRule.source;
	else
		this.re = startRule;
	
	// find capturing parenthesis by removing the escaped backslashes first...
	var reStr = this.re.replace(/\\\\/g, '');
	// and get all non-escaped parenthesis not followed by a question mark
	var result = reStr.match( /([^\\]|^)\(([^\?]|$)/g );
	this.parenthesis = result ? result.length : 0;

	if (endRule instanceof RegExp)
		this.endRe = endRule.source;
	else
		this.endRe = endRule;
	
	this.rules = rules;
}

// A terminal token matched by a single rule.
//
// name - token name used when emitting matches
// rule - RegExp literal (WITHOUT modifiers) or string pattern
function Lexer_Token( name, rule )
{
	this.name = name;
	
	// Fix: replaced the legacy `typeof(rule) == 'function'` regexp check
	// (true only in ancient engines) with `instanceof RegExp`, and use
	// .source instead of fragile String(rule).substr slicing.
	if (rule instanceof RegExp)
		this.re = rule.source;
	else
		this.re = rule;	
}

// A keyword group: builds one combined, prefix-optimized RegExp matching any
// of `keywords`.
//
// name          - group name used when emitting matches
// casesensitive - true => 'g' flags, false => 'gi'
// special       - optional single escape char wrapped around the whole
//                 pattern as '\'+special on both sides (e.g. 'b' => \b ... \b)
// keywords      - array of keyword strings (sorted in place!)
//
// Sets this.re (the combined RegExp) and this.minLength (length of the
// shortest keyword, used by the parser to skip hopeless scans).
function Lexer_Keywords( name, casesensitive, special, keywords )
{
	this.name = name;
	//this.casesensitive = casesensitive;
	
	// first we sort the keywords
	keywords.sort();
	// Now we reverse the previous sort so we have the keywords properly 
	// ordered to build the regular expression
	keywords.reverse();
	
	
	// Optimize the keyword list by finding common prefixes. This could be implemented much better
	// by weighting all the possibilities, instead it currently just match the first prefix found.
	// i.e: mysql_connect, mysql_fetch_row, mysql_fetch_assoc, mysql_fetch_array
	// becomes: mysql_(connect|fetch_row|fetch_assoc|fetch_array)
	// instead of: mysql_connect|mysql_fetch_(row|assoc|array)
	// or even: mysql_(connect|fetch_(row|assoc|array))

	//TODO: Benchmark if the optimization is really somewhat faster
	
	var reStrings = [];
	var bucket = [];
	var prefix, count;
	var lastPrefix = 0; // fix: was an undeclared implicit global
	var optionalSuffix;
	var escapeRe = /[\|\[\]\(\)\^\$\.\*\+\?\!\{\}\,\=\\]/g; //escapes regexp special chars
	
	this.minLength = 0;	
	
	for (var i=0; i<keywords.length; i++)
	{
		//console.log('Keyword: %s', keywords[i]);
		
		if (!this.minLength || keywords[i].length < this.minLength)
			this.minLength = keywords[i].length; 
		
		count = 1;
		prefix = 0;
		
		// grow the run [i, i+count) of consecutive keywords that share a
		// usable common prefix
		while ( i+count < keywords.length )
		{
			lastPrefix = prefix;
			
			// find first different character starting from the left
			prefix = Math.min( keywords[i].length, keywords[i+count].length );
			for (var j=0; j<prefix; j++)
			{
				if ( keywords[i].charAt(j) != keywords[i+count].charAt(j) )
				{
					prefix = j;
					break;
				}
			}

			//console.log('  Current: %s, Count: %d, Prefix: %d', keywords[i+count], count, prefix);
			
			// stop when the prefix gets too short or shrinks too abruptly
			if ( prefix < 3 || lastPrefix-prefix > 2 )
				break;
			else
				count++;
		}

		if ( count > 2 )
		{
			// enough keywords share the prefix: emit prefix(?:suffix|...)
			optionalSuffix = false;
			bucket = keywords.slice( i, i+count );
			for (var j=0; j<bucket.length; j++)
			{
				bucket[j] = bucket[j].substr( lastPrefix ).replace( escapeRe, '\\$&' );
				// a keyword equal to the prefix itself yields an empty
				// suffix, so the whole group becomes optional
				if ( !bucket[j].length )
					optionalSuffix = true;
			}
			
			
			reStrings.push( keywords[i].substr(0, lastPrefix).replace( escapeRe, '\\$&' ) + '(?:' + bucket.join('|') + ')' + (optionalSuffix ? '?' : '') );
		
			//console.log('  PREFIX: %s, Keywords: %s', keywords[i].substr(0, lastPrefix), bucket);
					
			//skip already optimized items
			i += count-1;
		}
		else
		{ 
			reStrings.push( keywords[i].replace( escapeRe, '\\$&' ) );
		}
	}
	
	this.re = new RegExp( (special ? '\\'+special : '') + '(?:' + reStrings.join('|') + ')' + (special ? '\\' + special : ''), casesensitive ? 'g' : 'gi' );	
	//console.log('RE: %s', this.re);
}


// Recursively build a tokenizer tree from a rule definition array.
// Each entry of `definition` is a Lexer_Token, Lexer_Context or
// Lexer_Keywords instance; contexts recurse through their .rules.
//
// definition - array of rule objects (order matters: one capture group each)
// endRe      - optional pattern appended last, closing the enclosing context
// parent     - the token node that owns this tokenizer (null at the root)
//
// Returns a tokenizer object:
//   parent   - owning token node or null
//   re       - one combined RegExp with a capture group per token rule
//   template - the combined pattern source (kept so #BACKREFERENCEn# slots
//              can be substituted later for dynamic contexts)
//   tokens   - token nodes in capture-group order
//   keywords - keyword groups, each weighted by its position among the tokens
function BuildLexer( definition, endRe, parent )
{
	// Derive a lowercase type tag from the object's constructor name,
	// e.g. Lexer_Context -> 'lexer_context'.
	function getObjectType( obj )
	{
		var arr = obj.constructor.toString().match( /function\s+([A-Z_]+[A-Z0-9_]*)\s*\(/i );
		// guard: match() returns null for anonymous or native constructors
		if (arr && arr[1])
			return arr[1].toLowerCase();
		else
			return false;
	}	
	
    var token;
    var tokenizer = {
    	'parent'	: parent ? parent : null,
    	're'		: null,
    	'template'	: '',
    	'tokens'	: [],
    	'keywords'	: []
    };
        
    var tokensReArr = [];
    var keywordsReArr = [];
    
    for (var i=0; i < definition.length; i++)
    {
    	token = {
    		'name'			: definition[i].name,
    		'prevContext'	: tokenizer,
    		'nextContext'	: null,
    		'parenthesis'	: 0
    	};
    	    	
    	switch ( getObjectType( definition[i] ) )
    	{
    		case 'lexer_context':
	    		token.parenthesis = definition[i].parenthesis;
				// recurse: the context's rules become a child tokenizer
				token.nextContext = BuildLexer( definition[i].rules, definition[i].endRe, token );
				tokensReArr.push( definition[i].re );
		       	tokenizer.tokens.push( token );
       		break;
       		case 'lexer_token':
       			tokensReArr.push( definition[i].re );
		       	tokenizer.tokens.push( token );       			
       		break;
       		case 'lexer_keywords':
       			token.re = definition[i].re;
       			// weight = how many token rules precede this group; used by
       			// the parser to decide precedence between keyword and token
       			token.weight = tokensReArr.length;
       			token.minLength = definition[i].minLength;
       			tokenizer.keywords.push( token );
       		break;
       		default:
       			alert('FATAL ERROR: The definition object seems to be corrupted');
       	}
    }
    
	// the enclosing context's end rule always goes last
	if (endRe)
		tokensReArr.push( endRe );
	
	if (tokensReArr.length)
	{
		tokenizer.re = new RegExp( '(' + tokensReArr.join(')|(') + ')', 'g' );
		tokenizer.template = '(' + tokensReArr.join(')|(') + ')';
	}

	return tokenizer;
}



// define some commonly used constructs
// Note: Rules can be defined as a string or as a regexp literal WITHOUT modifiers

// match a complex-parsing-variable like ${foo}, {$foo->bar}, ${foo['bar'][2]} ...
var PHPComplexParsingVariable = new Lexer_Context( 'Variable', /(?:\$\{|\{\$)(?:[a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*(?:->)?)+/, /\}/,
	[
		// match single-quoted no-multiline strings supporting \'
		new HL_Token( 'SingleQuoteString', /'[^'\\]*(?:\\.[^'\\]*)*'/ ),
	]
);

// match a parsing-variable like $foo or $foo->bar
var PHPSimpleParsingVariable = new Lexer_Token( 'Variable', /\$(?:[a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*(?:->)?)+/ );


//Order matters! Keywords are ordered independently of context/tokens
var Lexer_Definition_PHP = [
	// single-line comments, either // or # style
	new Lexer_Token( 'SingleLineComment', /(?:\/\/|#).*$/ ),
	// multi-line comments
	//new HL_Context( 'MultiLineComment', /\/\*/, /\*\//, [] ),
	new Lexer_Context( 'MultiLineComment', /(\/)(\*)/, /#BACKREFERENCE2##BACKREFERENCE1#/, [] ),

	// match double-quoted-strings with support for parsing variables
	new Lexer_Context( 'DoubleQuoteString', /"/, /"/,
		[
			// match \", \$ and the like			
			new Lexer_Token( 'Backslash', /\\./ ),
			PHPComplexParsingVariable,
			PHPSimpleParsingVariable
			
		]
	),
	// Heredoc strings
	new Lexer_Context( 'HeredocString', /<<<([a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*)\s*$/, /^#BACKREFERENCE1#;?$/, 
		[
			// match \$
			new Lexer_Token( 'Backslash', /\\\$/ ),
			PHPComplexParsingVariable,
			PHPSimpleParsingVariable
		]
	),
	// match single-quoted multiline strings supporting \'
	new Lexer_Context( 'SingleQuoteString', /'/, /'/,
		[
			// match \'
			new Lexer_Token( 'Backslash', /\\'/ )
		]
	),
	
	// Variables
	new Lexer_Token( 'Variable', /\$[a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*/ ),
	
	// Numbers
	new Lexer_Token( 'NumberHex', /\b[+-]?0[Xx][0-9A-Fa-f]+\b/ ),
	new Lexer_Token( 'NumberFloat', /\b[+-]?[0-9]*\.+[0-9]+(?:[Ee][+-]?[0-9]*)?\b/ ),
	new Lexer_Token( 'NumberOctal', /\b[+-]?0[0-9]+\b/ ),
	new Lexer_Token( 'NumberInt', /\b[+-]?[0-9]+\b/ ), 

	// Operators
	// TODO: optimize it by placing the single-char operators in a class [\.~<>]
	new Lexer_Token( 'Operator', /~|\|\||\|\=|\||\^\=|\^|@|\?|>>\=|>>|>\=|>|\=\=\=|\=\=|\=|<\=|<<\=|<<|<|::|:|\/\=|\/|\.\=|\.|->|-\=|--|-|\+\=|\+\+|\+|\*\=|\*|&\=|&&|&|%\=|%\=|%|%|\!\=\=|\!\=|\!/ ),	
		
	// Control Structures
	new Lexer_Keywords( 'ControlStructure', false, 'b',
		[ 
			'if', 'else', 'elseif', 'endif', 'while', 'endwhile', 'do', 'for', 'endfor', 
			'foreach', 'endforeach', 'break', 'continue', 'switch', 'endswitch', 'case', 
			'default', 'as', 'declare', 'return', 'require', 'include', 'require_once', 
			'include_once'
		]
	),

	// Operators
	new Lexer_Keywords( 'Operator', false, 'b',
		[
			'new',
			'and', 'xor', 'or',
			'instanceof',
		]
	),
	
	// Keywords
	new Lexer_Keywords( 'Keyword', false, 'b',
		[
			'abstract', 'catch', 'class', 'const', 'extends', 'final', 'function', 'implements', 
			'interface', 'new', 'self', 'static', 'parent', 'private', 'protected', 'public', 
			'throw', 'try', 'var'
		]
	),
	
	// Idents
	new Lexer_Token( 'Ident', /[a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*/ )	

];

var lexer = BuildLexer( Lexer_Definition_PHP );


//console.log(lexer.toSource());



/*
function parseLine( src, tknz, state )
{  
    function findKeyword( payload, keywords, tmp )
    {
    	var result = { 'unmatched': '', 'keyword': '' };
    	keywords.re.lastIndex = 0;
    	if ( (match = keywords.re.exec( payload )) !== null )
    	{
    		if (match.index > 0)
    		{
    			addToken( 'unmatched', payload.substr( 0, match.index ) );
    			console.log('Keyword_'+tmp+' unmatched text: %s', payload.substr( 0, match.index ));
    			result.unmatched = payload.substr( 0, match.index );
    		}
    		
    		addToken( keywords.name, payload.substr( match.index, match[0].length ) );
    		console.log('Keyword_'+tmp+': %s', payload.substr( match.index, match[0].length ));
    		    		
    		result.keyword = payload.substr( match.index, match[0].length );
    		return result;
    	}
    	return false;
    }    
    
    
       
    var escapeRe = /[\|\[\]\(\)\^\$\.\*\+\?\!\{\}\,\=\\]/g;
    
    var i;
    var endPos = 0;
    var pos = 0;
    
    var dinamic;
    var newContext;
    
    console.log( tknz.re.toSource() );
    
	//reset the regular expression
    tknz.re.lastIndex = 0;
    // find next token
    while ( (startMatch = tknz.re.exec( src )) !== null )
    {
    	//console.log('RE: %s - StartMatch: %s', tknz.re, startMatch);
    	dinamic = false;
    	newContext = false;
    	skipLoop = false;

        if ( endPos < startMatch.index )
        {          
            if (tknz.keywords)
            {
            	var substr = src.substr( endPos, startMatch.index );
            	for (j=0; j<tknz.keywords.length && substr.length; j++)
            	{
            		//console.log('j: ' + j);
            		if (substr.length >= tknz.keywords[j].minLength)
            		{
            			var kw = findKeyword( substr, tknz.keywords[j], 'pre' );
            			if (kw)
            			{
            				console.log('('+endPos+') UNMATCHED: %s - KEYWORD: %s', kw.unmatched, kw.keyword);
            				substr = substr.substring( kw.unmatched.length + kw.keyword.length );
            				endPos += kw.unmatched.length + kw.keyword.length;  //TODO: get overrided below!

            				skipLoop = true;
            					 
            				j = -1;  //reset the loop           				           			
            			}
            		}
            	}
            }
            
            if (endPos < startMatch.index)
            {
            	addToken( 'unmatched', src.substring( endPos, startMatch.index ) );
            	console.log( 'UNMATCHED TEXT %d: %s', endPos, src.substring( endPos, startMatch.index ) );
            }
                       
            // at least a keyword was found so we check for a new token after its position            
            if (skipLoop)
            {
                tknz.re.lastIndex = endPos;
            	continue;
            }    

        }
    
    	pos = startMatch.index;
        endPos = pos + startMatch[0].length;
                
		// loop thru the captured parenthesis
        for (i=1, tkn=0; tkn < tknz.tokens.length; i++, tkn++)
        {
        
    		// check if we matched a keyword with a general rule  		
    		if (tknz.keywords)
    		{
    			var substr = src.substring( pos );
    			for (j=0; j<tknz.keywords.length; j++)
    			{

    				if ( tkn > tknz.keywords[j].weight && substr.length >= tknz.keywords[j].minLength)
    				{
    					//console.log('tkn: %d - NAME: %d - WEIGHT: %d - MINLLENGTH: %d', tkn, tknz.keywords[j].name, tknz.keywords[j].weight, tknz.keywords[j].minLength );   					
   						
   						var kw = findKeyword( src.substring( pos ), tknz.keywords[j], 'in' );
   						if (kw)
   						{
   							console.log('('+endPos+') UNMATCHED: %s - KEYWORD: %s', kw.unmatched, kw.keyword);
   							tknz.re.lastIndex = pos + kw.unmatched.length + kw.keyword.length;
   							skipLoop = true;
   							break;
   						}
    				}
    			}
    		}
    		               	
            // check the first capture parentesis which was matched
            if (!skipLoop && startMatch[i])
            {          
                //console.log('Match %d %s: %s', tkn, tknz.tokens[tkn].name, startMatch[i]);
                
                //blocks.push( startMatch[0], tknz.tokens[tkn].name );
                addToken( tknz.tokens[tkn].name, startMatch[0] );
                
                // check if the token has an end delimiter
                if (tknz.tokens[tkn].children)
                {
				    console.log('START OF CONTEXT FOUND (' + tknz.tokens[tkn].name + ') : ' + startMatch[0]);
                    
                    newContext = true;                    
                    
                    // we have matched a dinamic context, so we need to rebuild the escape rule
                    if (tknz.tokens[tkn].parenthesis)
                    {
                    	dinamic = tknz.tokens[tkn].children.template;
                    	console.log('TEMPLATE: ' + dinamic);
                    	for (var j=1; j<=tknz.tokens[tkn].parenthesis; j++)
                    	{
                    		if (startMatch[i+j])
                       			dinamic = dinamic.replace( '#BACKREFERENCE'+j+'#', startMatch[i+j].replace( escapeRe, '\\$&' ) );
                    	}
                    }
                    
                    // point the active tokenizer to the new context
                    tknz = tknz.tokens[tkn].children;
                    // if it's dinamic then we rebuild the regular expression
                    if (dinamic)
                    	tknz.re = new RegExp( dinamic, 'g' );
                    tknz.re.lastIndex = endPos;
                    
                    //console.log('RE: ' + tknz.re);
                }
                else
                {
                	console.log('TOKEN %s: %s', tknz.tokens[tkn].name, startMatch[0]); 
                }
                
                skipLoop = true;
                //break;
            }
        	
        	if (tknz.tokens[tkn] && tknz.tokens[tkn].parenthesis)
        		i += tknz.tokens[tkn].parenthesis;
     	}
        
        // if none of the start tokens were matched, check if the end of context one was
        if (!newContext && startMatch[i])
        {
            console.log('END OF CONTEXT FOUND: %s', startMatch[0]);
            //blocks.push( new TAP_Buffer_Block( pos, state.slice(0,state.length) ) );
            
            
            
            tknz = tknz.parent;
            tknz.re.lastIndex = endPos;
            
            addToken( tknz.name, startMatch[0] );
        }
    }
    
    if ( endPos < src.length )
    {
    	//console.log( 'UNMATCHED TEXT ' + src.substring( endPos, startMatch.index ) );
            
        if (tknz.keywords)
        {
        	var substr = src.substring( endPos );
            for (j=0; j<tknz.keywords.length && substr.length; j++)
            {
            	if (substr.length >= tknz.keywords[j].minLength)
            	{
            		var kw = findKeyword( substr, tknz.keywords[j], 'post' );
            		if (kw)
            		{
            			console.log('UNMATCHED: %s - KEYWORD: %s', kw.unmatched, kw.keyword);
            			substr = substr.substring( kw.unmatched.length + kw.keyword.length );
            			endPos += kw.unmatched.length + kw.keyword.length;   
            			j = -1;  //reset the loop
            		}
            	}
            }
        }
            
        if (endPos < src.length)
        {
        	addToken( 'unmatched', src.substring( endPos ) );
        	console.log( 'UNMATCHED TEXT %d: %s', endPos, src.substring( endPos ) );
        }
       	//this.blocks.push( new TAP_Buffer_Block( endPos, state.concat( [-1] ) ) );
	}
}
*/

// Emit a matched token. Currently a debug stub that only logs the token's
// name together with the matched text.
function addToken( token, value )
{
	var label = token.name;
	console.log('ADDTOKEN %s: %s', label, value);
}


// Tokenize `src` against the lexer tree rooted at `token`, reporting every
// recognized piece through addToken() and the rest as unmatched text.
//
// token - a node whose .children holds a tokenizer built by BuildLexer
//         (.re, .template, .tokens, .keywords, .parent); .name labels
//         unmatched text for the current context.
//
// NOTE(review): `token` is reassigned when a nested context is entered, so
// the lexer state walks into the tree during the call. Several names are
// undeclared implicit globals (match, startMatch, skipLoop, j, tkn) —
// this would fail in strict mode.
function parseLine( src, token )
{  
    // Find the first keyword of the Lexer_Keywords group `keywords` inside
    // `payload`. Returns { unmatched, keyword } on a hit (unmatched = text
    // before the keyword), or false when the group's regexp does not match.
    // `tmp` only tags the debug output.
    function findKeyword( payload, keywords, tmp )
    {
    	var result = { 'unmatched': '', 'keyword': '' };
    	keywords.re.lastIndex = 0;
    	if ( (match = keywords.re.exec( payload )) !== null )
    	{
    		if (match.index > 0)
    		{
    			console.log('Keyword_'+tmp+' unmatched text: %s', payload.substr( 0, match.index ));
    			result.unmatched = payload.substr( 0, match.index );
    		}
    		
    		console.log('Keyword_'+tmp+': %s', payload.substr( match.index, match[0].length ));
    		    		
    		result.keyword = payload.substr( match.index, match[0].length );
    		return result;
    	}
    	return false;
    }    
    
    
       
    // escapes regexp special chars when splicing matched text into a dynamic
    // end-of-context pattern (the #BACKREFERENCEn# substitution below)
    var escapeRe = /[\|\[\]\(\)\^\$\.\*\+\?\!\{\}\,\=\\]/g;
    
    var i;
    var endPos = 0;  // end of the last reported span in src
    var pos = 0;     // start of the current match
    
    var dinamic;     // rebuilt pattern for a dynamic context, or false
    var newContext;  // true when this match opened a nested context
    
    //console.log( tknz.context.re.toSource() );
      
	//reset the regular expression
    token.children.re.lastIndex = 0;
    // find next token
    while ( (startMatch = token.children.re.exec( src )) !== null )
    {
    	//console.log('RE: %s - StartMatch: %s', tknz.re, startMatch);
    	dinamic = false;
    	newContext = false;
    	skipLoop = false;

        // plain text sits between the previous token and this match: try the
        // keyword groups on it before reporting it as unmatched
        if ( endPos < startMatch.index )
        {          
            if (token.children.keywords)
            {
            	// NOTE(review): substr()'s 2nd argument is a LENGTH, not an
            	// end index; substring( endPos, startMatch.index ) looks
            	// intended here — TODO confirm
            	var substr = src.substr( endPos, startMatch.index );
            	for (j=0; j<token.children.keywords.length && substr.length; j++)
            	{
            		//console.log('j: ' + j);
            		if (substr.length >= token.children.keywords[j].minLength)
            		{
            			var kw = findKeyword( substr, token.children.keywords[j], 'pre' );
            			if (kw)
            			{
            				console.log('('+endPos+') UNMATCHED: %s - KEYWORD: %s', kw.unmatched, kw.keyword);
   							if (kw.unmatched)
   								addToken( token, kw.unmatched );
   							if (kw.keyword)
   								addToken( token.children.keywords[j], kw.keyword);
            				
            				// consume the reported span and rescan the rest
            				substr = substr.substring( kw.unmatched.length + kw.keyword.length );
            				endPos += kw.unmatched.length + kw.keyword.length;

            				skipLoop = true;
            					 
            				j = -1;  //reset the loop           				           			
            			}
            		}
            	}
            }
            
            // whatever is left over is genuinely unmatched text
            if (endPos < startMatch.index)
            {
            	addToken( token, src.substring( endPos, startMatch.index ) );
            	console.log( 'UNMATCHED TEXT_%s_ %d: %s', token.name, endPos, src.substring( endPos, startMatch.index ) );
            	// NOTE(review): RegExp.prototype.toSource() is a removed
            	// Firefox-only extension — this line throws on modern engines
            	console.log( token.children.re.toSource() );
            }
                       
            // at least a keyword was found so we check for a new token after its position            
            if (skipLoop)
            {
                token.children.re.lastIndex = endPos;
            	continue;
            }    

        }
    
    	pos = startMatch.index;
        endPos = pos + startMatch[0].length;
                
		// loop thru the captured parenthesis
        for (i=1, tkn=0; tkn < token.children.tokens.length; i++, tkn++)
        {
        
    		// check if we matched a keyword with a general rule  		
    		if (token.children.keywords)
    		{
    			var substr = src.substring( pos );
    			for (j=0; j<token.children.keywords.length; j++)
    			{

    				// only keyword groups declared before this token rule
    				// (lower .weight) may take precedence over it
    				if ( tkn > token.children.keywords[j].weight && substr.length >= token.children.keywords[j].minLength)
    				{
    					//console.log('tkn: %d - NAME: %d - WEIGHT: %d - MINLLENGTH: %d', tkn, tknz.keywords[j].name, tknz.keywords[j].weight, tknz.keywords[j].minLength );   					
   						
   						var kw = findKeyword( src.substring( pos ), token.children.keywords[j], 'in' );
   						if (kw)
   						{
   							console.log('('+endPos+') UNMATCHED: %s - KEYWORD: %s', kw.unmatched, kw.keyword);
   							if (kw.unmatched)
   								addToken( token, kw.unmatched );
   							if (kw.keyword)
   								addToken( token.children.keywords[j], kw.keyword);
   								
   							// resume scanning right after the keyword
   							token.children.re.lastIndex = pos + kw.unmatched.length + kw.keyword.length;
   							skipLoop = true;
   							break;
   						}
    				}
    			}
    		}
    		               	
            // check the first capture parentesis which was matched
            if (!skipLoop && startMatch[i])
            {          
                //console.log('Match %d %s: %s', tkn, tknz.tokens[tkn].name, startMatch[i]);
                
                //blocks.push( startMatch[0], tknz.tokens[tkn].name );
                addToken( token.children.tokens[tkn], startMatch[0] );
                
                // check if the token has an end delimiter
                if (token.children.tokens[tkn].children)
                {
				    console.log('START OF CONTEXT FOUND (' + token.children.tokens[tkn].name + ') : ' + startMatch[0]);
                    
                    newContext = true;                    
                    
                    // we have matched a dinamic context, so we need to rebuild the escape rule
                    if (token.children.tokens[tkn].parenthesis)
                    {
                    	dinamic = token.children.tokens[tkn].children.template;
                    	console.log('TEMPLATE: ' + dinamic);
                    	// splice each captured group into its
                    	// #BACKREFERENCEn# slot, escaping regexp metachars
                    	for (var j=1; j<=token.children.tokens[tkn].parenthesis; j++)
                    	{
                    		if (startMatch[i+j])
                       			dinamic = dinamic.replace( '#BACKREFERENCE'+j+'#', startMatch[i+j].replace( escapeRe, '\\$&' ) );
                    	}
                    }
                    
                    // point the active tokenizer to the new context
                    //token.context = token.context.tokens[tkn].children;
                    token = token.children.tokens[tkn];
                    // if it's dinamic then we rebuild the regular expression
                    if (dinamic)
                    	token.children.re = new RegExp( dinamic, 'g' );
                    token.children.re.lastIndex = endPos;
                    
                    console.log('RE: ' + token.children.re);
                }
                else
                {
                	console.log('TOKEN %s: %s', token.children.tokens[tkn].name, startMatch[0]); 
                }
                
                skipLoop = true;
                //break;
            }
        	
        	// skip over the capturing groups belonging to this token's rule
        	if (token.children.tokens[tkn] && token.children.tokens[tkn].parenthesis)
        		i += token.children.tokens[tkn].parenthesis;
     	}
        
        // if none of the start tokens were matched, check if the end of context one was
        if (!newContext && startMatch[i])
        {
            console.log('END OF CONTEXT FOUND: %s', startMatch[0]);
            //blocks.push( new TAP_Buffer_Block( pos, state.slice(0,state.length) ) );
                        
            addToken( token, startMatch[0] );
            
            // NOTE(review): `token` has no .context property (cf. the
            // commented-out `token.context` line above — leftover of a
            // refactor), so this likely throws; `token.prevContext.parent`
            // looks like the intended way back up — TODO confirm
            token = token.context.parent;
            token.children.re.lastIndex = endPos;
        }
    }
    
    // trailing text after the last token: same keyword-then-unmatched
    // treatment as between tokens
    if ( endPos < src.length )
    {
    	//console.log( 'UNMATCHED TEXT ' + src.substring( endPos, startMatch.index ) );
            
        if (token.children.keywords)
        {
        	var substr = src.substring( endPos );
            for (j=0; j<token.children.keywords.length && substr.length; j++)
            {
            	if (substr.length >= token.children.keywords[j].minLength)
            	{
            		var kw = findKeyword( substr, token.children.keywords[j], 'post' );
            		if (kw)
            		{
            			console.log('UNMATCHED: %s - KEYWORD: %s', kw.unmatched, kw.keyword);
   						if (kw.unmatched)
   							addToken( token, kw.unmatched );
   						if (kw.keyword)
   							addToken( token.children.keywords[j], kw.keyword);

            			substr = substr.substring( kw.unmatched.length + kw.keyword.length );
            			endPos += kw.unmatched.length + kw.keyword.length;   
            			j = -1;  //reset the loop
            		}
            	}
            }
        }
            
        if (endPos < src.length)
        {
        	addToken( token, src.substring( endPos ) );
        	console.log( 'UNMATCHED TEXT %d: %s', endPos, src.substring( endPos ) );
        }
       	//this.blocks.push( new TAP_Buffer_Block( endPos, state.concat( [-1] ) ) );
	}
}

// Wire the built PHP tokenizer into a root token node and run a smoke test.
var phpLexer = { 'name': 'php' };
lexer.parent = phpLexer;
phpLexer.children = lexer;
parseLine( '$var /* as */ function test();', phpLexer );
// fix: Object.prototype.toSource() was a Firefox-only extension, long since
// removed — it throws on every modern engine. Log the object directly (the
// console handles the parent/children cycles, JSON.stringify would not).
console.log( phpLexer );
</script>

</head>
<body>
<div id="out"></div>
</body>
</html>