/*
 * ModelCC, distributed under ModelCC Shared Software License, www.modelcc.org
 */

package org.modelcc.lexer.lamb;

import java.io.IOException;
import java.io.Reader;
import java.io.Serializable;
import java.io.StringWriter;
import java.io.UncheckedIOException;
import java.io.Writer;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.modelcc.language.lexis.LexicalSpecification;
import org.modelcc.language.lexis.TokenOption;
import org.modelcc.language.lexis.TokenSpecification;
import org.modelcc.lexer.LexicalGraph;
import org.modelcc.lexer.Token;
import org.modelcc.lexer.recognizer.MatchedObject;

/**
 * Lamb - Lexer with AMBiguity Support
 * 
 * @author Luis Quesada (lquesada@modelcc.org), partially refactored by Fernando Berzal (berzal@modelcc.org)
 */
public class Lamb implements Serializable 
{
	/** 
	 * Input string (the full text being scanned, cached by {@link #scan}).
	 */
	private String inputs;
	    
    /**
     * Token list produced by the scanning phase.
     */
    private List<Token> tokens;


    /**
     * Build a token corresponding to the given token specification starting
     * at a specific position.
     *
     * @param ts Token specification
     * @param index Input string index
     * @return the token of the desired type at the given index, or
     *         {@code null} when the recognizer does not match there or the
     *         token builder rejects the match
     */
    private Token buildToken(TokenSpecification ts, int index) 
    {
        MatchedObject match = ts.getRecognizer().read(inputs, index);
        if (match == null)
            return null;

        String text = match.getText();
        int start = index;
        int end = index + text.length() - 1; // inclusive end position

        Token t = new Token(ts.getType(), match.getObject(), start, end, text);

        // The builder may veto the token; only vetted tokens are returned.
        return ts.getBuilder().build(t) ? t : null;
    }

    /**
     * Search state of each input position.
     */
    private enum Search {
        /** A token search must be started at this position. */
        OPEN,
        /** No token search is (currently) scheduled at this position. */
        CLOSED,
        /** This position is covered by an ignored token. */
        IGNORE
    }


    /**
     * Perform lexical analysis.
     *
     * @param ls the lexer specification
     * @param input the input character stream (not closed by this method)
     * @return the obtained lexical graph
     * @throws UncheckedIOException if reading the input fails
     */
    public LexicalGraph scan(LexicalSpecification ls, Reader input) 
    {
        inputs = readString(input);

        int inputstart = 0;
        int inputend = inputs.length() - 1;

        List<TokenSpecification> stspecs = ls.getTokenSpecifications();
        Map<TokenSpecification, Set<TokenSpecification>> precedes = ls.getPrecedences();

        Search[] search = scanInput(stspecs, precedes);

        // Trim leading and trailing ignored positions so the graph bounds
        // cover only significant input.
        while (inputstart < inputend && search[inputstart] == Search.IGNORE)
            inputstart++;
        while (inputend >= inputstart && search[inputend] == Search.IGNORE)
            inputend--;

        return buildLexicalGraph(inputstart, inputend, search);
    }


    /**
     * Read the whole character stream into a string.
     * The reader is owned by the caller and is deliberately not closed here.
     *
     * @param input the input character stream
     * @return the stream contents as a string
     * @throws UncheckedIOException if reading fails. (The previous version
     *         silently swallowed the exception and returned whatever had
     *         been read so far, corrupting the scan on I/O errors.)
     */
    private String readString(Reader input) 
    {
        StringWriter writer = new StringWriter();
        char[] buffer = new char[1024];
        int n;
        try {
            while ((n = input.read(buffer)) != -1)
                writer.write(buffer, 0, n);
        } catch (IOException e) {
            throw new UncheckedIOException("Error reading lexer input", e);
        }
        return writer.toString();
    }


    // Scanning phase
    // --------------

    /**
     * Scan the input, filling {@link #tokens} with every token found and
     * computing the search state of each input position.
     *
     * @param stspecs token specifications, in the order provided by the
     *                lexical specification
     * @param precedes for each token specification, the set of token
     *                 specifications it forbids at the positions it matches
     * @return the per-position search state array (length is input length+1)
     */
    private Search[] scanInput(List<TokenSpecification> stspecs,
            Map<TokenSpecification, Set<TokenSpecification>> precedes) 
    {
        int i, j, k;
        int start, end;               // Start and end positions of a token.
        Token t;                      // Current token.
        TokenSpecification ts;        // Current token specification.
        Set<TokenSpecification> pset; // Precedence set.

        // Token specifications forbidden by precedence at each position.
        // Generic array creation is unavoidably unchecked; the array is
        // local and only ever holds the declared element type.
        @SuppressWarnings("unchecked")
        Set<TokenSpecification>[] forbidden = new Set[inputs.length() + 1];

        for (i = 0; i < forbidden.length; i++)
            forbidden[i] = new HashSet<TokenSpecification>();

        // Whether a token search must be started at each input position.
        Search[] search = new Search[inputs.length() + 1];

        for (i = 0; i < search.length; i++)
            search[i] = Search.CLOSED;

        search[0] = Search.OPEN;


        // Main loop: launch a token search at every OPEN position.

        tokens = new ArrayList<Token>();

        for (i = 0; i < inputs.length(); i++) {
            if (search[i] != Search.OPEN)
                continue;

            for (j = 0; j < stspecs.size(); j++) {
                ts = stspecs.get(j);
                if (forbidden[i].contains(ts)) // Forbidden specifications are omitted.
                    continue;

                t = buildToken(ts, i);
                if (t == null)
                    continue;

                start = t.getStartIndex();
                end = t.getEndIndex();

                pset = precedes.get(ts);
                if (pset != null)
                    forbidden[i].addAll(pset);

                if (ts.getTokenOption() == TokenOption.CONSIDER) {
                    if (t.getStartIndex() <= t.getEndIndex())
                        tokens.add(t);
                } else { // Ignored token found.
                    if (search[end + 1] == Search.CLOSED)
                        search[end + 1] = Search.OPEN;
                    for (k = start; k <= end; k++)
                        search[k] = Search.IGNORE;
                }

                if (start > end) // Zero-length match: treat as one position.
                    end = start;
                if (end + 1 < inputs.length())
                    if (search[end + 1] == Search.CLOSED)
                        search[end + 1] = Search.OPEN;

                // Propagate the precedence-forbidden set over every position
                // the token covers. (pset is loop-invariant: the original
                // re-queried the map on every iteration.)
                if (pset != null)
                    for (k = start; k <= end; k++)
                        forbidden[k].addAll(pset);
            }
        }

        // Filter the token list to discard tokens starting at ignored positions.

        Iterator<Token> ite;

        for (ite = tokens.iterator(); ite.hasNext(); ) {
            t = ite.next();
            if (search[t.getStartIndex()] == Search.IGNORE)
                ite.remove();
        }

        return search;
    }

    // Graph generation step
    // ---------------------

    /**
     * Build the lexical graph from the scanned token list, linking each
     * token to every token that may immediately follow it.
     *
     * @param inputStart first significant input position
     * @param inputEnd last significant input position
     * @param search per-position search states (IGNORE marks skippable gaps)
     * @return the lexical graph
     */
    private LexicalGraph buildLexicalGraph(int inputStart, int inputEnd, Search[] search) 
    {
        int i, j;
        Token ti, tj; // Tokens being linked.
        int state;    // Linking automaton state (see below).
        int minend;   // Minimum end position among candidate successors.

        LexicalGraph graph = new LexicalGraph(tokens, inputStart, inputEnd);

        // Link tokens. The token list is ordered by start position, as
        // produced by the scanning phase. For each token ti, states:
        //   0 - still skipping tokens overlapping ti;
        //   1 - scanning candidate successors;
        //   2 - past all possible successors (stop).
        for (i = tokens.size() - 1; i >= 0; i--) {
            ti = tokens.get(i);
            state = 0;
            minend = inputs.length() + 1;
            for (j = i + 1; j < tokens.size() && state != 2; j++) {
                tj = tokens.get(j);
                switch (state) {
                case 0:
                    if (tj.getStartIndex() > ti.getEndIndex())
                        state = 1;
                    // Deliberate fall-through: the first non-overlapping
                    // token must itself be processed as a candidate.
                case 1:
                    if (tj.getStartIndex() > ti.getEndIndex()) {
                        if (tj.getStartIndex() > minend) {
                            state = 2; // No further successor can be adjacent.
                        } else {
                            minend = Math.min(minend, tj.getEndIndex());
                            // ti and tj are linked only when every position
                            // between them is covered by ignored tokens.
                            boolean consecutive = true;
                            for (int f = ti.getEndIndex() + 1; f < tj.getStartIndex(); f++) {
                                if (search[f] != Search.IGNORE)
                                    consecutive = false;
                            }
                            if (consecutive)
                                graph.link(ti, tj);
                        }
                    }
                }
            }
        }

        // Start token set: tokens with no predecessor start the graph.

        for (Token t : tokens) {
            if (graph.getPreceding(t) == null)
                graph.addStartToken(t);
        }

        return graph;
    }
}
