package no.uio.ifi.cop.parse.formula;

import java.util.Set;
import java.util.HashSet;

import no.uio.ifi.cop.cl.Axiom;
import no.uio.ifi.cop.cl.Conjunction;
import no.uio.ifi.cop.cl.RHS;
import no.uio.ifi.cop.cl.Theory;
import no.uio.ifi.cop.cl.Term;
import no.uio.ifi.cop.cl.Constant;
import no.uio.ifi.cop.cl.Variable;
import no.uio.ifi.cop.cl.Formula;
import no.uio.ifi.cop.cl.Predicate;
import no.uio.ifi.cop.cl.Substitution;
import no.uio.ifi.cop.common.Log;
import no.uio.ifi.cop.lex.RegLexer;
import no.uio.ifi.cop.lex.Token;

/**
 * Parser for Geolog like syntax, FOL axioms (implications).
 *
 * RegLexer lexes a file based on regular expressions, i.e.,
 * it divides the input stream or file into legal tokens. This
 * class makes sure they come in a legal order according to the
 * grammar, and if so, the term structure is returned.
 *
 * @version 1.0
 * @author bjarneh@ifi.uio.no
 */

public class Parser{

    private static Parser instance;

    RegLexer lexer;

    // Q: why don't we use enums? A: they stink
    private final int  LOWERCASE   = 1;
    private final int  UPPERCASE   = 2;
    private final int  SEPARATOR   = 3; 
    private final int  STARTBRACE  = 4;
    private final int  ENDBRACE    = 5; 
    private final int  ENDPROLOG   = 6;
    private final int  COMMENT     = 7;
    private final int  WHITESPACE  = 8;

    private Parser(){
        // the lexer will give us tokens based on the grammer below
        lexer = new RegLexer();
        // regex's for the different tokens
        lexer.addToken( LOWERCASE, "[a-z][A-Za-z0-9_]*", "LOWERCASE");
        lexer.addToken( COMMENT, "%[^\n]*", "COMMENT");
        lexer.addToken( UPPERCASE, "[A-Z_][A-Za-z0-9_]*", "UPPERCASE");
        lexer.addToken( SEPARATOR, ",|;|=>", "SEPARATOR");
        lexer.addToken( STARTBRACE, "\\(", "STARTBRACE");
        lexer.addToken( ENDBRACE, "\\)", "ENDBRACE");
        lexer.addToken( ENDPROLOG, "\\.", "ENDPROLOG");
        lexer.addToken( WHITESPACE, "\\s+", "WHITESPACE");
        // never return comments or whitespace
        lexer.ignoreToken( WHITESPACE );
        lexer.ignoreToken( COMMENT );
    }

    public static Parser getParser(){
        if( instance == null ){
            instance = new Parser();
        }
        return instance;
    }

    private Token skipExpectedToken(String what){

        StringBuilder sb;

        if(lexer.empty()){
            sb = new StringBuilder();
            lexer.printValidTokens(sb);
            sb.append(" <= HERE\n\nunexpected end of tokens\n");
            Log.fatalln(sb.toString());
        }

        Token t = lexer.next();

        if( ! t.content.equals(what) ){
            sb = new StringBuilder();
            lexer.printValidTokens(sb, t.placement);
            sb.append(" <= HERE\n\nexpected: ");
            sb.append(String.format("'%s' got '%s'", what, t.content));
            Log.fatalln(sb.toString());
        }
        
        return t;
        
    }

    private Token skipExpectedType(int type){

        StringBuilder sb;

        if(lexer.empty()){
            sb = new StringBuilder();
            lexer.printValidTokens(sb);
            sb.append(" <= HERE\n\nunexpected end of tokens");
            Log.fatalln(sb.toString());
        }

        Token t = lexer.next();

        if( t.ordinal != type ){
            sb = new StringBuilder();
            lexer.printValidTokens(sb, t.placement);
            sb.append(" <= HERE\n\n[ERROR] expected type:");
            sb.append(String.format("'%s' got '%s'",t2s(type),t2s(t.ordinal)));
            Log.fatalln(sb.toString());
        }
        
        return t;
    }

    public Theory parseFile(String filepath){
        lexer.lexFile(filepath);
        return parseTheory();
    }

    public Theory parseCharSequence(CharSequence buffer){
        lexer.lexCharSequence(buffer);
        return parseTheory();
    }

    // just for testing purposes
    public void parseTestString(String buffer){
        lexer.lexCharSequence(buffer);
    }

    public Term parseTerm(){

        Token t, tmp;

        t = lexer.nextOK();

        if( t.ordinal == UPPERCASE ){
            return new Variable( t.content );
        }

        if( t.ordinal != LOWERCASE ){
            StringBuilder sb = new StringBuilder();
            lexer.printValidTokens(sb, t.placement);
            sb.append(" <= HERE\n\nunexpected type:");
            sb.append(String.format("'%s'", t2s(t.ordinal)));
            Log.fatalln(sb.toString());
        }else{

            if(! lexer.empty()){

                tmp = lexer.peek();

                if( tmp.ordinal == STARTBRACE ){

                    Predicate p = new Predicate(TermFactory.getPredicateId( t.content ));

                    skipExpectedType( STARTBRACE );
                    parseTermList( p );
                    skipExpectedType( ENDBRACE );

                    return p;

                }
            }
        }

        return TermFactory.getConstant( t.content );
    }


    protected void parseTermList(Predicate p){

        // needs to have at least one inner Term
        p.addTerm( parseTerm() );

        Token token = lexer.peekOK();

        if( token.ordinal == ENDBRACE ){
            return;
        }else if( token.content.equals(",") ){
            lexer.next(); // go past ','
            parseTermList( p );
        }else{
            lexer.next(); // go past 'whatever'
            StringBuilder sb = new StringBuilder();
            lexer.printValidTokens( sb, token.placement );
            sb.append(" <= HERE\n\nexpected ',' or ')'");
            sb.append(String.format("got:'%s'",token.content));
            Log.fatalln(sb.toString());
        }
    }

    private Predicate constant2predicate(Term t) {
        return new Predicate(TermFactory.getPredicateId( t.getId() ));
    }

    protected Conjunction parseConjunction() {

        Term tmp;
        Token token = null;

        Conjunction conjunct = new Conjunction();

        do{
            // needs to have at least one term
            tmp = parseTerm();

            if( tmp.isVariable() ){

                StringBuilder sb = new StringBuilder();
                if( token != null ){
                    lexer.printValidTokens(sb, token.placement);
                }else{
                    lexer.printValidTokens(sb);
                }
                sb.append(" <= HERE\n\n[ERROR] expected 'Predicate' got 'Variable'\n");
                Log.fatalln(sb.toString());

            }else if( tmp.isConstant() ){

                // special constants
                if( tmp.getId().equals("true")  ||
                    tmp.getId().equals("false") ||
                    tmp.getId().equals("goal")  )
                {

                    conjunct.add( tmp );
                    return conjunct;

                }else{
                    conjunct.add( constant2predicate( tmp ) );
                }

            }else{ // tmp.isPredicate()
                conjunct.add( tmp );
            }

        // parse terms as long as we hit ','
        }while( (token = lexer.peekOK()).content.equals(",") && lexer.next() != null); 


        return conjunct;

    }

    protected RHS parseRHS(){

        Conjunction conjunct;
        Token token = null;

        RHS rhs = new RHS();

        do{
            // needs to have at least one conjunct
            conjunct = parseConjunction();

            for(Term t: conjunct) {
                rhs.add(t);
            }

        // parse terms as long as we hit ';'
        }while( (token = lexer.peekOK()).content.equals(";") &&
                lexer.next() != null &&
                rhs.disjunct()); 

        return rhs;
    }

    public Axiom parseAxiom(){

        Axiom ax = new Axiom();

        ax.setLHS( parseConjunction() );
        
        skipExpectedToken("=>");
        
        ax.setRHS( parseRHS() );

        skipExpectedToken(".");
        
        sanity(ax); // don't accept open terms in fact-rules etc.

        return ax;
    }

    private void sanity(Axiom ax) {

        // 0. do not accept disjunct in RHS
        ax.classify();

        // 1. do not accept anything but true as a LHS constant
        // 2. do not accept variables in RHS (replace with constants)
        if( ax.getLHS().getType() == Formula.CONSTANT ){

            // 1.
            if(! ax.getLHS().toString().equals("true") ){
                Log.fatal("malformed: %s => %s", ax.getLHS(),ax.getRHS());
            }

            // 2.
            Set<String> hashSet = new HashSet<String>();
            RHS rhs = ax.getRHS();
            rhs.gatherVariables(hashSet);

            // free variables in fact-rule
            if( hashSet.size() > 0 ){

                // this code is also in Prover but to avoid the
                // cyclic dependency it is placed here as well...
                Constant fresh;
                Substitution subst = new Substitution();

                for(String v : hashSet){
                    fresh = TermFactory.getFreshConstant();
                    subst.add(new Variable(v), fresh);
                }
                rhs.apply( subst );
            }
        }
    }

    protected Theory parseTheory(){

        Theory theory = new Theory();

        // needs to have at least one axiom
        Axiom ax = parseAxiom();
        
        theory.add(ax);

        while( ! lexer.empty() ){
            theory.add( parseAxiom() );
        }

        return theory;
    }

    // type2string
    private String t2s(int type){
        switch(type){
            case LOWERCASE:  return "LOWERCASE";
            case UPPERCASE:  return "UPPERCASE";
            case SEPARATOR:  return "SEPARATOR";
            case STARTBRACE: return "STARTBRACE";
            case ENDBRACE:   return "ENDBRACE";
            case ENDPROLOG:  return "ENDPROLOG";
            case COMMENT:    return "COMMENT";
            case WHITESPACE: return "WHITESPACE";
            default:         return "<UNKNOWN>";
        }
    }
}
