/*
 * Parser.java
 *
 * Created on August 3, 2006, 4:44 PM
 *
 */

package galronnlp.perceptron;

import galronnlp.io.XMLGrammarReader;
import galronnlp.util.ProductionGenerator;
import galronnlp.util.SortedLinkedList;
import galronnlp.util.Symbol;
import galronnlp.util.SymbolGenerator;
import galronnlp.util.Tuple;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import org.xml.sax.XMLReader;

/**
 * This is an implementation of the incremental beam-search parser described in
 * the Collins & Roark 2004 paper.
 *
 *
 * @author Daniel A. Galron
 */
public class Parser {
    
    /** The grammar used to propose chains and allowable attachments. */
    Grammar g;
    /** Interns strings into Symbol objects shared with the grammar. */
    SymbolGenerator sg;
    /** Generates productions, shared with the grammar. */
    ProductionGenerator pg;
    /** The beam stack of candidate partial parses at the current word position. */
    ParserStack stack;
    /** The perceptron parameter (weight) vector used to score parses. */
    FeatureVector parameters;
    /** Beam width: candidates scoring too far below the best are pruned. */
    double gamma;
    
    /** Verbose per-step tracing. */
    boolean DEBUG = false;
    /** Summary tracing (stack sizes and scores per word). */
    boolean D2 = false;
    
    /** Creates a new instance of Parser with a fresh, empty grammar. */
    public Parser() {
        pg = new ProductionGenerator();
        sg = new SymbolGenerator();
        g = new Grammar(pg, sg);
    }
    
    /**
     * Create a new instance of parser.
     *
     * @param g The grammar for the parser to use
     * @param gamma The value for the beam to use.
     */
    public Parser(Grammar g, double gamma) {
        this.g = g;
        this.pg = g.prodGen;
        this.sg = g.symGen;
        this.gamma = gamma;
    }
    
    /**
     * Reads a grammar from the XML file named by args[0], a single-line
     * sentence from the file named by args[1], and prints the best parse.
     */
    public static void main(String[] args) {
        Parser parser = new Parser();
        XMLGrammarReader reader = new XMLGrammarReader(args[0], parser.pg, parser.sg);
        parser.g = reader.g;
        LinkedList<String> is = new LinkedList<String>();
        BufferedReader filereader = null;
        try {
            filereader = new BufferedReader(new FileReader(new File(args[1])));
            String[] sent = filereader.readLine().split(" ");
            for(int i = 0; i < sent.length; i++) {
                is.addLast(sent[i]);
            }
        } catch(IOException e) {
            e.printStackTrace();
        } finally {
            // Close the reader even on failure; previously it was leaked.
            if(filereader != null) {
                try {
                    filereader.close();
                } catch(IOException e) {
                    // best-effort close; nothing sensible to do on failure
                }
            }
        }
        System.out.println(parser.parse(is, 0));
    }
    
    /**
     * Parses a sentence while checking, after each word, that the prefix of the
     * gold-standard tree is still on the beam. If the gold prefix has been pruned,
     * parsing stops immediately and the current best partial parse is returned so
     * the perceptron parameters can be updated ("early update" in the
     * Collins &amp; Roark 2004 training algorithm).
     *
     * @param sentence the sentence to parse
     * @param sentNo the sentence number (used in diagnostics only)
     * @param goldStandard the gold-standard parse tree for this sentence
     * @return the best (possibly partial) parse found
     */
    public Node earlyUpdateParse(LinkedList<String> sentence, int sentNo, Node goldStandard) {
        return parseInternal(sentence, sentNo, goldStandard);
    }
    
    /**
     * The implementation of the parse function described in the paper.
     *
     * @param sentence The sentence to parse (as a <code>LinkedList</code> of <code>String</code>)
     * @param sentNo The sentence number to parse (used in diagnostics only)
     * @return the best (possibly partial) parse found
     */
    public Node parse(LinkedList<String> sentence, int sentNo) {
        return parseInternal(sentence, sentNo, null);
    }
    
    /**
     * Shared implementation of {@link #parse} and {@link #earlyUpdateParse};
     * the two public methods were previously near-identical 150-line copies.
     *
     * @param sentence the sentence to parse
     * @param sentNo the sentence number (used in diagnostics only)
     * @param goldStandard if non-null, perform the early-update check against it
     *        after each word; if null, parse the whole sentence unconditionally
     * @return the best (possibly partial) parse found
     */
    private Node parseInternal(LinkedList<String> sentence, int sentNo, Node goldStandard) {
        if(DEBUG || D2)
            System.out.println(sentence);
        // Random-access copy: LinkedList.get(i) is O(i), which made the word
        // loop below quadratic in the sentence length.
        List<String> words = new ArrayList<String>(sentence);
        // Get the initial chains - all chains going from S! to the first word of
        // the sentence. For a one-word sentence there is no lookahead, so pass
        // null as the lookahead token and POS, mirroring the last-word case
        // below (previously a one-word sentence threw on get(1)).
        LinkedList<Node> initialChains;
        if(words.size() > 1)
            initialChains = g.getChains(sg.create("S!"), sg.create(words.get(0)), sg.create(words.get(1)), g.lexicon.get(sg.create(words.get(1))));
        else
            initialChains = g.getChains(sg.create("S!"), sg.create(words.get(0)), null, null);
        // Initialize the stack parser with the given parameters, gamma, with a null hypothesis list
        stack = new ParserStack(parameters, gamma, null, D2, true);
        if(DEBUG) {
            System.out.println("INITIALIZING");
            System.out.println("================================================");
        }
        // Push each chain on the stack. (No fireFeatures call is needed here:
        // the features get fired when the chains are created.)
        for(Iterator<Node> it = initialChains.iterator(); it.hasNext();) {
            Node n = it.next();
            if(DEBUG)
                System.out.println(n);
            stack.push(n);
        }
        if(DEBUG) {
            System.out.println("INITIAL STACK SIZE: " + stack.size());
            System.out.println("================================================");
        }
        // Iterate through all remaining words in the sentence
        ParserStack lastStep = null;
        for(int i = 1; i < words.size(); i++) {
            String word = words.get(i);
            if(DEBUG) {
                System.out.println("================================================");
                System.out.println("CONSIDERING CHAINS FOR WORD,    " + word);
            }
            // Retrieve all chains whose tail is the word; if the word is the last
            // word in the sentence, use null as the lookahead token and POS.
            LinkedList<Node> chains;
            if(i + 1 != words.size())
                chains = g.getChains(sg.create(word), sg.create(words.get(i+1)), g.lexicon.get(sg.create(words.get(i+1))));
            else
                chains = g.getChains(sg.create(word), null, null);
            if(DEBUG) {
                System.out.println(chains.size() + " CHAINS FOUND");
                System.out.println("-------------------------------------------");
                for(Iterator<Node> chain = chains.iterator(); chain.hasNext();) {
                    System.out.println(chain.next());
                    System.out.println("-------------");
                }
            }
            // Copy the stack to lastStep, so that if nothing ends up getting pushed onto the stack
            // we can back up and return a partial parse. This is needed for the parameter estimation
            // process, since initially the parameter vector is set to 0, and viable complete parses
            // may be filtered out mid-way through the parsing process.
            lastStep = new ParserStack(stack);
            // Enumerate every attachment of every chain to every candidate
            // popped from the stack.
            SortedLinkedList<Node> candidateHypotheses = new SortedLinkedList<Node>();
            while(stack.size() > 0) {
                Node leftSubtree = (Node)stack.pop();
                for(Iterator<Node> it = chains.iterator(); it.hasNext();) {
                    Node chain = it.next();
                    if(DEBUG) {
                        System.out.println("CONSIDERING LEFT SUBTREE: ");
                        System.out.println(leftSubtree);
                        System.out.println("ATTACHING CHAIN: ");
                        System.out.println(chain);
                    }
                    // attach all the possible chains to the candidate popped from the stack
                    SortedLinkedList<Node> list = attachAll(leftSubtree, chain);
                    // merge all resulting possible partial parses onto the candidate hypotheses list
                    candidateHypotheses.merge(list);
                    if(DEBUG) {
                        System.out.println("YIELDED " + list.size() + " ATTACHMENT POSSIBILITIES");
                        System.out.println("CANDIDATE HYPOTHESES SIZE: " + candidateHypotheses.size());
                    }
                }
            }
            // Reset the stack with the new list of candidate hypotheses
            stack = new ParserStack(parameters, gamma, candidateHypotheses, D2, true);
            // Iterate through the candidate hypotheses backwards (i.e. in
            // descending order of score). push returns true if the hypothesis
            // survived the beam, false if it was filtered out.
            for(int j = candidateHypotheses.size() - 1; j >= 0; j--) {
                Node p = candidateHypotheses.get(j);
                if(stack.push(p)) {
                    if(DEBUG) {
                        System.out.println("PUSHED ONTO STACK: ");
                        System.out.println(p);
                    }
                } else {
                    if(DEBUG) {
                        System.out.println("FILTERED OUT: ");
                        System.out.println(p);
                    }
                }
            }
            // Early update: if the prefix of the gold-standard tree is no longer
            // on the stack, stop and return the highest scoring partial parse so
            // the caller can update the parameters. (A leftover System.exit(2)
            // previously made this return unreachable and killed the JVM.)
            if(goldStandard != null && !stack.contains(goldStandard.getPrefix(i + 1))) {
                System.err.println("Early Update on sentence " + sentNo);
                return (Node)stack.getHighest();
            }
            if(DEBUG || D2) {
                System.out.println("+-------------------------------------------------------------");
                System.out.println("|");
                System.out.println("|");
                System.out.println("|   THERE ARE " + stack.size() + " PARTIAL PARSES ON THE STACK");
                System.out.println("|   " + stack.printScores());
                System.out.println("|");
                System.out.println("|");
                System.out.println("+-------------------------------------------------------------");
            }
            // In case the stack is empty, then return the highest scoring partial parse
            // from the last step in the parser.
            if(stack.size() == 0) {
                System.err.println("SENTENCE: " + sentNo + " FULL PARSE NOT FOUND: RETURNING PARTIAL PARSE");
                return (Node)lastStep.getHighest();
            }
        }
        if(DEBUG)
            System.out.println("THERE ARE " + stack.size() + " POSSIBLE PARSES");
        return (Node)stack.getHighest();
    }
    
    /**
     * Creates all possible trees derived from attaching newChain to leftSubtree.
     * For each node on the right frontier of leftSubtree whose (label,
     * last-child label) pair is an allowable triple for the head of the chain,
     * a copy of leftSubtree with a copy of the chain attached at that point is
     * produced.
     *
     * @param leftSubtree the partial parse to extend (not modified; copies are made)
     * @param newChain the chain to attach (not modified; copies are attached)
     * @return the resulting candidate parses, kept in sorted order
     */
    public SortedLinkedList<Node> attachAll(Node leftSubtree, Node newChain) {
        SortedLinkedList<Node> ret = new SortedLinkedList<Node>();

        // Get the set of allowable triples for the head of the chain to attach;
        // if there are none, no attachment is possible and the result is empty.
        HashSet<Tuple> allowables = g.allowableTriples.get(newChain.label());
        if(allowables != null) {
            Node leftSubCopy = leftSubtree.copy();
            // iterate through all values of the right depth of the leftSubtree
            // NOTE(review): the bound rightDepth() - 1 skips the deepest
            // right-frontier node -- confirm that this is intentional.
            for(int i = 0; i < leftSubtree.rightDepth() - 1; i++) {
                // Get a pointer to the right-most node at level i
                Node nodePointer = leftSubCopy.getRightMostNode(i);
                // The attachment site is viable if (parent label, last child
                // label) is in the allowable-triples set.
                Tuple tup = new Tuple(2);
                tup.put(0, nodePointer.label());
                tup.put(1, nodePointer.getChildren().getLast().label());
                if(allowables.contains(tup)) {
                    // make a copy of newChain, attach it, and fire the features
                    // on the new tree
                    nodePointer.addChild(newChain.copy());
                    leftSubCopy.fireFeatures(g.rules);
                    // add it to the return list
                    ret.insort(leftSubCopy);
                    // start from a fresh copy for the next attachment point
                    leftSubCopy = leftSubtree.copy();
                }
            }
        }
        return ret;
    }
}
