/*
 *  Copyright (C) 2010 Martin Haulrich <mwh.isv@cbs.dk>
 *
 *  This file is part of the MatrixParser package.
 *
 *  The MatrixParser program is free software: you can redistribute it and/or modify
 *  it under the terms of the GNU Lesser General Public License as published by
 *  the Free Software Foundation, either version 3 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public License
 *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
package org.osdtsystem.matrixparser.main;


import java.io.File;
import java.io.PrintWriter;
import java.io.Writer;
import java.util.ArrayList;
import java.util.List;
import org.osdtsystem.matrixparser.data.CONLLIterator;
import org.osdtsystem.matrixparser.data.CONLLSentence;
import org.osdtsystem.matrixparser.featureextraction.AbstractFeatureExtractor;
import org.osdtsystem.matrixparser.parsers.output.DependencyTree;
import org.osdtsystem.matrixparser.featureextraction.HigherOrderExtractor;
import org.osdtsystem.matrixparser.featureextraction.FeatureExtractor;
import org.osdtsystem.matrixparser.featureextraction.FeatureExtractorRegistry;
import org.osdtsystem.matrixparser.featureextraction.FirstOrderExtractorLabelled;
import org.osdtsystem.matrixparser.featureextraction.FirstOrderExtractorUnlabelled;
import org.osdtsystem.matrixparser.featureextraction.UnionExtractor;
import org.osdtsystem.matrixparser.features.DenseFeatureVector;
import org.osdtsystem.matrixparser.features.FeatureVector;
import org.osdtsystem.matrixparser.learners.Learner;
import org.osdtsystem.matrixparser.learners.LinearScorer;
import org.osdtsystem.matrixparser.learners.MIRALearner;
import org.osdtsystem.matrixparser.parsers.ParsingModel;
import org.osdtsystem.matrixparser.learners.Scorer;
import org.osdtsystem.matrixparser.features.WeightVector;
import org.osdtsystem.matrixparser.logging.DebugData;
import org.osdtsystem.matrixparser.logging.Log;
import org.osdtsystem.matrixparser.parsers.AbstractParser;
import org.osdtsystem.matrixparser.parsers.IncrementalLabMSTParser;
import org.osdtsystem.matrixparser.parsers.LabelledMSTParser;
import org.osdtsystem.matrixparser.parsers.Parser;
import org.osdtsystem.matrixparser.parsers.Trainer;
import org.osdtsystem.matrixparser.parsers.loss.LabelledLoss;
import org.osdtsystem.matrixparser.parsers.loss.Loss;

/**
 *
 * @author Martin Haulrich and Matthias Buch-Kromann
 */
/**
 * Command-line driver that trains an MST-based dependency parser on a CONLL
 * training corpus and then parses one or two evaluation corpora (the test
 * corpus and, optionally, the training corpus itself), writing the results
 * in CONLL and DTAG formats.
 *
 * All configuration comes from {@code Options}; see {@link #run} for the
 * option keys consumed.
 */
public class MstTester {
    // When true, a gold tree is computed for every evaluation sentence and
    // the accumulated loss per word is reported. Off by default (expensive,
    // and requires gold annotation in the evaluation data).
    final static boolean computeLoss = false;

    // Scorer and loss function shared by both parsers
    static Scorer scorer;
    static Loss lossFunction;

    // Feature extractors (all three roles are served by one union extractor)
    static FirstOrderExtractorUnlabelled unlabelledMstExtractor;
    static FirstOrderExtractorLabelled labelledMstExtractor;
    static HigherOrderExtractor cfExtractor;

    // Parsers: plain labelled MST and incremental labelled MST
    static LabelledMSTParser mstParser;
    static IncrementalLabMSTParser incParser;

    // Learner
    static Learner learner;

    // Trainer
    static Trainer trainer;

    // Parsing model (loaded from disk or created empty in run())
    static ParsingModel model = null;

    public static void main(String[] args) throws Exception {
        run(args);
    }

    /**
     * Runs the complete cycle: option parsing, model loading, alphabet
     * growth over the gold trees, weight training, and parsing of the
     * evaluation corpora.
     *
     * @param args command-line arguments, forwarded to {@code Options.parseArgs}
     * @throws Exception if option parsing, file I/O, training or parsing fails
     */
    static void run(String[] args) throws Exception {
        // Print tester
        Log.config("Running MstTester");

        // Parse options
        Options.parseArgs(args);

        // Model files and training iteration counts
        File mstModelFile = Options.fileOption("model.mst");
        File irpModelFile = Options.fileOption("model.irp");
        int iterations1 = Options.intOption("train.iterations1");
        int iterations2 = Options.intOption("train.iterations2");

        // Prefer a readable incremental (irp) model; fall back to an MST
        // model with frozen weights; otherwise start from an empty model.
        if (irpModelFile != null && irpModelFile.canRead()) {
            model = ParsingModel.load(irpModelFile);
        } else if (mstModelFile != null && mstModelFile.canRead()) {
            model = ParsingModel.load(mstModelFile);
            model.fixWeights();
        } else {
            model = new ParsingModel();
        }
        DebugData.model = model;

        // Scorer and loss function
        scorer = new LinearScorer();
        lossFunction = new LabelledLoss();

        // Feature extractors: a single union extractor serves all three roles.
        UnionExtractor extractor = new FeatureExtractorRegistry(model,
                Options.option("extractors")).unionExtractor();
        unlabelledMstExtractor = extractor;
        labelledMstExtractor = extractor;
        cfExtractor = extractor;

        // Parsers
        mstParser = new LabelledMSTParser("mst", scorer, unlabelledMstExtractor, labelledMstExtractor, model.labelHandler());
        incParser = new IncrementalLabMSTParser("irp", scorer, cfExtractor, unlabelledMstExtractor, labelledMstExtractor, model.labelHandler());

        // Learner and trainer
        learner = new MIRALearner(model);
        trainer = new Trainer(learner, mstParser, lossFunction, incParser);

        // Set parameters from options
        lossFunction.setLossForIncorrectLabel(Options.doubleOption("loss.labelweight"));

        // Disable ("kill") the feature-extractor genes named in the options
        FeatureExtractor.deadGenes.or(Options.bitsetOption("genes"));

        // Read complement file and notify parsers
        String complementFile = Options.option("complements.file");
        if (complementFile != null) {
            int lastComplementId = AbstractParser.readComplements(complementFile, model.labelHandler());
            mstParser.setLastComplementID(lastComplementId);
            incParser.setLastComplementID(lastComplementId);
        }

        // CONLL training and evaluation file names, and iterators
        String conllTrainingFilename = Options.option("train.file");
        String conllEvaluationFilename = Options.option("eval.file");
        CONLLIterator trainIterator = new CONLLIterator(conllTrainingFilename);

        // Compact the heap before measuring alphabet-growth memory usage
        System.gc();

        // Grow alphabet for the gold features of each training sentence
        Log.info("Growing alphabet...");
        long a = System.currentTimeMillis();
        int sent = 0;

        // Gold MST feature vectors are only needed when both training
        // phases are enabled.
        DenseFeatureVector mstFeatures = (iterations1 > 0 && iterations2 > 0)
                ? new DenseFeatureVector() : null;
        while (trainIterator.hasNext()) {
            CONLLSentence sentence = trainIterator.next();
            // Register every dependency label of the sentence (skip ROOT at 0)
            for (int i = 1; i < sentence.size(); ++i)
                model.labelHandler().getFeature(sentence.get(i).deprel());
            DependencyTree goldTree = incParser.goldTree(sentence);
            // Extract gold features for their alphabet-growing side effect;
            // the returned vector itself is not needed here.
            incParser.featureVector(goldTree);
            if (mstFeatures != null)
                mstFeatures.add(mstParser.featureVector(goldTree));
            ++sent;
        }
        trainer.setSentences(sent);
        trainIterator.reset();
        // Freeze all alphabets: no new features/labels after this point
        model.featureHandler().stopGrowth();
        model.labelHandler().stopGrowth();
        CONLLIterator.stopGrowth();
        long b = System.currentTimeMillis();
        long t = (b - a) / 1000;
        System.gc();
        Log.info("   " + sent + " sentences [" + model.featureHandler().alphabetSize() + " features, " + t + " seconds" +
                    ", "+ ((int) ((Runtime.getRuntime().totalMemory()
                - Runtime.getRuntime().freeMemory()) / 1024 / 1024)) + " MB heap]");

        // Print out gene use
        Log.config("Dead genes: " + Options.bitString(FeatureExtractor.deadGenes)
                + " " + FeatureExtractor.deadGenes.toString()
                + " inverted: " + AbstractFeatureExtractor.invertBitString(FeatureExtractor.deadGenes).toString());
        Log.info(unlabelledMstExtractor.geneUse());
        Log.info(labelledMstExtractor.geneUse());
        Log.info(cfExtractor.geneUse());

        // Train weights (iteration counts were read above — the original
        // re-read the same options here)
        trainer.train(trainIterator, iterations1, iterations2);
        // NOTE(review): the original stored model.weights() in an unused
        // local; the call is kept in case weight finalisation happens inside.
        model.weights();
        System.gc();
        Log.info("Heap usage: " + ((int) ((Runtime.getRuntime().totalMemory()
                - Runtime.getRuntime().freeMemory()) / 1024 / 1024)) + " MB");

        // Parse the evaluation file(s)
        Log.chandler.setLevel(Options.levelOption("parse.loglevel.console"));
        Log.phandler.setLevel(Options.levelOption("parse.loglevel.html"));
        a = System.currentTimeMillis();
        Log.info("Parsing...");
        List<String> evalFiles = new ArrayList<String>();
        List<Parser> evalParsers = new ArrayList<Parser>();
        if (Options.option("eval.output1") != null) {
            evalParsers.add(mstParser);
            evalFiles.add(Options.option("eval.output1"));
        }
        if (Options.option("eval.output2") != null) {
            evalParsers.add(incParser);
            evalFiles.add(Options.option("eval.output2"));
        }
        trainIterator.reset();
        // Optionally evaluate on the training corpus as well as the test corpus
        CONLLIterator[] evalIterators = {trainIterator,
            new CONLLIterator(conllEvaluationFilename)};
        if (! Options.booleanOption("eval.traincorpus")) {
            evalIterators[0] = null;
        }
        for (CONLLIterator evalIterator : evalIterators) {
            if (evalIterator == null)
                continue;
            boolean trainCorpus = evalIterator == trainIterator;
            for (int p = 0; p < evalParsers.size(); ++p) {
                // Open output files for this parser
                Parser parser = evalParsers.get(p);
                String outputFile = evalFiles.get(p);
                Writer writer = new PrintWriter(outputFile, "UTF-8");
                Writer writerDTAG = new PrintWriter(outputFile + ".dtag", "UTF-8");
                // Close the output files even if parsing throws
                // (the original leaked both handles on error).
                try {
                    // Print logging information
                    Log.phandler.openIteration("Parse " + (trainCorpus ? "training corpus" : "test corpus")
                            + " with " + parser.getClass().getSimpleName());
                    Log.config("Parse " + (trainCorpus ? "training corpus" : "test corpus")
                            + " with " + parser.getClass().getSimpleName());

                    // Process sentences
                    double totalLoss = 0;
                    int words = 0;
                    int i = 0;
                    while (evalIterator.hasNext()) {
                        // Print a progress dot every 100 sentences
                        if (i % 100 == 0)
                            Log.infoW(".");

                        // Read sentence
                        CONLLSentence sentence = evalIterator.next();
                        WeightVector weights = learner.getWeightVector();

                        // sentence.size() includes the artificial root token
                        words += sentence.size() - 1;
                        Log.phandler.openSentence();
                        Log.phandler.setSentenceName(sentence.toTokenString());

                        // Parse sentence; only test-corpus parses are written out
                        DependencyTree parse = parser.parse(sentence, weights);
                        if (! trainCorpus) {
                            parse.writeAsCONLL(writer);
                            parse.writeAsDTAG(writerDTAG);
                        }

                        // Try to compute loss wrt. gold tree
                        if (computeLoss) {
                            DependencyTree gold = parser.goldTree(sentence);
                            if (Log.debugParserActions) {
                                System.err.println("gold:   " + gold.toString());
                                System.err.println("system: " + parse.toString());
                            }

                            totalLoss += lossFunction.loss(gold, parse);
                        }
                        i++;
                        Log.phandler.closeSentence();
                    }
                    evalIterator.reset();
                    b = System.currentTimeMillis();
                    // NOTE(review): timing is cumulative from the start of the
                    // parsing phase, not per parser (preserved from original)
                    t = (b - a) / 1000;
                    System.gc();
                    Log.info("   finished parsing[" + (p + 1) + "]: " + " [" + t + " seconds,  "
                                + totalLoss / words + " avg.loss/word, "
                                + ((int) ((Runtime.getRuntime().totalMemory()
                                    - Runtime.getRuntime().freeMemory()) / 1024 / 1024)) + " MB heap]");
                } finally {
                    writer.close();
                    writerDTAG.close();
                }
                Log.phandler.closeIteration();
            }
            evalIterator.close();
        }

        Log.phandler.closeMain();
    }
}

