/*
 * Copyright 2012 Subhabrata Ghosh <subho.ghosh at langene.net>.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */



package net.langene.nlp.stanford;

//~--- non-JDK imports --------------------------------------------------------

import edu.stanford.nlp.ling.CoreAnnotations.PartOfSpeechAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.TextAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.TokensAnnotation;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.util.CoreMap;

import net.langene.nlp.Context;
import net.langene.nlp.pg.PGDBHelper;
import net.langene.nlp.pg.PGItem;

import org.apache.commons.configuration.ConfigurationException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

//~--- JDK imports ------------------------------------------------------------

import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileReader;

import java.util.List;
import java.util.Properties;

//~--- classes ----------------------------------------------------------------

/**
 *
 * @author Subhabrata Ghosh <subho.ghosh at langene.net>
 */
public class StanfordParser {
    private static final Logger	_logger = LoggerFactory.getLogger(StanfordParser.class);

    //~--- fields -------------------------------------------------------------

    /** Output directory for parsed files (net.langene.core.work.directory). */
    private String		outdir   = null;

    /** Directory holding the input text files (net.langene.pg.outdir + "/text/"). */
    private String		pgtxtdir = null;

    /** CoreNLP pipeline, built once in {@link #setup()} from the configured annotators. */
    private StanfordCoreNLP	pipeline = null;

    //~--- methods ------------------------------------------------------------

    /**
     * Setup the parser environment.
     * All initialization tasks are performed here: the output and input
     * directories are resolved from configuration and the Stanford CoreNLP
     * pipeline is constructed with the configured annotator list.
     *
     * @throws ConfigurationException if any required configuration parameter
     *                                is missing or empty.
     * @throws Exception              on any other initialization failure.
     */
    public void setup() throws Exception {
        outdir = Context.get().getConfig().getString("net.langene.core.work.directory");

        if ((outdir == null) || outdir.isEmpty()) {
            throw new ConfigurationException(
                "Invalid Configuration : Missing parameter net.langene.core.work.directory.");
        }

        pgtxtdir = Context.get().getConfig().getString("net.langene.pg.outdir");

        // BUGFIX: validate BEFORE appending "/text/". Previously the check ran
        // after concatenation, and a null config value became the literal
        // string "null/text/", so the check could never fire.
        if ((pgtxtdir == null) || pgtxtdir.isEmpty()) {
            throw new ConfigurationException(
                "Invalid Configuration : Missing or empty parameter [net.langene.pg.outdir]");
        }

        pgtxtdir = pgtxtdir + "/text/";

        _logger.info("Text files will be read from [" + pgtxtdir + "]");

        List<String>	listan = Context.get().getConfig().getList("net.langene.stanford.annotators");

        if ((listan == null) || listan.isEmpty()) {
            throw new ConfigurationException(
                "Invalid Configuration : Missing parameter net.langene.stanford.annotators.");
        }

        // CoreNLP expects a comma-separated annotator list, e.g. "tokenize, ssplit, pos".
        String	annotators = String.join(", ", listan);

        Properties	props = new Properties();

        props.put("annotators", annotators);
        pipeline = new StanfordCoreNLP(props);
    }

    /**
     * Parse all the text files in/under the specified directory.
     * Recurses into subdirectories; only files ending in ".txt" are parsed.
     *
     * @param directory Directory containing input text files.
     * @throws Exception if the path is not a readable directory, or parsing fails.
     */
    public void parseDirectory(String directory) throws Exception {
        File		di       = new File(directory);
        String[]	contents = di.list();

        // File.list() returns null for a missing/unreadable/non-directory path;
        // previously this caused a NullPointerException in the loop below.
        if (contents == null) {
            throw new java.io.IOException("Unable to list directory [" + di.getAbsolutePath() + "]");
        }

        for (String part : contents) {
            File	sdi = new File(di.getAbsoluteFile() + "/" + part);

            if (sdi.isDirectory()) {
                parseDirectory(sdi.getAbsolutePath());
            } else {
                if (part.endsWith(".txt")) {
                    _logger.debug("Parsing text input [" + sdi.getAbsolutePath() + "]...");
                    parseFile(sdi.getAbsolutePath());
                }
            }
        }
    }

    /**
     * Load the unparsed records from the Db and parse the corresponding
     * text input files. Records whose input file is missing are marked
     * unprocessed and skipped; all others are updated with the path of the
     * generated parser output.
     *
     * @throws Exception on database or parse failures.
     */
    public void parse() throws Exception {
        List<PGItem>	items = PGDBHelper.getItemUnparsedRecords();

        if ((items != null) && (items.size() > 0)) {
            for (PGItem item : items) {
                String	filename = pgtxtdir + "/" + item.Id + ".txt";
                File	fi       = new File(filename);

                if (!fi.exists()) {
                    _logger.error("Input text [" + fi.getAbsoluteFile() + "] not found...");
                    item.ParsedFile = null;
                    item.Processed  = false;
                    PGDBHelper.save(item);

                    continue;
                }

                item.ParsedFile = parseFile(filename);
                PGDBHelper.save(item);
            }
        }
    }

    /**
     * Parse the specified file.
     * Runs the CoreNLP pipeline over the whole file and writes one line per
     * sentence to "&lt;outdir&gt;/&lt;basename&gt;.stanford.txt", each token
     * rendered as "word/POS".
     * NOTE(review): reads and writes in the platform default charset — assumed
     * acceptable for this corpus; confirm if inputs may be UTF-8 on a
     * non-UTF-8 platform.
     *
     * @param filename File to Parse.
     * @return Parser Output file (absolute path).
     *
     * @throws Exception on I/O or annotation failures.
     */
    public String parseFile(String filename) throws Exception {
        StringBuilder	buffer = new StringBuilder();

        // try-with-resources: the reader previously leaked if readLine() threw.
        try (BufferedReader br = new BufferedReader(new FileReader(filename))) {
            String	line;

            while ((line = br.readLine()) != null) {
                buffer.append(line).append("\n");
            }
        }

        // create an empty Annotation just with the given text
        Annotation	document = new Annotation(buffer.toString());

        // run all Annotators on this text
        pipeline.annotate(document);

        File	fi     = new File(filename);
        String	ofname = fi.getName();
        int	lindx  = ofname.lastIndexOf('.');

        // Strip the extension; a file without one previously crashed on
        // substring(0, -1).
        if (lindx > 0) {
            ofname = ofname.substring(0, lindx);
        }

        File	ofi = new File(outdir + "/" + ofname + ".stanford.txt");

        // try-with-resources: the stream previously leaked if a write threw.
        try (FileOutputStream fos = new FileOutputStream(ofi)) {

            // these are all the sentences in this document
            // a CoreMap is essentially a Map that uses class objects as keys and has values with custom types
            List<CoreMap>	sentences = document.get(SentencesAnnotation.class);

            for (CoreMap sentence : sentences) {
                StringBuilder	sentbuf = new StringBuilder();
                boolean		start   = true;

                // traversing the words in the current sentence
                // a CoreLabel is a CoreMap with additional token-specific methods
                for (CoreLabel token : sentence.get(TokensAnnotation.class)) {

                    // this is the text of the token
                    String	word = token.get(TextAnnotation.class);

                    // this is the POS tag of the token
                    String	pos = token.get(PartOfSpeechAnnotation.class);

                    if (start) {
                        start = false;
                    } else {
                        sentbuf.append(" ");
                    }

                    sentbuf.append(word).append("/").append(pos);
                }

                sentbuf.append("\n");
                fos.write(sentbuf.toString().getBytes());
            }

            fos.flush();
        }

        return ofi.getAbsolutePath();
    }
}


//~ Formatted by Jindent --- http://www.jindent.com
