package telex;
/*
 * QueryExtractor.java
 *
 * Copyright (c) 2007-2008 Colin Bayer, Douglas Downey, Oren Etzioni,
 *   University of Washington Computer Science and Engineering
 * 
 * See the file "COPYING" in the root directory of this distribution for
 * full licensing information.
 */

import java.io.*;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.Set;

import com.aliasi.chunk.Chunk;
import com.aliasi.chunk.Chunking;
import com.aliasi.sentences.IndoEuropeanSentenceModel;
import com.aliasi.sentences.MedlineSentenceModel;
import com.aliasi.sentences.SentenceChunker;
import com.aliasi.sentences.SentenceModel;
import com.aliasi.tokenizer.IndoEuropeanTokenizerFactory;
import com.aliasi.tokenizer.TokenizerFactory;

public class QueryExtractor {

    // Total number of queries written across all extract() calls.
    private int mQueryCount = 0;

    /** Returns the total number of queries extracted so far. */
    public int getQueryCount() {
        return mQueryCount;
    }

    /**
     * Tags a query with the given location, writes it to the IQF writer, and
     * increments the query count.
     *
     * @param iw      open IQF writer to append to
     * @param q       query to tag and write
     * @param aLocTag value stored as the "loc" user feature on the query
     * @return true on success; false after printing an error message to stderr
     */
    private boolean writeQuery(IQFWriter iw, Query q, String aLocTag) {
        try {
            q.setUserFeature("loc", aLocTag);
            iw.writeQuery(q);
            mQueryCount++;
            return true;
        } catch (Exception e) {
            System.err.println("Couldn't write query (" + e + ")");
            return false;
        }
    }

    /**
     * Extracts n-gram queries from the document at aInputPath and writes them
     * in IQF format to aOutputPath.
     *
     * @param aInputPath  path of the plain-text document to read
     * @param aOutputPath path of the IQF file to create
     * @param aLocTag     value stored as the "loc" user feature on every query
     * @return true on success; false after printing an error message to stderr
     */
    public boolean extract(String aInputPath, String aOutputPath, String aLocTag) {
        SentenceChunker sd;
        LEXComputer lex = new LEXComputer(5);
        IQFWriter iw;

        ArrayList<String> sents = new ArrayList<String>();
        Sentence[] doc;

        try {
            TokenizerFactory TOKENIZER_FACTORY = IndoEuropeanTokenizerFactory.INSTANCE;
            // alternative: new MedlineSentenceModel()
            SentenceModel SENTENCE_MODEL = new IndoEuropeanSentenceModel(true, false);
            sd = new SentenceChunker(TOKENIZER_FACTORY, SENTENCE_MODEL);
        } catch (Exception e) {
            System.err.println("Couldn't create LingPipe sentence detector (" + e + ")");
            return false;
        }

        try {
            iw = new IQFWriter(aOutputPath);
        } catch (Exception e) {
            System.err.println("Couldn't open IQF file for writing (" + e + ")");
            return false;
        }

        // Read the document paragraph by paragraph (paragraphs are separated
        // by blank lines) and split each paragraph into sentences.
        // try-with-resources guarantees the reader is closed even when an
        // IOException interrupts the read loop (the original leaked it).
        try (BufferedReader br = new BufferedReader(new FileReader(aInputPath))) {
            StringBuilder para = new StringBuilder();

            while (true) {
                String buf = br.readLine();

                if (buf == null || buf.trim().equals("")) {
                    // End of paragraph/file.  Skip paragraphs with no text in
                    // them; at end of file, flush any pending text and stop.
                    if (para.length() == 0) {
                        if (buf == null) {
                            break;
                        } else {
                            continue;
                        }
                    }

                    // Run the paragraph through the sentence detector; each
                    // chunk corresponds to one sentence of the paragraph.
                    Chunking chunking = sd.chunk(para.toString());
                    for (Chunk c : chunking.chunkSet()) {
                        sents.add(chunking.charSequence()
                                .subSequence(c.start(), c.end()).toString());
                    }
                    para.setLength(0);

                    // end of file; break out of the loop.
                    if (buf == null) {
                        break;
                    }
                } else {
                    // Accumulate the paragraph one trimmed line at a time.
                    para.append(buf.trim()).append('\n');
                }
            }
        } catch (FileNotFoundException e) {
            System.err.println("Error: file `" + aInputPath + "' not found!");
            return false;
        } catch (IOException e) {
            System.err.println("Error: couldn't extract queries from file (" + e + ")");
            return false;
        }

        // One Sentence object per detected sentence in the document.
        doc = new Sentence[sents.size()];

        // Mark every run of capitalized words as a candidate named entity.
        for (int i = 0; i < sents.size(); i++) {
            try {
                doc[i] = LEXComputer.markCapsEntities(new Sentence(sents.get(i)));
            } catch (Exception e) {
                System.err.println("Couldn't tokenize sentence (" + e + ")");
                return false;
            }
        }

        // Generate all queries to issue against the n-gram collection.
        for (Sentence s : doc) {
            // Each record is a maximal run of consecutive words whose first
            // letter is capitalized (a candidate named entity).
            Sentence.EntityRec[] ents = s.getNamedEntities();

            Query q1, q2;
            try {
                /* generate delta-check queries on the sentence-initial word. */
                q1 = new Query(new String[]{s.getWord(0)});
                q2 = new Query(new String[]{"#EOS#", s.getWord(0)});
            } catch (Exception e) {
                System.err.println("Couldn't write query (" + e + ")");
                return false;
            }

            if (!writeQuery(iw, q1, aLocTag) || !writeQuery(iw, q2, aLocTag)) {
                return false;
            }

            for (int i = 0; i < ents.length; i++) {
                int next_ent_idx = i + 1;
                Sentence.EntityRec this_ent = ents[i];

                while (next_ent_idx < ents.length) {
                    int a = this_ent.getEndIndex(), b = ents[next_ent_idx].getStartIndex();

                    if (b - a > 3) {
                        /*
                         * if the two entities to be merged are separated by more than three words,
                         * the entities cannot be merged any further, so consider merges starting
                         * with the next entity.
                         */
                        break;
                    }

                    /* qs holds the queries over the entities separated by
                       lowercase words, e.g. for "Pride and Prejudice":
                       Pride, and, Prejudice, and "Pride and Prejudice". */
                    Query[] qs = lex.requiredQueries(this_ent.getWords(),
                            s.getWordSpan(a, b), ents[next_ent_idx].getWords());

                    // Write the required queries to the IQF file.
                    for (Query q : qs) {
                        if (!writeQuery(iw, q, aLocTag)) {
                            return false;
                        }
                    }

                    if (i == 0 && ents[0].getEndIndex() > 1) {
                        /*
                         * generate "delta-failed" queries -- those queries on merging this entity with its neighbors
                         * if the first word in the sentence is not part of a named entity.  for one-word initial base
                         * entities ("<E> Surveys </E> say...") the first entity is skipped completely, so we don't need
                         * to issue any additional queries.  and since we only consider merging entities rightward,
                         * if the first base entity being merged is not entity 0, we don't need to issue any additional
                         * queries because the entities being merged are unaffected.
                         */
                        String[] df_words = new String[this_ent.getWords().length - 1];
                        System.arraycopy(this_ent.getWords(), 1, df_words, 0, df_words.length);

                        Query[] qs_df = lex.requiredQueries(df_words,
                                s.getWordSpan(a, b), ents[next_ent_idx].getWords());

                        for (Query q : qs_df) {
                            if (!writeQuery(iw, q, aLocTag)) {
                                return false;
                            }
                        }
                    }

                    this_ent = s.merge(this_ent, ents[next_ent_idx], false);
                    next_ent_idx++;
                }
            }
        }

        try {
            iw.finish();
        } catch (Exception e) {
            System.err.println("Couldn't finalize IQF file (" + e + ")");
            return false;
        }

        return true;
    }

    /** Prints the program banner to stdout. */
    static void banner() {
        System.out.print(
                "teLEX Query Extractor v. 0.1\n"
                + "Colin Bayer and Douglas Downey <{vogon,ddowney}@cs.washington.edu>\n"
                + "Based on an algorithm in 'Locating Named Entities in Web Text'\n"
                + "\t(Downey, Broadhead, Etzioni 2007, published in IJCAI 2007 Proceedings)\n"
                + "See README for a description of the Query Extractor and teLEX in general.\n"
                + "=====================================================================================\n");
    }

    /** Prints the banner plus usage information, then exits with status 255. */
    static void usage() {
        banner();
        System.err.print(
                "Usage: java QueryExtractor [-nonlp|-nlp path] doc [output]\n"
                + "\t-nonlp: don't perform NLP on the input file (assume space-delimited tokens, one sentence per line; default)\n"
                + "\t-nlp path: perform NLP, with specified path to OpenNLP Tools distribution (with models subdirectory)\n"
                + "\tdoc: path to document file, or directory containing one or more\n"
                + "\t\tdocuments to extract queries from\n"
                + "\toutput: path to output IQF file(s) in (default: doc + \".iqf\" for a single file,\n"
                + "\tdoc + \"-iqf\" for a directory)\n");
        System.exit(255);
    }

    /**
     * Command-line entry point.  Parses options, resolves the input and
     * output paths (single file or directory of files), and runs extraction
     * over every input file, exiting non-zero on any failure.
     */
    public static void main(String[] aArgs) {
        if (aArgs.length < 1) {
            // no filename specified!
            usage();
        }

        String nlp_path = null, doc_path = null, output_path = null;
        boolean no_nlp = true;
        // Hoisted out of the argument loop: it was previously re-declared
        // (and reset to false) on every iteration, so the "unused pathname"
        // warning was never actually silenced after the first occurrence.
        boolean silence_pathname_warning = false;

        for (int i = 0; i < aArgs.length; i++) {
            if (aArgs[i].equals("-nlp")) {
                // -nlp option: enable NLP with the given OpenNLP path.
                if (i == aArgs.length - 1) {
                    System.err.println("-nlp option requires an argument.");
                    usage();
                }

                nlp_path = aArgs[i + 1];
                i++;
                // BUG FIX: -nlp previously left no_nlp == true, so the
                // OpenNLP model paths below were never configured and the
                // option had no effect.
                no_nlp = false;
            } else if (aArgs[i].equals("-nonlp")) {
                no_nlp = true;
            } else if (aArgs[i].charAt(0) == '-') {
                System.err.println("Unrecognized option '" + aArgs[i] + "'.");
                usage();
            } else {
                // pathname in arguments list.  assign it to the first of doc_path or output_path,
                // or warn (once) if both are already assigned.
                if (doc_path == null) {
                    doc_path = aArgs[i];
                } else if (output_path == null) {
                    output_path = aArgs[i];
                } else if (!silence_pathname_warning) {
                    System.err.println("Warning: unused pathname argument ignored.");
                    silence_pathname_warning = true;
                }
            }
        }

        if (!no_nlp) {
            if (nlp_path == null) {
                nlp_path = ".";
            }
            OpenNLPProvider.setSentenceDetectorModelPath(nlp_path + "/models/sentdetect/EnglishSD.bin.gz");
            OpenNLPProvider.setTokenizerModelPath(nlp_path + "/models/tokenize/EnglishTok.bin.gz");
        }

        // Each entry is {input path, output path, location tag}.
        ArrayList<String[]> files_to_process = new ArrayList<String[]>();

        banner();

        if (doc_path == null) {
            System.err.println("No input path specified.");
            usage();
        } else {
            File f = new File(doc_path);
            boolean input_is_dir = false;

            // detect directory-ness of doc_path.
            if (!f.exists()) {
                System.err.println("Input file does not exist.");
                System.exit(1);
            } else if (f.isDirectory()) {
                System.out.println("<== Input directory: " + f);
                input_is_dir = true;
            } else if (f.isFile()) {
                System.out.println("<== Input file: " + f);
                input_is_dir = false;
            } else {
                System.err.println("Input file exists, but is not a normal file or a directory.");
                System.exit(1);
            }

            // generate output path, if none provided.
            if (output_path == null) {
                if (input_is_dir) {
                    output_path = doc_path + "-iqf";
                } else {
                    output_path = doc_path + ".iqf";
                }
            }

            // check that the output path either doesn't exist, or is of the right type to overwrite.
            // create the output directory if we're writing a directory.
            f = new File(output_path);

            System.out.println("==> Output " + (input_is_dir ? "directory" : "file")
                    + (f.exists() ? ": " : " (creating): ") + output_path);

            if (!f.exists() && input_is_dir) {
                try {
                    f.mkdirs();
                } catch (Exception e) {
                    System.err.println("Error trying to create output directory: " + e);
                    System.exit(1);
                }
            } else if (f.isDirectory() && !input_is_dir) {
                System.err.println("Output file already exists, but is a directory; refusing to overwrite it.");
                System.exit(1);
            } else if (f.isFile() && input_is_dir) {
                System.err.println("Output file already exists, but isn't a directory; refusing to overwrite it.");
                System.exit(1);
            }

            // generate list of input and output filenames to process.
            if (input_is_dir) {
                f = new File(doc_path);

                String[] files_in_dir = f.list();

                if (files_in_dir == null) {
                    System.err.println("Error getting list of files in input directory.  Halting.");
                    System.exit(1);
                }

                for (String fname : files_in_dir) {
                    files_to_process.add(new String[]{doc_path + "/" + fname,
                                output_path + "/" + fname,
                                fname});
                }
            } else {
                files_to_process.add(new String[]{doc_path, output_path, f.getName()});
            }
        }

        QueryExtractor ex = new QueryExtractor();

        for (String[] fnames : files_to_process) {
            System.out.println("Extracting queries from " + fnames[0] + " to " + fnames[1] + "...");

            if (!ex.extract(fnames[0], fnames[1], fnames[2])) {
                System.out.println("Extraction failed.");
                System.exit(1);
            }
        }

        System.out.println("Extraction completed successfully (extracted " + ex.getQueryCount()
                + " queries from " + files_to_process.size() + " files).");
    }
}



