package ac.manchester.cs.afzal.autoservd.parser_handler;

import ac.manchester.cs.afzal.autoservd.file_handler.File_Manager;

import edu.stanford.nlp.parser.lexparser.LexicalizedParser;
import edu.stanford.nlp.trees.Tree;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.util.Properties;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * Title: Term Classifier
 *
 * Description: A toolkit to find the relevance of candidate terms from a text corpus
 * to any particular topic/sub-domain using a term classification driven approach. The
 * system utilizes the lexical and contextual profiles of the candidate and domain-representing
 * "Resource Terms" (Seed and Ontological). The lexical and contextual profiles of candidate terms
 * are compared with those of Resource Terms to measure their relevance to the topic of interest.
 *
 * The Parse_Sentences class converts sentences from plain-text files into
 * Penn Treebank (bracketed parse tree) format using the Stanford parser.
 * <p>Company: The University of Manchester</p>
 *
 * @author Hammad Afzal
 * @version 1.0
 *
 */
public class Parse_Sentences
{

    // Each key field initially holds its own property key; the constructor
    // replaces it with the value loaded from the properties file.
    private String MODE = "MODE";
    private String SEPERATE_SENTENCES_CORPUS = "SEPERATE_SENTENCES_CORPUS";
    private String SEPERATE_SENTENCES_TRAINING = "SEPERATE_SENTENCES_TRAINING";
    private String PARSED_SENTENCES_CORPUS = "PARSED_SENTENCES_CORPUS";
    private String PARSED_SENTENCES_TRAINING = "PARSED_SENTENCES_TRAINING";
    private String STANFORD_PARSER_PATH = "STANFORD_PARSER_PATH";
    private String EN_SER_MODEL = "";
    private String EN_FCT_MODEL = "";
    private File_Manager File_Manager = new File_Manager();
    private Properties props;

    /**
     * Loads the configuration from {@code Term_Classification.properties} in the
     * working directory and derives the Stanford parser model paths from
     * {@code STANFORD_PARSER_PATH}. On an I/O failure the error is logged and the
     * fields keep their defaults (the key names), so {@link #txt_to_penn()} would
     * then operate on non-existent paths — check the log if output is missing.
     */
    public Parse_Sentences()
    {
        FileInputStream props_stream = null;
        try
        {
            props = new Properties();
            props_stream = new FileInputStream("Term_Classification.properties");
            props.load(props_stream);
            MODE = props.getProperty(MODE);
            PARSED_SENTENCES_TRAINING = props.getProperty(PARSED_SENTENCES_TRAINING);
            PARSED_SENTENCES_CORPUS = props.getProperty(PARSED_SENTENCES_CORPUS);
            SEPERATE_SENTENCES_CORPUS = props.getProperty(SEPERATE_SENTENCES_CORPUS);
            SEPERATE_SENTENCES_TRAINING = props.getProperty(SEPERATE_SENTENCES_TRAINING);
            STANFORD_PARSER_PATH = props.getProperty(STANFORD_PARSER_PATH);
            EN_SER_MODEL = STANFORD_PARSER_PATH + "//englishPCFG.ser.gz";
            EN_FCT_MODEL = STANFORD_PARSER_PATH + "//englishFactored.ser.gz";
        }
        catch (IOException ex)
        {
            Logger.getLogger(Parse_Sentences.class.getName()).log(Level.SEVERE, null, ex);
        }
        finally
        {
            // Original code leaked this stream; close it regardless of success.
            if (props_stream != null)
            {
                try
                {
                    props_stream.close();
                }
                catch (IOException ex)
                {
                    Logger.getLogger(Parse_Sentences.class.getName()).log(Level.WARNING, null, ex);
                }
            }
        }
    }

    /**
     * Converts the sentences from text format to Penn (bracketed parse tree)
     * format. Depending on {@code MODE} ("Corpus" or training), reads every file
     * under each sub-directory of the separate-sentences directory, parses its
     * contents with the Stanford PCFG model, and writes the Penn-printed tree to
     * the mirror path under the parsed-sentences directory. Files containing the
     * marker "MathType" (equation artifacts) are skipped. Missing or unreadable
     * directories are skipped with a message instead of throwing NPE.
     */
    public void txt_to_penn()
    {
        LexicalizedParser lp = new LexicalizedParser(EN_SER_MODEL);

        StringBuffer file_contents_buffer;

        File input_file_dir =
        MODE.equals("Corpus") ? new File(SEPERATE_SENTENCES_CORPUS) : new File(SEPERATE_SENTENCES_TRAINING);

        // list() returns null when the path is missing or not a directory;
        // the original code would NPE here.
        String[] list_dirs = input_file_dir.list();
        if (list_dirs == null)
        {
            System.out.println("Input directory not found or not readable: " + input_file_dir);
            return;
        }

        for(String directory_name : list_dirs)
        {
            File input_file_doc =
            MODE.equals("Corpus") ? new File(SEPERATE_SENTENCES_CORPUS + "//" + directory_name) : new File(SEPERATE_SENTENCES_TRAINING + "//" + directory_name);

            File output_file_doc =
            MODE.equals("Corpus") ? new File(PARSED_SENTENCES_CORPUS + "//" + directory_name) : new File(PARSED_SENTENCES_TRAINING + "//" + directory_name);
            // mkdirs() (not mkdir()) so a missing parent directory is also created.
            output_file_doc.mkdirs();

            String[] list_files = input_file_doc.list();
            if (list_files == null)
            {
                // Stray non-directory entry (or unreadable dir) — skip it.
                continue;
            }

            for(String file_name : list_files)
            {

                File input_file_per_term = MODE.equals("Corpus") ? new File(SEPERATE_SENTENCES_CORPUS + "//" + directory_name + "//" + file_name) : new File(SEPERATE_SENTENCES_TRAINING + "//" + directory_name + "//" + file_name);
                input_file_per_term = input_file_per_term.getAbsoluteFile();
                file_contents_buffer = File_Manager.fileReader(input_file_per_term);

                File output_file_per_term = MODE.equals("Corpus") ? new File(PARSED_SENTENCES_CORPUS + "//" + directory_name + "//" + file_name) : new File(PARSED_SENTENCES_TRAINING + "//" + directory_name + "//" + file_name);

                // Skip files with embedded equation-editor artifacts.
                if( ! (file_contents_buffer.toString().contains("MathType")))
                {
                    PrintWriter pw = null;
                    try
                    {
                        pw = new PrintWriter(new FileOutputStream(output_file_per_term), true);
                        Tree parse = (Tree) lp.apply(file_contents_buffer.toString());
                        parse.pennPrint(pw);
                    }
                    catch (FileNotFoundException ex)
                    {
                        System.out.println("Exception : " + ex.getMessage());
                    }
                    finally
                    {
                        // Original code leaked the writer; close it so the tree
                        // is flushed to disk even if pennPrint partially wrote.
                        if (pw != null)
                        {
                            pw.close();
                        }
                    }
                }
            }
        }
    }
}
