package ac.man.cs.afzal.tcm.nlp;

import java.io.*;
import java.util.*;

import edu.stanford.nlp.parser.lexparser.LexicalizedParser;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreePrint;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 *
 * Company: The University of Manchester
 *
 * @author Hammad Afzal
 * @version 1.0
 * Last Modified Date: 16th Jan, 2013
 *
 */
public class Parser_Impl
{

    /** Properties file that supplies every path used by this class. */
    private static final String PROPERTIES_FILE = "Term_Classification.properties";

    /** MODE value that selects the corpus directory tree; any other value selects training. */
    private static final String CORPUS_MODE = "Corpus";

    /** Run mode read from the properties file ("Corpus" or training). */
    private String MODE;

    /** Root directories of the annotated input sentences. */
    private String ANNOTATED_SENTENCES_CORPUS;
    private String ANNOTATED_SENTENCES_TRAINING;

    /** Output roots for Penn-treebank parses. */
    private String PARSED_SENTENCES_PENN_CORPUS;
    private String PARSED_SENTENCES_PENN_TRAINING;

    /** Output roots for collapsed-dependency parses. */
    private String PARSED_SENTENCES_DEP_CORPUS;
    private String PARSED_SENTENCES_DEP_TRAINING;

    /** Output roots for POS-tagged output. */
    private String PARSED_SENTENCES_POS_CORPUS;
    private String PARSED_SENTENCES_POS_TRAINING;

    /** Directory containing the serialized Stanford grammar models. */
    private String STANFORD_PARSER_PATH;

    /** Full path of the English PCFG grammar model. */
    private String EN_SER_MODEL = "";
    /** Full path of the English factored grammar model (loaded but currently unused). */
    private String EN_FCT_MODEL = "";

    /** Sentences of this many characters or more are skipped by txt_to_penn_dep(). */
    private int ELLIGIBLE_SENTENCE_LENGTH = 300;

    private File_Manager_Impl File_Manager = new File_Manager_Impl();
    private LexicalizedParser lp;
    private Properties props;

    /**
     * Loads all directory paths and the run mode from the properties file.
     * On failure the error is logged and the path fields remain null.
     */
    public Parser_Impl()
    {
        FileInputStream in = null;
        try
        {
            props = new Properties();
            in = new FileInputStream(PROPERTIES_FILE);
            props.load(in);

            MODE = props.getProperty("MODE");

            ANNOTATED_SENTENCES_CORPUS = props.getProperty("ANNOTATED_SENTENCES_CORPUS");
            ANNOTATED_SENTENCES_TRAINING = props.getProperty("ANNOTATED_SENTENCES_TRAINING");

            PARSED_SENTENCES_PENN_CORPUS = props.getProperty("PARSED_SENTENCES_PENN_CORPUS");
            PARSED_SENTENCES_PENN_TRAINING = props.getProperty("PARSED_SENTENCES_PENN_TRAINING");

            PARSED_SENTENCES_POS_TRAINING = props.getProperty("PARSED_SENTENCES_POS_TRAINING");
            PARSED_SENTENCES_POS_CORPUS = props.getProperty("PARSED_SENTENCES_POS_CORPUS");

            PARSED_SENTENCES_DEP_TRAINING = props.getProperty("PARSED_SENTENCES_DEP_TRAINING");
            PARSED_SENTENCES_DEP_CORPUS = props.getProperty("PARSED_SENTENCES_DEP_CORPUS");

            STANFORD_PARSER_PATH = props.getProperty("STANFORD_PARSER_PATH");

            EN_SER_MODEL = STANFORD_PARSER_PATH + File.separator + "englishPCFG.ser.gz";
            EN_FCT_MODEL = STANFORD_PARSER_PATH + File.separator + "englishFactored.ser.gz";
        }
        catch (IOException ex)
        {
            Logger.getLogger(Parser_Impl.class.getName()).log(Level.SEVERE, null, ex);
        }
        finally
        {
            // The stream was previously leaked; close it regardless of outcome.
            if (in != null)
            {
                try
                {
                    in.close();
                }
                catch (IOException ignored)
                {
                    // Best effort: nothing useful can be done if close fails.
                }
            }
        }
    }

    /** @return true when the properties file selected corpus mode (null-safe). */
    private boolean isCorpusMode()
    {
        return CORPUS_MODE.equals(MODE);
    }

    /** Root of the annotated input sentences for the current mode. */
    private String annotatedRoot()
    {
        return isCorpusMode() ? ANNOTATED_SENTENCES_CORPUS : ANNOTATED_SENTENCES_TRAINING;
    }

    /** Root of the Penn-treebank output tree for the current mode. */
    private String pennRoot()
    {
        return isCorpusMode() ? PARSED_SENTENCES_PENN_CORPUS : PARSED_SENTENCES_PENN_TRAINING;
    }

    /** Root of the collapsed-dependency output tree for the current mode. */
    private String depRoot()
    {
        return isCorpusMode() ? PARSED_SENTENCES_DEP_CORPUS : PARSED_SENTENCES_DEP_TRAINING;
    }

    /** Root of the POS output tree for the current mode. */
    private String posRoot()
    {
        return isCorpusMode() ? PARSED_SENTENCES_POS_CORPUS : PARSED_SENTENCES_POS_TRAINING;
    }

    /**
     * Lists the entries of a directory.  Returns an empty array (and logs a
     * warning) when the path is not a readable directory, so callers never
     * dereference the null that File.list() returns in that case.
     */
    private String[] listOrEmpty(File dir)
    {
        String[] names = dir.list();
        if (names == null)
        {
            Logger.getLogger(Parser_Impl.class.getName()).log(
                    Level.WARNING, "Not a readable directory: {0}", dir.getAbsolutePath());
            return new String[0];
        }
        return names;
    }

    /**
     * Converts the sentences from text format to Penn format.  Walks every
     * per-term file under the annotated-sentences root and writes one Penn
     * parse file per input file, mirroring the directory layout.
     */
    public void txt_to_penn()
    {
        lp = new LexicalizedParser(EN_SER_MODEL);

        for (String directory_name : listOrEmpty(new File(annotatedRoot())))
        {
            File input_dir = new File(annotatedRoot(), directory_name);
            File penn_dir = new File(pennRoot(), directory_name);
            penn_dir.mkdirs(); // mkdirs also creates missing parent directories

            for (String file_name : listOrEmpty(input_dir))
            {
                File input_file = new File(input_dir, file_name).getAbsoluteFile();
                StringBuffer contents = File_Manager.fileReader(input_file);
                apply_penn_parser(contents.toString(), new File(penn_dir, file_name));
            }
        }
    }

    /**
     * Converts the sentences from text format to Penn, collapsed-dependency
     * and POS formats in one pass (each sentence file is parsed once and
     * printed in the requested formats).
     */
    public void txt_to_penn_dep_pos()
    {
        lp = new LexicalizedParser(EN_SER_MODEL);

        for (String directory_name : listOrEmpty(new File(annotatedRoot())))
        {
            File input_dir = new File(annotatedRoot(), directory_name);

            File penn_dir = new File(pennRoot(), directory_name);
            penn_dir.mkdirs();

            File dep_dir = new File(depRoot(), directory_name);
            dep_dir.mkdirs();

            File pos_dir = new File(posRoot(), directory_name);
            pos_dir.mkdirs();

            for (String file_name : listOrEmpty(input_dir))
            {
                File input_file = new File(input_dir, file_name).getAbsoluteFile();
                StringBuffer contents = File_Manager.fileReader(input_file);

                apply_dep_pos_penn_parser(contents.toString(),
                        new File(penn_dir, file_name),
                        new File(dep_dir, file_name),
                        new File(pos_dir, file_name));
            }
        }
    }

    /**
     * Converts the sentences from text format to Penn and collapsed-dependency
     * formats.  Sentences of ELLIGIBLE_SENTENCE_LENGTH characters or more are
     * skipped (the parser is prohibitively slow on very long inputs) and
     * reported on stdout.
     */
    public void txt_to_penn_dep()
    {
        lp = new LexicalizedParser(EN_SER_MODEL);

        for (String directory_name : listOrEmpty(new File(annotatedRoot())))
        {
            File input_dir = new File(annotatedRoot(), directory_name);

            File penn_dir = new File(pennRoot(), directory_name);
            penn_dir.mkdirs();

            File dep_dir = new File(depRoot(), directory_name);
            dep_dir.mkdirs();

            for (String file_name : listOrEmpty(input_dir))
            {
                File input_file = new File(input_dir, file_name).getAbsoluteFile();
                StringBuffer contents = File_Manager.fileReader(input_file);

                if (contents.length() < ELLIGIBLE_SENTENCE_LENGTH)
                {
                    apply_dep_penn_parser(contents.toString(),
                            new File(penn_dir, file_name),
                            new File(dep_dir, file_name));
                }
                else
                {
                    System.out.println("Long Sentence: " + contents);
                }
            }
        }
    }

    /**
     * Parses the given text once and writes the Penn tree to output_penn and
     * the collapsed typed dependencies to output_dep.  Any parse or I/O
     * failure is logged and the method returns normally, so a single bad
     * sentence does not abort a batch run.
     */
    public void apply_dep_penn_parser(String contents, File output_penn, File output_dep)
    {
        System.out.println("Output file is : " + output_penn.getAbsolutePath());

        PrintWriter pw_penn = null;
        PrintWriter pw_dep = null;
        try
        {
            pw_penn = new PrintWriter(new FileOutputStream(output_penn), true);
            pw_dep = new PrintWriter(new FileOutputStream(output_dep), true);

            Tree parse = (Tree) lp.apply(contents);

            new TreePrint("penn").printTree(parse, pw_penn);
            new TreePrint("typedDependenciesCollapsed").printTree(parse, pw_dep);
        }
        catch (Exception ex)
        {
            // Log the full stack trace instead of only ex.getMessage().
            Logger.getLogger(Parser_Impl.class.getName()).log(Level.SEVERE, "Parse failed", ex);
        }
        finally
        {
            // Close the writers (previously leaked) so output is flushed to disk.
            if (pw_penn != null)
            {
                pw_penn.close();
            }
            if (pw_dep != null)
            {
                pw_dep.close();
            }
        }
    }

    /**
     * Parses the given text once and writes the Penn tree and the collapsed
     * typed dependencies.  The POS output file is accepted for interface
     * compatibility but, as in the original implementation, no POS output is
     * currently produced (the "wordsAndTags" print is disabled).
     */
    public void apply_dep_pos_penn_parser(String contents, File output_penn, File output_dep, File output_pos)
    {
        long start = System.currentTimeMillis();

        PrintWriter pw_penn = null;
        PrintWriter pw_dep = null;
        try
        {
            pw_penn = new PrintWriter(new FileOutputStream(output_penn), true);
            pw_dep = new PrintWriter(new FileOutputStream(output_dep), true);

            Tree parse = (Tree) lp.apply(contents);

            new TreePrint("penn").printTree(parse, pw_penn);
            new TreePrint("typedDependenciesCollapsed").printTree(parse, pw_dep);

            // POS output intentionally disabled; re-enable by opening
            // output_pos and printing with new TreePrint("wordsAndTags").
        }
        catch (Exception ex)
        {
            Logger.getLogger(Parser_Impl.class.getName()).log(Level.SEVERE, "Parse failed", ex);
        }
        finally
        {
            if (pw_penn != null)
            {
                pw_penn.close();
            }
            if (pw_dep != null)
            {
                pw_dep.close();
            }
        }

        System.out.println("Execution time was " + (System.currentTimeMillis() - start) + " ms.");
    }

    /**
     * Parses the given text once and writes the Penn-treebank tree to
     * output_file.  A missing/unwritable output file is logged; other
     * parser failures propagate as in the original implementation.
     */
    public void apply_penn_parser(String contents, File output_file)
    {
        long start = System.currentTimeMillis();

        PrintWriter pw = null;
        try
        {
            pw = new PrintWriter(new FileOutputStream(output_file), true);
            Tree parse = (Tree) lp.apply(contents);
            parse.pennPrint(pw);
        }
        catch (FileNotFoundException ex)
        {
            Logger.getLogger(Parser_Impl.class.getName()).log(
                    Level.SEVERE, "Cannot open output file " + output_file, ex);
        }
        finally
        {
            // Close the writer (previously leaked) so output is flushed to disk.
            if (pw != null)
            {
                pw.close();
            }
        }

        System.out.println("Execution time was " + (System.currentTimeMillis() - start) + " ms.");
    }
}
