package ac.manchester.cs.afzal.term_classifier.term_similarities;

import ac.manchester.cs.afzal.term_classifier.sql_handler.SQL_Handler_Similarities_Impl;
import ac.manchester.cs.afzal.term_classifier.miscellaneous.File_Manager_Impl;
import ac.manchester.cs.afzal.term_classifier.miscellaneous.Vector_Builder_Impl;
import ac.manchester.cs.afzal.term_classifier.nlp.String_Manipulator;
import ac.manchester.cs.afzal.term_classifier.nlp.String_Manipulator_Impl;
import java.io.File;
import java.io.FileInputStream;
import java.util.Iterator;
import java.util.Properties;
import java.util.StringTokenizer;
import java.util.Vector;

/**
 * Title: Term Classifier
 *
 * Derive_Distinct_Sentences class converts the
 * <p>Company: The University of Manchester</p>
 *
 * @author Hammad Afzal
 * @version 1.0
 * Last Modification Date: 26th April, 2012

 */
public class Lexical_Similarity_Impl
{
    /*
    private Properties props;
    private String database_url = "database_url";
    private String username = "username";
    private String password = "password";


    private String SIMILARITY_MODE = "SIMILARITY_MODE";
    private String SEED_TERMS_PATH = "SEED_TERMS_PATH";

    private String CORPUS_PROFILE_PATH = "CORPUS_PROFILE_PATH";

    public Vector ontology_nodes, seed_nodes;
    public Vector[] setOntology, setSeed, setCorpus;

    File_Manager_Impl fileManager = new File_Manager_Impl();
    Vector_Builder_Impl vectorBuilder = new Vector_Builder_Impl();
    Lexical_Profile_Impl profileBuilder = new Lexical_Profile_Impl();
    String_Manipulator strMan = new String_Manipulator_Impl();
    SQL_Handler_Similarities_Impl sql_handler = new SQL_Handler_Similarities_Impl();
*/
    public Lexical_Similarity_Impl()
    {
/*        try
        {
            props = new Properties();
            props.load(new FileInputStream("Term_Classification.properties"));
            database_url = props.getProperty(database_url);
            username = props.getProperty(username);
            password = props.getProperty(password);

            SIMILARITY_MODE = props.getProperty(SIMILARITY_MODE);
            SEED_TERMS_PATH = props.getProperty(SEED_TERMS_PATH);
        }
        catch (Exception ex)
        {
            System.out.println("Exception while loading propoerties: " + ex.getCause());
        }


        sql_handler.initialize_parameters(database_url, username, password);
        sql_handler.connect_db();

        sql_handler.delete_all(new String[]
                {
                    "result_lexical"
                });
        sql_handler.reset_auto_inc(new String[]
                {
                    "result_lexical"
                });

//        loadOntology();
//        loadSeedTerms();
//        loadCorpus();

        profile_builder(SEED_FILE_PATH);
        profile_builder(ONTOLOGY_FILE_PATH);

        calculateSimilarity();
*/
    }

/*    public void loadCorpus()
    {
        String[] dirs = new File(CORPUS_FILE_PATH).list();

        for (int i = 0; i < dirs.length; i++)
        {
            int count = 0;
            Vector corpusTermsVector = new Vector();
            String[] terms, replacer_terms;

            StringBuffer contents = new StringBuffer();
            contents = fileManager.fileReader(CORPUS_FILE_PATH + "//" + dirs[i]);

            corpusTermsVector = vectorBuilder.string_to_vector(contents.toString(), "\r\n");
            terms = new String[corpusTermsVector.size()];
            replacer_terms = new String[terms.length];

            for (int j = 0; j < corpusTermsVector.size(); j++)
            {
                String entry = corpusTermsVector.elementAt(j).toString();
                entry = entry.substring(0, entry.length() - 1);
                int last_index = entry.lastIndexOf("\"");
                entry = entry.substring(last_index + 1, entry.length());
                terms[count] = entry;
                count++;
            }

            contents = new StringBuffer();

            for (int n = 0; n < terms.length; n++)
            {
                replacer_terms[n] = strMan.replace_letter(terms[n], " ", ",");
                StringBuffer str = new StringBuffer(replacer_terms[n]);
                replacer_terms[n] = str.toString();
                contents.append(replacer_terms[n] + "\n");
            }
            fileManager.fileWriter(contents, CORPUS_PROFILE_PATH + "//" + dirs[i]);
        }
    }

    public void profile_builder(String path)
    {
        String[] ontology_terms, seed_terms;
        Vector seedVector, ontologyVector;

        StringBuffer contents = new StringBuffer();
        contents = fileManager.fileReader(path);

        if (path.equals(ONTOLOGY_FILE_PATH))
        {
            ontologyVector = vectorBuilder.string_to_vector(contents.toString(), "\n");
            setOntology = new Vector[ontologyVector.size()];

            Iterator iteratorVector = ontologyVector.iterator();
            int n = 0;
            while (iteratorVector.hasNext())
            {
                Vector temp = new Vector();
                String term_in_ontology_node = iteratorVector.next().toString();
                StringTokenizer tokenizer = new StringTokenizer(term_in_ontology_node, " ");
                while (tokenizer.hasMoreTokens())
                {
                    temp.add(tokenizer.nextToken().toLowerCase());
                }
                ontology_terms = (String[]) temp.toArray(new String[temp.size()]);
                setOntology[n] = profileBuilder.term_profiles(ontology_terms);
                if (setOntology[n].size() > 1)
                {
                    setOntology[n].add(term_in_ontology_node.replaceAll(",", " "));
                }
                else;
                n++;
            }
        }
        if (path.equals(SEED_FILE_PATH))
        {
            seedVector = vectorBuilder.string_to_vector(contents.toString(), "\n");
            setSeed = new Vector[seedVector.size()];

            Iterator iteratorVector = seedVector.iterator();
            int n = 0;
            while (iteratorVector.hasNext())
            {
                Vector temp = new Vector();
                String term_in_manual_node = iteratorVector.next().toString();
                StringTokenizer tokenizer = new StringTokenizer(term_in_manual_node, " ");
                while (tokenizer.hasMoreTokens())
                {
                    temp.add(tokenizer.nextToken().toLowerCase());
                }
                seed_terms = (String[]) temp.toArray(new String[temp.size()]);
                setSeed[n] = profileBuilder.term_profiles(seed_terms);
                if (setSeed[n].size() > 1)
                {
                    setSeed[n].add(term_in_manual_node.replaceAll(",", " "));
                }
                else;
                n++;
            }
        }
        else
        {
            System.out.println("File Paths for lexical resources are incorrect");
        }
    }

    public void calculateSimilarity()
    {
        Vector terms_in_corpus_file, terms_in_corpus_nodes_without_commas;
        StringBuffer contents;

        File corpusDir = new File(CORPUS_PROFILE_PATH);
        String[] str = corpusDir.list();
        int no_of_files = str.length;


        for (int i = 0; i < no_of_files; i++)
        {
            int no_of_nodes_in_file = 0;
            terms_in_corpus_file = new Vector();

            contents = fileManager.fileReader(corpusDir + "//" + str[i]);
            Vector corpusFileVector = vectorBuilder.string_to_vector(contents.toString(), "\n");
            setCorpus = new Vector[corpusFileVector.size()];


            Iterator iteratorCorpus = corpusFileVector.iterator();
            while (iteratorCorpus.hasNext())
            {
                Vector temp = new Vector();
                String corpus_term = iteratorCorpus.next().toString();
                terms_in_corpus_file.add(corpus_term);
                StringTokenizer tokenizer = new StringTokenizer(corpus_term, " ,");
                while (tokenizer.hasMoreTokens())
                {
                    temp.add(tokenizer.nextToken().toLowerCase());
                }
                String[] corpusArray = (String[]) temp.toArray(new String[temp.size()]);
                setCorpus[no_of_nodes_in_file] = profileBuilder.term_profiles(corpusArray);
                setCorpus[no_of_nodes_in_file].add(corpus_term.replaceAll(",", " "));
                no_of_nodes_in_file++;
            }

            /****************************** The purpose of this FOR LOOP is to remove "commas" from all nodes *****
            terms_in_corpus_nodes_without_commas = new Vector();

            for (int j = 0; j < no_of_nodes_in_file; j++)
            {
                String term_with_commas = terms_in_corpus_file.elementAt(j).toString();
                String term_without_commas = term_with_commas.replaceAll(",", " ");
                terms_in_corpus_nodes_without_commas.add(term_without_commas);
            }

            /****************************** END - The purpose of this FOR LOOP is to remove "commas" from all nodes *****
            int iteration = 0;
            float[] terms_relevance_man = new float[no_of_nodes_in_file];
            float[] terms_relevance_ont = new float[no_of_nodes_in_file];
            float[] terms_relevance_man_max = new float[no_of_nodes_in_file];
            float[] terms_relevance_ont_max = new float[no_of_nodes_in_file];

            // Add functionality to chooose the lexical resources for term similarity here.
            if ((SIMILARITY_MODE.equals("SEED")) || (SIMILARITY_MODE.equals("SEED+ONT")))
            {
                for (int j = 0; j < no_of_nodes_in_file; j++)
                {
                    int flag = 0;
                    float TOTAL_INTERSECTION = 0;
                    float max = 0;

                    for (int count = 0; count < setSeed.length; count++)
                    {
                        int LOCAL_INTERSECTION = 0;
                        Iterator iter = setCorpus[j].iterator();
                        Object element = null;

                        while (iter.hasNext())
                        {
                            element = iter.next();
                            if (setSeed[count].contains(element))
                            {
                                flag++;
                                LOCAL_INTERSECTION++;
                            }
                        }

                        String header1 = seed_nodes.elementAt(count).toString();
                        String header2 = terms_in_corpus_nodes_without_commas.elementAt(j).toString();
                        float temp;
                        if ((header1.contains(" ")) || (header2.contains(" ")))
                        {
                            if (header1.contains(" "))
                            {
                                header1 = header1.substring(header1.lastIndexOf(" ") + 1, header1.length());
                            }
                            if (header2.contains(" "))
                            {
                                header2 = header2.substring(header2.lastIndexOf(" ") + 1, header2.length());
                            }

                            if (header1.equals(header2))
                            {
                                temp = (float) ((LOCAL_INTERSECTION) / (setCorpus[j].size() + setSeed[count].size()) + 0.5);
                            }
                            else
                            {
                                temp = (float) (LOCAL_INTERSECTION) / (setCorpus[j].size() + setSeed[count].size());
                            }
                        }
                        else
                        {
                            temp = (float) (2 * LOCAL_INTERSECTION) / (setCorpus[j].size() + setSeed[count].size());
                        }
                        if (max < temp)
                        {
                            max = temp;
                        }
                        TOTAL_INTERSECTION = TOTAL_INTERSECTION + temp;
                        LOCAL_INTERSECTION = 0;
                        iteration++;
                        flag = 0;
                    }
                    terms_relevance_man[j] = (float) (TOTAL_INTERSECTION / setSeed.length);
                    terms_relevance_man_max[j] = max;
                    TOTAL_INTERSECTION = 0;
                }
            }

            if (SIMILARITY_MODE.equals("SEED+ONT"))
            {
                for (int j = 0; j < no_of_nodes_in_file; j++)
                {
                    int flag = 0;
                    float TOTAL_INTERSECTION = 0;
                    float max = 0;
                    for (int count = 0; count < setOntology.length; count++)
                    {
                        int LOCAL_INTERSECTION = 0;
                        Iterator iter = setCorpus[j].iterator();
                        Object element = null;
                        while (iter.hasNext())
                        {
                            element = iter.next();
                            if (setOntology[count].contains(element))
                            {
                                flag++;
                                LOCAL_INTERSECTION++;
                            }
                        }

                        String header1 = ontology_nodes.elementAt(count).toString();
                        String header2 = terms_in_corpus_nodes_without_commas.elementAt(j).toString();
                        float temp;
                        if ((header1.contains(" ")) || (header2.contains(" ")))
                        {
                            if (header1.contains(" "))
                            {
                                header1 = header1.substring(header1.lastIndexOf(" ") + 1, header1.length());
                            }
                            if (header2.contains(" "))
                            {
                                header2 = header2.substring(header2.lastIndexOf(" ") + 1, header2.length());
                            }
                            if (header1.equals(header2))
                            {
                                temp = (float) ((LOCAL_INTERSECTION) / (setCorpus[j].size() + setOntology[count].size()) + 0.5);
                            }
                            else
                            {
                                temp = (float) (LOCAL_INTERSECTION) / (setCorpus[j].size() + setOntology[count].size());
                            }
                        }
                        else
                        {
                            temp = (float) (2 * LOCAL_INTERSECTION) / (setCorpus[j].size() + setOntology[count].size());
                        }
                        if (max < temp)
                        {
                            max = temp;
                        }
                        TOTAL_INTERSECTION = TOTAL_INTERSECTION + temp;
                        LOCAL_INTERSECTION = 0;
                        iteration++;
                        flag = 0;
                    }
                    terms_relevance_ont[j] = (float) (TOTAL_INTERSECTION / setOntology.length);
                    terms_relevance_ont_max[j] = max;
                    TOTAL_INTERSECTION = 0;
                }
            }
            /*********************** END - Adding Functionality of Similarity Calculation ***********************************
            String[] relevance_value = new String[8];
            String[] doc_table1 =
            {
                "Value", "Relevance_MT_1", "Relevance_MT_2", "Relevance_OT_1",
                "Relevance_OT_2", "Relevance_LR_1", "Relevance_LR_2", "Document"
            };
            String corpus_name = str[i].toString();
            corpus_name = corpus_name.substring(0, corpus_name.indexOf(".clist"));

            for (int k = 0; k < terms_in_corpus_nodes_without_commas.size(); k++)
            {
                float man, ont;
                float max = 0;
                man = terms_relevance_man_max[k];
                float relevance_lr1, relevance_lr2;

                if (SIMILARITY_MODE.equals("SEED+ONT"))
                {
                    ont = terms_relevance_ont_max[k];
                    relevance_lr1 = ((terms_relevance_man[k]) + (terms_relevance_ont[k])) / 2;
                    max = (man > max) ? man : max;
                    max = (ont > max) ? ont : max;
                    relevance_lr2 = max;
                }
                else
                {
                    relevance_lr1 = (terms_relevance_man[k]);
                    max = (man > max) ? man : max;
                    relevance_lr2 = max;
                }


                relevance_value[0] = terms_in_corpus_nodes_without_commas.elementAt(k).toString().trim();
                relevance_value[1] = Double.toString(terms_relevance_man[k]);
                relevance_value[2] = Double.toString(terms_relevance_man_max[k]);

                if (SIMILARITY_MODE.equals("SEED+ONT"))
                {

                    relevance_value[3] = Double.toString(terms_relevance_ont[k]);
                    relevance_value[4] = Double.toString(terms_relevance_ont_max[k]);
                }
                else
                {
                    relevance_value[3] = "0";
                    relevance_value[4] = "0";
                }

                relevance_value[5] = Double.toString(relevance_lr1);
                relevance_value[6] = Double.toString(relevance_lr2);
                relevance_value[7] = corpus_name;

                sql_handler.insert_table("result_lexical", doc_table1, relevance_value);
            }
        }
        sql_handler.close_db();
    }*/
}
