package machinelearning;

import java.util.ArrayList;
import java.io.*;
import java.util.TreeMap;
import java.util.Iterator;
import java.util.Random;
import java.util.logging.Level;
import java.util.logging.Logger;
import weka.classifiers.Evaluation;
import weka.classifiers.functions.Winnow;
import weka.core.Attribute;
import weka.core.FastVector;
import weka.core.Instance;
import weka.core.Instances;

/**
 *
 * This class will store the data and perform the classification learning
 * @author Anat Nesichi & Moshe Goren
 */
public class ClassifyTranslation
{

    String baseDirectoryPath;
    final String arabicDirectory = "individualArabic";
    final String spansihDirectory = "individualSpanish";

    // Document-frequency maps (word -> number of TRAINING documents containing it),
    // rebuilt for every cross-validation fold.
    WordMap arabicDictionary;
    WordMap spanishDictionary;

    // Upper bounds on total word counts, used to pre-size the dictionaries.
    int arabicWordCount;
    int spanishWordCount;

    ArrayList<Document> arabicDocuments = new ArrayList<Document>();
    ArrayList<Document> spanishDocuments = new ArrayList<Document>();

    ArrayList<Document> arabicTrainFold;
    ArrayList<Document> arabicTestFold;

    ArrayList<Document> spanishTrainFold;
    ArrayList<Document> spanishTestFold;

    // Scratch output of CreateFolds(); immediately copied into the
    // language-specific fold fields above after each call.
    ArrayList<Document> documentsTrainFold;
    ArrayList<Document> documentsTestFold;

    /**
     *
     * @param basePath Path to the directory that contains the document directories
     *
     * <p>
     * The basePath must contain these two directories: individualArabic, individualSpanish
     * for the classifier to work.
     * </p>
     */
    public ClassifyTranslation(String basePath)
    {
        baseDirectoryPath = basePath;
        arabicWordCount = LoadDocuments(basePath + File.separator + arabicDirectory, arabicDocuments);
        spanishWordCount = LoadDocuments(basePath + File.separator + spansihDirectory, spanishDocuments);
    }

    /**
     * Runs one pass of {@code numFolds}-fold cross-validation preparation:
     * for each fold, splits both document collections into train/test folds,
     * builds the per-language dictionaries from the training folds, selects
     * word features, and assembles a WEKA {@link Instances} training set
     * (class attribute {@code isFromArabic}: 1.0 = Arabic, -1.0 = Spanish).
     *
     * @param numFolds the number of cross-validation folds; must be &gt;= 1
     */
    public void Classify(int numFolds)
    {

        for (int foldNum = 0; foldNum < numFolds; foldNum++)
        {

            ArrayList<String> featureList = new ArrayList<String>();

            // Prepare Arabic data
            CreateFolds(arabicDocuments, numFolds, foldNum);
            arabicTrainFold = documentsTrainFold;
            arabicTestFold = documentsTestFold;

            arabicDictionary = new WordMap(arabicWordCount);
            FillDictionary(arabicDictionary, arabicTrainFold);

            // Prepare Spanish data
            CreateFolds(spanishDocuments, numFolds, foldNum);
            spanishTrainFold = documentsTrainFold;
            spanishTestFold = documentsTestFold;

            spanishDictionary = new WordMap(spanishWordCount);
            // BUG FIX: was spanishDocuments (the full collection). The dictionary
            // must be built from the training fold only — mirroring the Arabic
            // side above — otherwise test data leaks into feature selection.
            FillDictionary(spanishDictionary, spanishTrainFold);

            // Create FeatureList from training data
            CreateFeatureList(featureList, 200);

            // CreateFeatureList may legitimately return FEWER than the requested
            // 200 features, so derive every size from the actual list instead of
            // the hard-coded 200/201 the original used.
            int numFeatures = featureList.size();

            // Create WEKA Instances from training data and above Feature List
            FastVector fv = new FastVector(numFeatures + 1);
            for (String word : featureList)
            {
                fv.addElement(new weka.core.Attribute(word));
            }

            fv.addElement(new weka.core.Attribute("isFromArabic")); // class attribute, last
            Instances inst = new Instances("documents", fv, arabicTrainFold.size() + spanishTrainFold.size());
            inst.setClassIndex(numFeatures);

            // BUG FIX: instances are built from the TRAINING folds. The original
            // iterated the full arabicDocuments/spanishDocuments collections,
            // which put the held-out test fold into the training set.
            for (Document doc : arabicTrainFold)
            {
                Instance in = new Instance(numFeatures + 1);
                in.setDataset(inst);
                doc.createFeatureInstance(featureList, in);
                in.setClassValue(1.0);  // Arabic = positive class
                inst.add(in);
            }

            for (Document doc : spanishTrainFold)
            {
                Instance in = new Instance(numFeatures + 1);
                in.setDataset(inst);
                doc.createFeatureInstance(featureList, in);
                in.setClassValue(-1.0); // Spanish = negative class
                inst.add(in);
            }

            // NOTE(review): nothing is trained or evaluated on `inst` yet; the
            // unused Winnow/Evaluation imports suggest that step is still to be
            // written. `inst` is currently discarded at the end of each fold.
        }

    }

    /**
     * Splits {@code documents} round-robin into a training fold and a test
     * fold. Every document whose index i satisfies {@code i % numFolds ==
     * foldNum} goes to the test fold; the rest go to the training fold.
     * Results are left in {@link #documentsTrainFold} / {@link #documentsTestFold}.
     *
     * @param documents the full document collection to split
     * @param numFolds  total number of folds
     * @param foldNum   index (0-based) of the fold to hold out as test data
     */
    private void CreateFolds(ArrayList<Document> documents, int numFolds, int foldNum)
    {
        documentsTrainFold = new ArrayList<Document>();
        documentsTestFold = new ArrayList<Document>();

        documentsTrainFold.ensureCapacity(documents.size());
        documentsTestFold.ensureCapacity((documents.size() / numFolds) + 1);

        for (int i = 0; i < documents.size(); i++)
        {
            // BUG FIX: the original tested `documents.size() % numFolds`, a
            // value constant across the loop, so EVERY document landed in the
            // same fold. Selecting on the loop index gives the intended
            // round-robin split.
            if (i % numFolds == foldNum)
            {
                documentsTestFold.add(documents.get(i));
            }
            else
            {
                documentsTrainFold.add(documents.get(i));
            }
        }
    }

    /**
     * Takes an empty list of Strings and populates it with word features
     * by calculating the Correlation Coefficient of each word and choosing
     * the best <i> maxNumOfFeatures </i> words.
     *
     * @param featureList An empty <i> ArrayList </i> of Strings.
     *                    Will contain the list of Features chosen,
     *                    best-scoring first
     * @param maxNumOfFeatures maximum number of features to select
     */
    private void CreateFeatureList(ArrayList<String> featureList, int maxNumOfFeatures)
    {
        // Sorts words by score. Each score maps to ALL the words that share it.
        // BUG FIX: the original used TreeMap<Double, String>, which silently
        // dropped every word whose score collided with an earlier one — a
        // frequent case, since words with identical (freqInArabic, freqInSpanish)
        // pairs produce identical scores.
        TreeMap<Double, ArrayList<String>> featureScore = new TreeMap<Double, ArrayList<String>>();

        int arabicDocumentsCount = arabicTrainFold.size();
        int spanishDocumentsCount = spanishTrainFold.size();
        int totalDocumentsCount = arabicDocumentsCount + spanishDocumentsCount;

        // NOTE(review): only words present in the Arabic dictionary are
        // candidates; words exclusive to Spanish documents (potentially strong
        // negative indicators) are never considered. Also, since features are
        // ranked by the SIGNED coefficient, words highly indicative of Spanish
        // (large negative score) rank last — confirm this is intended.
        for (String currWord : arabicDictionary.getWordsSet())
        {
            int freqInArabic = arabicDictionary.WordFreq(currWord);
            int freqInSpanish = spanishDictionary.WordFreq(currWord);

            // Use words that appear at least 5 times in training set
            if (freqInArabic + freqInSpanish >= 5)
            {
                double Nrp = freqInArabic;                          // Number of Relevant documents that contain word
                double Nrn = arabicDocumentsCount - freqInArabic;   // Number of Relevant documents that don't contain word
                double Nnp = freqInSpanish;                         // Number of Non-relevant documents that contain word
                double Nnn = spanishDocumentsCount - freqInSpanish; // Number of Non-relevant documents that don't contain word
                double N = totalDocumentsCount;

                // Calculate the Correlation Coefficient for each word
                double wordScore = (((Nrp * Nnn) - (Nrn * Nnp)) * Math.sqrt(N)) /
                        Math.sqrt((Nrp + Nrn) * (Nnp + Nnn) * (Nrp + Nnp) * (Nrn + Nnn));

                ArrayList<String> bucket = featureScore.get(wordScore);
                if (bucket == null)
                {
                    bucket = new ArrayList<String>();
                    featureScore.put(wordScore, bucket);
                }
                bucket.add(currWord);
            }
        }

        featureList.ensureCapacity(maxNumOfFeatures); // More efficient since we know the bound in advance

        // Populate featureList with the top-scoring words, best score first,
        // stopping once maxNumOfFeatures is reached (or candidates run out).
        Iterator<ArrayList<String>> bucketIt = featureScore.descendingMap().values().iterator();
        while (bucketIt.hasNext() && featureList.size() < maxNumOfFeatures)
        {
            for (String word : bucketIt.next())
            {
                if (featureList.size() == maxNumOfFeatures)
                {
                    break;
                }
                featureList.add(word);
            }
        }

    }

    /**
     * Loads all *.txt files from <i> documentsPath </i> and creates a collection of
     * <i> Document </i> objects.
     * @param documentsPath Path to directory containing the *.txt documents
     * @param docArray Array of documents with all the pre-processed data
     * @return Upper bound for the total number of words loaded
     */
    private int LoadDocuments(String documentsPath, ArrayList<Document> docArray)
    {
        File docDir = new File(documentsPath);
        // BUG FIX: File.list() returns null when the path is missing OR is not
        // a directory; the original checked exists() only, so a plain file at
        // this path caused an NPE at docsList.length.
        String[] docsList = docDir.list();
        int wordCount = 0;

        if (docsList == null)
        {
            System.err.println("Error! Directory " + docDir.getPath() + " does not exist!");
            return 0;
        }

        docArray.ensureCapacity(docsList.length); // We know already how many documents there will be...

        for (String currDocName : docsList)
        {
            if (currDocName.endsWith(".txt"))
            {
                // Load and prepare Document data
                Document currDoc = new Document(documentsPath + File.separator + currDocName);

                // Add document to list of documents
                docArray.add(currDoc);

                wordCount += currDoc.words.getWordCount();
            }
        }

        return wordCount;
    }

    /**
     * Takes an empty <i> WordMap </i> dictionary and fills it with the words
     * from all the <i> Documents </i> in the array
     * @param dictionary The dictionary to be filled
     * @param docArray The array of <i> Document</i> s to fill the dictionary
     */
    private void FillDictionary(WordMap dictionary, ArrayList<Document> docArray)
    {
        if (dictionary != null)
        {
            for (Document currDoc : docArray)
            {
                // Add the SET of words per document (not raw occurrences) —
                // this way we count how many documents contain a specific word.
                dictionary.Add(currDoc.words.getWordsSet());
            }
        }
    }
}
