/*
 * SaveToFile.java
 *
 * Created on 20 October 2008, 17:18
 *
 * To change this template, choose Tools | Template Manager
 * and open the template in the editor.
 */
package weka.classifiers.collective.meta;

import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
import java.util.logging.Level;
import java.util.logging.Logger;

import weka.classifiers.AbstractClassifier;
import weka.classifiers.Classifier;
import weka.classifiers.IOutOfBagCalculator;
import weka.classifiers.collective.CollectiveRandomizableSingleClassifierEnhancer;
import weka.core.Attribute;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.Utils;
import weka.core.converters.ArffSaver;

/**
 * Meta classifier that builds the wrapped classifier and writes an ARFF file
 * recording, for each training/validation/test instance, the partition it
 * belongs to, the model's classification and class probabilities, and — when
 * the wrapped classifier exposes ensemble members — each base classifier's
 * predictions and bag membership.
 *
 * @author Tuve Löfström
 */
public class SaveToFile extends CollectiveRandomizableSingleClassifierEnhancer {

    private static final long serialVersionUID = 6904808621766611779L;

    // Static bookkeeping shared by all instances: consecutive runs on the same
    // dataset / classifier / ensemble-size combination get an increasing fold
    // number in the output file name; any change resets the counter.
    protected static int fileNumber = 0;
    protected static int lastNumBase = 0;
    protected static String lastDataSet = "";
    protected static String lastClassifier = "";

    /** Suffix/prefix token appended to the generated ARFF file name. */
    protected String m_FilePrefix = "";

    /** Percentage (0-100) of the training set held out as validation data. */
    private double m_validationPercent = 0;

    /**
     * Whether probability distributions should be included in the output.
     * NOTE(review): buildClassifier() currently always writes the
     * distributions regardless of this flag — confirm intended behavior.
     */
    private boolean m_Probabilities = false;

    /** Whether the original dataset attributes are kept in the output. */
    private boolean m_includeDataset = false;

    /**
     * Returns the class distribution predicted by the wrapped classifier.
     *
     * @param instance the instance to classify
     * @return the class membership probabilities
     * @throws Exception if the wrapped classifier fails
     */
    @Override
    protected double[] getDistribution(Instance instance) throws Exception {
        return getClassifier().distributionForInstance(instance);
    }

    /**
     * Builds the wrapped classifier on (a subset of) the training data and
     * writes an ARFF file containing, for every instance, its partition
     * ("Set": 0 = train, 1 = validation, 2 = test), the numeric class value,
     * the model's classification and class probabilities, and — when the
     * wrapped classifier exposes ensemble members — each base classifier's
     * classification, class probabilities and bag membership.
     *
     * @throws Exception if the validation percentage is out of range, the
     *                   wrapped classifier fails, or the file cannot be
     *                   written
     */
    @Override
    protected void buildClassifier() throws Exception {
        // Optionally hold out the last m_validationPercent percent of the
        // training set as validation data; the first trainSize instances are
        // used for actual training.
        Instances train;
        if ((m_validationPercent < 0) || (m_validationPercent > 100)) {
            throw new Exception("Percentage must be between 0 and 100");
        } else if (m_validationPercent > 0) {
            int trainSize = (int) Math.round(m_Trainset.numInstances()
                    * (100 - m_validationPercent) / 100);
            train = new Instances(m_Trainset, 0, trainSize);
        } else {
            train = m_Trainset;
        }

        getClassifier().buildClassifier(train);

        // Concatenate train+validation data with the test data (using the
        // original, unmodified test set when insight mode is enabled).
        Instances newData = new Instances(m_Trainset);
        for (int i = 0; i < m_Testset.numInstances(); i++) {
            if (m_UseInsight) {
                newData.add(m_TestsetOriginal.instance(i));
            } else {
                newData.add(m_Testset.instance(i));
            }
        }
        // Unless the dataset attributes are wanted, keep only the "ID" and
        // class attributes. Iterate backwards so deletions do not shift the
        // indices still to be visited.
        // FIX: was a non-short-circuit bitwise "|"; "||" is the intended
        // logical or (same result here, but idiomatic and short-circuiting).
        if (!m_includeDataset) {
            for (int i = newData.numAttributes() - 1; i >= 0; i--) {
                if (!(newData.attribute(i).name().equalsIgnoreCase("ID")
                        || newData.classAttribute().index() == i)) {
                    newData.deleteAttributeAt(i);
                }
            }
        }
        // "Set" marks the partition: 0 = train, 1 = validation, 2 = test.
        newData.insertAttributeAt(new Attribute("Set"), 0);
        for (int i = 0; i < train.numInstances(); i++) {
            newData.instance(i).setValue(0, 0);
        }
        for (int i = train.numInstances(); i < m_Trainset.numInstances(); i++) {
            newData.instance(i).setValue(0, 1);
        }
        for (int i = m_Trainset.numInstances(); i < m_Trainset.numInstances()
                + m_Testset.numInstances(); i++) {
            newData.instance(i).setValue(0, 2);
        }

        // Append the prediction attributes after the original ones.
        final int numAttributes = newData.numAttributes();
        int column = numAttributes, numBaseClassifiers = 0;
        newData.insertAttributeAt(new Attribute("NumericClass"), column++);
        newData.insertAttributeAt(new Attribute("ModelClassification"),
                column++);
        for (int i = 0; i < newData.numClasses(); i++) {
            newData.insertAttributeAt(
                    new Attribute("ModelProbabilityClass" + i), column++);
        }
        List<Classifier> baseClassifiers = getClassifier().getEnsembleMembers();
        if (baseClassifiers != null) {
            for (int i = 0; i < baseClassifiers.size(); i++) {
                newData.insertAttributeAt(new Attribute("Base" + i
                        + "Classification"), column++);
            }
            for (int i = 0; i < baseClassifiers.size(); i++) {
                for (int j = 0; j < newData.numClasses(); j++) {
                    newData.insertAttributeAt(new Attribute("Base" + i
                            + "ProbabilityClass" + j), column++);
                }
            }
            if (getClassifier() instanceof IOutOfBagCalculator) {
                for (int i = 0; i < baseClassifiers.size(); i++) {
                    newData.insertAttributeAt(new Attribute("inBag" + i),
                            column++);
                }
            }
            numBaseClassifiers = baseClassifiers.size();
        }
        // The class is still the last of the original attributes.
        newData.setClassIndex(numAttributes - 1);

        boolean[][] inBag = null;
        if (getClassifier() instanceof IOutOfBagCalculator) {
            inBag = ((IOutOfBagCalculator) getClassifier()).getOutOfBag();
        }
        // Fill in the prediction columns for every instance, predicting from
        // the original (unaugmented) instance.
        for (int n = 0; n < newData.numInstances(); n++) {
            column = numAttributes;
            Instance instance = newData.instance(n), orgInstance;
            if (n < m_Trainset.numInstances()) {
                orgInstance = m_Trainset.instance(n);
            } else {
                orgInstance = m_Testset.instance(n - m_Trainset.numInstances());
            }
            instance.setValue(column++, instance.classValue());
            double classification = getClassifier().classifyInstance(orgInstance);
            instance.setValue(column++, classification);
            double[] pred = getClassifier().distributionForInstance(orgInstance);
            for (int i = 0; i < newData.numClasses(); i++) {
                instance.setValue(column++, pred[i]);
            }

            if (baseClassifiers != null) {
                for (Classifier base : baseClassifiers) {
                    classification = base.classifyInstance(orgInstance);
                    instance.setValue(column++, classification);
                }
                for (Classifier base : baseClassifiers) {
                    pred = base.distributionForInstance(orgInstance);
                    for (int i = 0; i < newData.numClasses(); i++) {
                        instance.setValue(column++, pred[i]);
                    }
                }
                // Bag membership is only defined for actual training
                // instances; validation/test instances get -1.
                if (getClassifier() instanceof IOutOfBagCalculator) {
                    if (n < train.numInstances()) {
                        for (int i = 0; i < baseClassifiers.size(); i++) {
                            if (inBag[i][n]) {
                                instance.setValue(column++, 1);
                            } else {
                                instance.setValue(column++, 0);
                            }
                        }
                    } else {
                        for (int i = 0; i < baseClassifiers.size(); i++) {
                            instance.setValue(column++, -1);
                        }
                    }
                }
            }
        }

        // Replace the nominal class attribute with the "NumericClass" copy
        // inserted directly after it, then drop the nominal one.
        if (!m_includeDataset) {
            int i = newData.classAttribute().index();
            newData.setClass(newData.attribute(i + 1));
            newData.deleteAttributeAt(i);
        }

        // Make sure the numbering is restarted if some changes are made
        if (!lastDataSet.equalsIgnoreCase(newData.relationName())) {
            fileNumber = 0;
            lastDataSet = newData.relationName();
        }
        if (!lastClassifier.equalsIgnoreCase(getClassifier().getClass().getName())) {
            fileNumber = 0;
            lastClassifier = getClassifier().getClass().getName();
        }
        if (lastNumBase != numBaseClassifiers) {
            fileNumber = 0;
            lastNumBase = numBaseClassifiers;
        }

        // Strip the filter history WEKA appends to the relation name so the
        // output file name stays short and stable.
        ArffSaver as = new ArffSaver();
        as.setInstances(newData);
        String relationName = newData.relationName();
        if (relationName.contains("-weka.filters.unsupervised.attribute.AddID")) {
            relationName = relationName.substring(0, relationName.indexOf("-weka.filters.unsupervised.attribute.AddID"));
        }
        if (relationName.contains("-weka.filters.unsupervised.instance.ClassRemover-Clast-N2-H")) {
            relationName = relationName.substring(0, relationName.indexOf("-weka.filters.unsupervised.instance.ClassRemover-Clast-N2-H"));
        }
        if (relationName.contains("-weka.filters.supervised.attribute.AttributeSelection")) {
            relationName = relationName.substring(
                    0,
                    relationName.indexOf("-weka.filters.supervised.attribute.AttributeSelection"))
                    + "_AttributeSelection";
        }
        // NOTE(review): the stream is handed to ArffSaver, which is assumed
        // to close it after writeBatch(); if writeBatch() throws, the stream
        // may leak — confirm against the ArffSaver implementation.
        as.setDestination(new BufferedOutputStream(new FileOutputStream(
                new File(relationName + "_"
                + getClassifier().getClass().getSimpleName() + "_#Base_"
                + numBaseClassifiers + "_Fold#_" + ++fileNumber + "_"
                + m_FilePrefix + ".arff"))));
        as.writeBatch();
    }

    /**
     * Performs the actual build; delegates to {@link #buildClassifier()}.
     *
     * @throws Exception if building fails
     */
    @Override
    protected void build() throws Exception {
        buildClassifier();
    }

    /**
     * Returns an enumeration describing the available options.
     *
     * @return an enumeration of all the available options
     */
    @SuppressWarnings("unchecked")
    @Override
    public Enumeration listOptions() {

        Vector<Option> newVector = new Vector<Option>();

        newVector.addElement(new Option("\tPrefix of file. (default '')", "F",
                1, "-F"));

        newVector.addElement(new Option("\tInclude probabilities. (default false)", "P",
                0, "-P"));

        newVector.addElement(new Option(
                "\tPercent of training set used as validation data", "V", 1,
                "-V"));
        Enumeration<Option> enu = (Enumeration<Option>) super.listOptions();
        while (enu.hasMoreElements()) {
            newVector.addElement(enu.nextElement());
        }
        return newVector.elements();
    }

    /**
     * Parses a given list of options: -F (file prefix), -P (include
     * probabilities flag) and -V (validation percentage); all remaining
     * options are passed to the superclass.
     *
     * @param options the list of options as an array of strings
     * @throws Exception if an option is not supported
     */
    @Override
    public void setOptions(String[] options) throws Exception {

        setFilePrefix(Utils.getOption("F", options));

        setIncludeProbabilities(Utils.getFlag("P", options));

        String str = Utils.getOption("V", options);
        if (str.length() != 0) {
            setValidationPercent(Double.parseDouble(str));
        } else {
            setValidationPercent(0);
        }

        // NOTE(review): m_includeDataset has no command-line option, so it is
        // not round-tripped through set/getOptions — confirm this is intended.
        super.setOptions(options);
    }

    /**
     * Gets the current settings of the Classifier.
     *
     * @return an array of strings suitable for passing to setOptions
     */
    @Override
    public String[] getOptions() {

        Vector<String> result = new Vector<String>();

        if (getIncludeProbabilities()) {
            result.add("-P");
        }

        result.add("-F");
        result.add("" + getFilePrefix());
        result.add("-V");
        result.add("" + getValidationPercent());

        for (String option : super.getOptions()) {
            result.add(option);
        }

        return result.toArray(new String[result.size()]);
    }

    /**
     * Returns the tip text for the includeProbabilities property.
     * Lower-case first letter so WEKA's GUI finds it via reflection
     * (&lt;propertyName&gt;TipText).
     *
     * @return tip text for this property suitable for displaying in the
     *         explorer/experimenter gui
     */
    public String includeProbabilitiesTipText() {
        return "If set, the probability distributions are printed to file.";
    }

    /**
     * @deprecated use {@link #includeProbabilitiesTipText()}; kept for
     *             backward compatibility with existing callers.
     * @return tip text for the includeProbabilities property
     */
    @Deprecated
    public String IncludeProbabilitiesTipText() {
        return includeProbabilitiesTipText();
    }

    /**
     * Gets whether probability distributions are included in the output.
     *
     * @return true if probabilities are written to the file
     */
    public boolean getIncludeProbabilities() {

        return m_Probabilities;
    }

    /**
     * Sets whether probability distributions are included in the output.
     *
     * @param value true to write probabilities to the file
     */
    public void setIncludeProbabilities(boolean value) {

        m_Probabilities = value;
    }

    /**
     * Returns the tip text for the includeDataset property.
     * Lower-case first letter so WEKA's GUI finds it via reflection.
     *
     * @return tip text for this property suitable for displaying in the
     *         explorer/experimenter gui
     */
    public String includeDatasetTipText() {
        return "If set, the attributes of the dataset is kept.";
    }

    /**
     * @deprecated use {@link #includeDatasetTipText()}; kept for backward
     *             compatibility with existing callers.
     * @return tip text for the includeDataset property
     */
    @Deprecated
    public String IncludeDatasetTipText() {
        return includeDatasetTipText();
    }

    /**
     * Gets whether the original dataset attributes are kept in the output.
     *
     * @return true if the dataset attributes are kept
     */
    public boolean getIncludeDataset() {

        return m_includeDataset;
    }

    /**
     * Sets whether the original dataset attributes are kept in the output.
     *
     * @param value true to keep the dataset attributes
     */
    public void setIncludeDataset(boolean value) {

        m_includeDataset = value;
    }

    /**
     * Returns the tip text for the filePrefix property.
     *
     * @return tip text for this property suitable for displaying in the
     *         explorer/experimenter gui
     */
    public String filePrefixTipText() {
        return "Prefix of outputfile for saved model(s).";
    }

    /**
     * Gets the prefix used in the name of the output file.
     *
     * @return the file prefix
     */
    public String getFilePrefix() {

        return m_FilePrefix;
    }

    /**
     * Sets the prefix used in the name of the output file.
     *
     * @param newFilePrefix the file prefix
     */
    public void setFilePrefix(String newFilePrefix) {

        m_FilePrefix = newFilePrefix;
    }

    /**
     * Returns the tip text for the validationPercent property.
     * FIX: was a copy-paste of the filePrefix tip text.
     *
     * @return tip text for this property suitable for displaying in the
     *         explorer/experimenter gui
     */
    public String validationPercentTipText() {
        return "Percentage of the training set held out as validation data.";
    }

    /**
     * Gets the percentage of the training set held out as validation data.
     *
     * @return the validation percentage (0-100)
     */
    public double getValidationPercent() {

        return m_validationPercent;
    }

    /**
     * Sets the percentage of the training set held out as validation data.
     *
     * @param newValidationPercent the validation percentage (0-100)
     */
    public void setValidationPercent(double newValidationPercent) {

        m_validationPercent = newValidationPercent;
    }
}
