/**
 * December 6, 2017
 */
/*
 *   This program is free software: you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, either version 3 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 *    CascadeForest.java (derived from AdditiveRegression.java)
 *    Copyright (C) 2000-2012 University of Waikato, Hamilton, New Zealand
 *
 */

package exp.algorithm.gbdt;

import java.util.Enumeration;
import java.util.Vector;

import exp.util.DatasetsUtil;
import timeseriesweka.classifiers.TSF;
import utilities.CrossValidator;
import weka.classifiers.Classifier;
import weka.classifiers.IteratedSingleClassifierEnhancer;
import weka.classifiers.meta.AdditiveRegression;
import weka.classifiers.rules.ZeroR;
import weka.core.AdditionalMeasureProducer;
import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Capabilities.Capability;
import weka.core.DenseInstance;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Option;
import weka.core.OptionHandler;
import weka.core.RevisionUtils;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.WeightedInstancesHandler;
public class CascadeForest extends IteratedSingleClassifierEnhancer
		implements OptionHandler, AdditionalMeasureProducer, WeightedInstancesHandler, TechnicalInformationHandler {
	
	static final long serialVersionUID = -2368937577670527151L;
	protected double m_shrinkage = 1.0;
	protected int m_NumIterationsPerformed;
	protected ZeroR m_zeroR;
	protected boolean m_SuitableData = true;
	double epision = -1;
	
	
	public String globalInfo() {
		return " Meta classifier that enhances the performance of a regression "
				+ "base classifier. Each iteration fits a model to the residuals left "
				+ "by the classifier on the previous iteration. Prediction is "
				+ "accomplished by adding the predictions of each classifier. "
				+ "Reducing the shrinkage (learning rate) parameter helps prevent "
				+ "overfitting and has a smoothing effect but increases the learning " + "time.\n\n"
				+ "For more information see:\n\n" + getTechnicalInformation().toString();
	}

	public TechnicalInformation getTechnicalInformation() {
		TechnicalInformation result;
		result = new TechnicalInformation(Type.TECHREPORT);
		result.setValue(Field.AUTHOR, "J.H. Friedman");
		result.setValue(Field.YEAR, "1999");
		result.setValue(Field.TITLE, "Stochastic Gradient Boosting");
		result.setValue(Field.INSTITUTION, "Stanford University");
		result.setValue(Field.PS, "http://www-stat.stanford.edu/~jhf/ftp/stobst.ps");
		return result;
	}
	public CascadeForest(double epision) {
		this(new TSF());
		this.epision = epision;
	}
	protected CascadeForest(Classifier classifier) {
		m_Classifier = classifier;
	}

	protected String defaultClassifierString() {
		return "xxx";
	}

	@SuppressWarnings({ "rawtypes", "unchecked" })
	public Enumeration listOptions() {

		Vector newVector = new Vector(4);

		newVector.addElement(
				new Option("\tSpecify shrinkage rate. " + "(default = 1.0, ie. no shrinkage)\n", "S", 1, "-S"));

		Enumeration enu = super.listOptions();
		while (enu.hasMoreElements()) {
			newVector.addElement(enu.nextElement());
		}
		return newVector.elements();
	}
	public void setOptions(String[] options) throws Exception {

		String optionString = Utils.getOption('S', options);
		if (optionString.length() != 0) {
			Double temp = Double.valueOf(optionString);
			setShrinkage(temp.doubleValue());
		}

		super.setOptions(options);
	}
	public String[] getOptions() {

		String[] superOptions = super.getOptions();
		String[] options = new String[superOptions.length + 2];
		int current = 0;

		options[current++] = "-S";
		options[current++] = "" + getShrinkage();

		System.arraycopy(superOptions, 0, options, current, superOptions.length);

		current += superOptions.length;
		while (current < options.length) {
			options[current++] = "";
		}
		return options;
	}

	public String shrinkageTipText() {
		return "Shrinkage rate. Smaller values help prevent overfitting and "
				+ "have a smoothing effect (but increase learning time). " + "Default = 1.0, ie. no shrinkage.";
	}

	public void setShrinkage(double l) {
		m_shrinkage = l;
	}

	public double getShrinkage() {
		return m_shrinkage;
	}

	public Capabilities getCapabilities() {
		Capabilities result = super.getCapabilities();
		result.disableAllClasses();
		result.disableAllClassDependencies();
		result.enable(Capability.NUMERIC_CLASS);
		result.enable(Capability.DATE_CLASS);
		return result;
	}

	public void buildClassifier(Instances data) throws Exception {
		super.buildClassifier(data);
		getCapabilities().testWithFail(data);
		Instances newData = new Instances(data);
		newData.deleteWithMissingClass();
		double sum = 0;
		double temp_sum = 0;
		m_zeroR = new ZeroR();
		m_zeroR.buildClassifier(newData);
		if (newData.numAttributes() == 1) {
			System.err.println(
					"Cannot build model (only class attribute present in data!), " + "using ZeroR model instead!");
			m_SuitableData = false;
			return;
		} else {
			m_SuitableData = true;
		}
//		CrossValidator cv;
		
//		newData = residualReplace(newData, m_zeroR, false);
//		for (int i = 0; i < newData.numInstances(); i++) {
//			sum += newData.instance(i).weight() * newData.instance(i).classValue() * newData.instance(i).classValue();
//		}
		double [] error = new double[1];
		double errorBefore = error[0];
		Instances iterResult = newData;
//		error[0] = epision;
		m_NumIterationsPerformed = 0;
		do {
			temp_sum = sum;
			m_Classifiers[m_NumIterationsPerformed].buildClassifier(newData);
			//形成新的训练集 用于下一次的训练
			errorBefore = error[0];
			iterResult = predictAndExtendFeatureSpace(m_Classifiers[m_NumIterations], newData , error);
			
//			newData = residualReplace(newData, m_Classifiers[m_NumIterationsPerformed], true);
//			sum = 0;
//			for (int i = 0; i < newData.numInstances(); i++) {
//				sum += newData.instance(i).weight() * newData.instance(i).classValue() * newData.instance(i).classValue();
//			}
			m_NumIterationsPerformed++;
			//迭代的误差减少很少 且误差没达到要求
		} while (  error[0] > epision  && ( Math.abs(error[0] - errorBefore) > 0.05 ) && (m_NumIterationsPerformed < m_Classifiers.length));
	}
	
	private Instances predictAndExtendFeatureSpace(Classifier c,Instances data , double[]error) throws Exception{
		Instances newData = DatasetsUtil.initInstancesWithParmas("cas_extened_dataset", data.numAttributes()+data.numClasses(), "cas_extended_", data.numInstances(), true, (Attribute)data.classAttribute());
		int numInst = data.numInstances();
		int correct =  0;
		for(int i = 0 ;i< numInst;i++){
			Instance in = data.instance(i);
			double[]inst = in.toDoubleArray();
			double[]dist = c.distributionForInstance(in);
			int idx = Utils.maxIndex(dist);
			if(idx == (int)in.classValue() ){
				correct  ++ ;
			}
			double []vals = new double[newData.numAttributes()];
			//排除类标
			System.arraycopy(inst, 0, vals, 0, inst.length-1);
			System.arraycopy(dist, 0, vals, inst.length-2, dist.length);
			DenseInstance di = new DenseInstance(1.0,vals);
			di.setDataset(newData);
			newData.add(di);
		}
		error[0] = (double)correct /  (double)numInst;
		return newData;
	}
	
	void extend(double []inst,double []dist,double vals[]){
		
	}
	
	public double classifyInstance(Instance inst) throws Exception {
		double prediction = m_zeroR.classifyInstance(inst);
		if (!m_SuitableData) {
			return prediction;
		}
		for (int i = 0; i < m_NumIterationsPerformed; i++) {
			
			double [] dist =  m_Classifiers[i].distributionForInstance(inst);
			
			double toAdd = m_Classifiers[i].classifyInstance(inst);
			toAdd *= getShrinkage();
			prediction += toAdd;
		}
		return prediction;
	}

	private Instances residualReplace(Instances data, Classifier c, boolean useShrinkage) throws Exception {
		double pred, residual;
		Instances newInst = new Instances(data);

		for (int i = 0; i < newInst.numInstances(); i++) {
			pred = c.classifyInstance(newInst.instance(i));
			if (useShrinkage) {
				pred *= getShrinkage();
			}
			residual = newInst.instance(i).classValue() - pred;
			newInst.instance(i).setClassValue(residual);
		}
		// System.err.print(newInst);
		return newInst;
	}

	/**
	 * Returns an enumeration of the additional measure names
	 * 
	 * @return an enumeration of the measure names
	 */
	public Enumeration enumerateMeasures() {
		Vector newVector = new Vector(1);
		newVector.addElement("measureNumIterations");
		return newVector.elements();
	}

	/**
	 * Returns the value of the named measure
	 * 
	 * @param additionalMeasureName
	 *            the name of the measure to query for its value
	 * @return the value of the named measure
	 * @throws IllegalArgumentException
	 *             if the named measure is not supported
	 */
	public double getMeasure(String additionalMeasureName) {
		if (additionalMeasureName.compareToIgnoreCase("measureNumIterations") == 0) {
			return measureNumIterations();
		} else {
			throw new IllegalArgumentException(additionalMeasureName + " not supported (AdditiveRegression)");
		}
	}

	/**
	 * return the number of iterations (base classifiers) completed
	 * 
	 * @return the number of iterations (same as number of base classifier
	 *         models)
	 */
	public double measureNumIterations() {
		return m_NumIterationsPerformed;
	}

	/**
	 * Returns textual description of the classifier.
	 *
	 * @return a description of the classifier as a string
	 */
	public String toString() {
		StringBuffer text = new StringBuffer();

		// only ZeroR model?
		if (!m_SuitableData) {
			StringBuffer buf = new StringBuffer();
			buf.append(this.getClass().getName().replaceAll(".*\\.", "") + "\n");
			buf.append(this.getClass().getName().replaceAll(".*\\.", "").replaceAll(".", "=") + "\n\n");
			buf.append("Warning: No model could be built, hence ZeroR model is used:\n\n");
			buf.append(m_zeroR.toString());
			return buf.toString();
		}

		if (m_NumIterations == 0) {
			return "Classifier hasn't been built yet!";
		}

		text.append("Additive Regression\n\n");

		text.append("ZeroR model\n\n" + m_zeroR + "\n\n");

		text.append("Base classifier " + getClassifier().getClass().getName() + "\n\n");
		text.append("" + m_NumIterationsPerformed + " models generated.\n");

		for (int i = 0; i < m_NumIterationsPerformed; i++) {
			text.append("\nModel number " + i + "\n\n" + m_Classifiers[i] + "\n");
		}

		return text.toString();
	}

	/**
	 * Returns the revision string.
	 * 
	 * @return the revision
	 */
	public String getRevision() {
		return RevisionUtils.extract("$Revision: 8034 $");
	}

	/**
	 * Main method for testing this class.
	 *
	 * @param argv
	 *            should contain the following arguments: -t training file [-T
	 *            test file] [-c class index]
	 */
	public static void main(String[] argv) {
//		runClassifier(new CascadeForest(), argv);
	}
}
