package crawler.web.engine.neural_network.classifier.feedforward;

import java.io.File;

import org.encog.engine.StatusReportable;
import org.encog.normalize.DataNormalization;
import org.encog.normalize.input.InputField;
import org.encog.normalize.input.InputFieldCSV;
import org.encog.normalize.output.OutputFieldDirect;
import org.encog.normalize.output.OutputFieldRangeMapped;
import org.encog.normalize.output.nominal.OutputEquilateral;
import org.encog.normalize.segregate.IntegerBalanceSegregator;
import org.encog.normalize.segregate.index.IndexSampleSegregator;
import org.encog.normalize.target.NormalizationStorageCSV;

/**
 * Prepares the forest cover-type CSV data for neural-network training:
 * splits it into training/evaluation sets, balances the classes, and
 * normalizes the fields using Encog's {@link DataNormalization} pipeline.
 */
public class GenerateData implements StatusReportable {

	/** Number of columns in the raw cover-type CSV (54 features + 1 class label). */
	private static final int FIELD_COUNT = 55;

	/** Zero-based index of the cover-type (tree species) class column. */
	private static final int COVER_TYPE_INDEX = 54;

	/** Maximum rows kept per tree type when balancing the training data. */
	private static final int BALANCE_COUNT = 3000;

	/**
	 * Copies a fraction ((stop - start + 1) / size) of the rows from source to
	 * target. Rows are grouped into consecutive segments of {@code size} lines;
	 * within each segment only rows whose index lies in [start, stop] are kept.
	 * E.g. start=0, stop=2, size=4 keeps rows 0, 1, 2 of every 4-row segment,
	 * i.e. 3/4 of the data.
	 *
	 * @param source CSV file to read from
	 * @param target CSV file to write the selected rows to
	 * @param start  first row index to keep within each segment (inclusive)
	 * @param stop   last row index to keep within each segment (inclusive)
	 * @param size   segment size in rows
	 */
	public void copy(File source, File target, int start, int stop, int size) {
		// Holds all 55 fields of the CSV.
		InputField[] inputField = new InputField[FIELD_COUNT];

		DataNormalization norm = new DataNormalization();
		norm.setReport(this);
		// File that receives the copied rows.
		norm.setTarget(new NormalizationStorageCSV(target));

		// Pass every column straight through: each CSV field becomes both an
		// input and a direct-copy output, so no values are transformed here.
		for (int i = 0; i < FIELD_COUNT; i++) {
			inputField[i] = new InputFieldCSV(true, source, i);
			norm.addInputField(inputField[i]);
			// The output fields are all direct copies of the input fields.
			norm.addOutputField(new OutputFieldDirect(inputField[i]));
		}

		// Keep only the requested slice of each segment
		// (used to separate the training and evaluation subsets).
		norm.addSegregator(new IndexSampleSegregator(start, stop, size));

		norm.process();
	}

	/**
	 * Step 1: splits the full sample file into a training file (3/4 of the
	 * rows) and an evaluation file (the remaining 1/4).
	 */
	public void step1() {
		System.out.println("Step 1: Generate training and evaluation files");
		System.out.println("Generate training file");
		copy(Constant.COVER_TYPE_FILE, Constant.TRAINING_FILE, 0, 2, 4); // rows 0-2 of each 4 => 3/4
		System.out.println("Generate evaluation file");
		copy(Constant.COVER_TYPE_FILE, Constant.EVALUATE_FILE, 3, 3, 4); // row 3 of each 4 => 1/4
	}

	/**
	 * Step 2: balances the training data by tree type, discarding rows so
	 * that no single cover type appears more than {@code BALANCE_COUNT} times.
	 */
	public void step2() {
		System.out.println("Step 2: Balance training to have the same number of each tree");

		InputField[] inputField = new InputField[FIELD_COUNT];
		DataNormalization norm = new DataNormalization();
		norm.setReport(this);
		norm.setTarget(new NormalizationStorageCSV(Constant.BALANCE_FILE));

		// Direct copy of every column; balancing happens via the segregator below.
		for (int i = 0; i < FIELD_COUNT; i++) {
			inputField[i] = new InputFieldCSV(true, Constant.TRAINING_FILE, i);
			norm.addInputField(inputField[i]);
			norm.addOutputField(new OutputFieldDirect(inputField[i]));
		}

		// This segregator allows at most BALANCE_COUNT rows for each distinct
		// value of the cover-type (tree species) column.
		IntegerBalanceSegregator segregator =
				new IntegerBalanceSegregator(inputField[COVER_TYPE_INDEX], BALANCE_COUNT);
		norm.addSegregator(segregator);

		norm.process();
		System.out.println("Samples per tree type:");
		System.out.println(segregator.dumpCounts());
	}

	/**
	 * Step 3: normalizes the balanced training data. The ten continuous
	 * fields are range-mapped to [0.1, 0.9], the 40 soil-type flags are
	 * copied directly, the 4 wilderness flags are dropped, and the cover
	 * type is encoded with equilateral encoding as the ideal (target) output.
	 *
	 * @return the configured {@link DataNormalization} instance, so callers
	 *         can query e.g. {@code getNetworkInputLayerSize()}
	 */
	public DataNormalization step3() {
		System.out.println("Step 3: Normalize training data");
		InputField inputElevation;
		InputField inputAspect;
		InputField inputSlope;
		InputField hWater;
		InputField vWater;
		InputField roadway;
		InputField shade9;
		InputField shade12;
		InputField shade3;
		InputField firepoint;
		InputField[] wilderness = new InputField[4];
		InputField[] soilType = new InputField[40];
		InputField coverType;

		DataNormalization norm = new DataNormalization();
		norm.setReport(this);
		norm.setTarget(new NormalizationStorageCSV(Constant.NORMALIZED_FILE));

		// -------- Input fields (fed into the normalization pipeline) --------
		norm.addInputField(inputElevation = new InputFieldCSV(true, Constant.BALANCE_FILE, 0));
		norm.addInputField(inputAspect = new InputFieldCSV(true, Constant.BALANCE_FILE, 1));
		norm.addInputField(inputSlope = new InputFieldCSV(true, Constant.BALANCE_FILE, 2));
		norm.addInputField(hWater = new InputFieldCSV(true, Constant.BALANCE_FILE, 3));
		norm.addInputField(vWater = new InputFieldCSV(true, Constant.BALANCE_FILE, 4));
		norm.addInputField(roadway = new InputFieldCSV(true, Constant.BALANCE_FILE, 5));
		norm.addInputField(shade9 = new InputFieldCSV(true, Constant.BALANCE_FILE, 6));
		norm.addInputField(shade12 = new InputFieldCSV(true, Constant.BALANCE_FILE, 7));
		norm.addInputField(shade3 = new InputFieldCSV(true, Constant.BALANCE_FILE, 8));
		norm.addInputField(firepoint = new InputFieldCSV(true, Constant.BALANCE_FILE, 9));

		// These 4 wilderness fields are dropped at output time, so they are
		// not part of the neural network's input.
		for (int i = 0; i < 4; i++) {
			norm.addInputField(wilderness[i] = new InputFieldCSV(true, Constant.BALANCE_FILE, 10 + i));
		}

		for (int i = 0; i < 40; i++) {
			norm.addInputField(soilType[i] = new InputFieldCSV(true, Constant.BALANCE_FILE, 14 + i));
		}

		// The cover type (tree species) is the neural network's target output.
		// norm.getNetworkInputLayerSize() will report 50: of the 55 columns,
		// the 4 wilderness fields are dropped and 1 (coverType) is the target.
		norm.addInputField(coverType = new InputFieldCSV(false, Constant.BALANCE_FILE, COVER_TYPE_INDEX));

		// -------- Output fields (what the neural network consumes) --------
		// Map each continuous field into [0.1, 0.9]; one output per field.
		// Order must match the original column order.
		InputField[] continuous = {
				inputElevation, inputAspect, inputSlope, hWater, vWater,
				roadway, shade9, shade12, shade3, firepoint
		};
		for (InputField field : continuous) {
			norm.addOutputField(new OutputFieldRangeMapped(field, 0.1, 0.9));
		}

		// The 40 soil-type flags are copied through unchanged.
		for (int i = 0; i < 40; i++) {
			norm.addOutputField(new OutputFieldDirect(soilType[i]));
		}

		// Encode the 7 tree species with equilateral encoding.
		OutputEquilateral outType = new OutputEquilateral(0.9, 0.1);
		for (int species = 1; species <= 7; species++) {
			outType.addItem(coverType, species);
		}
		// Equilateral encoding of 7 classes yields 6 outputs, used as the
		// ideal (target) data when training the network;
		// norm.getNetworkOutputLayerSize() will report 6.
		norm.addOutputField(outType, true);

		norm.process();
		return norm;
	}

	/**
	 * Progress callback from Encog; prints "current/total message" to stdout.
	 *
	 * @param total   total number of work units
	 * @param current work units completed so far
	 * @param message status description
	 */
	@Override
	public void report(int total, int current, String message) {
		System.out.println(current + "/" + total + " " + message);
	}

	/** Step 0 (not yet implemented): export the raw data from MongoDB to CSV. */
	public void step0() {
		// TODO ANN: read the data out of MongoDB and write it to CSV.
	}
}

//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//