package foundation.SurModel.rndForest;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import foundation.SurModel.rndForest.sample.MLearnTbl;
import foundation.SurModel.rndForest.sample.PerfLearnTblItem;
import foundation.SurModel.rndForest.sample.PerfSamResult;
import foundation.fileUtil.FileNameUtil;
import foundation.fileUtil.PropFileUtil;
import weka.classifiers.trees.RandomForest;
import weka.core.Instances;

public class Trainer {

	/*
	 * Trained forests are published through these static fields so other
	 * components can reuse them for prediction after training completes.
	 */
	public static RandomForest maxProUtilTrainer = null;
	public static RandomForest minProUtilTrainer = null;
	public static RandomForest restTrainer = null;
	public static RandomForest relbTrainer = null;

	/*
	 * RandomForest hyper-parameters are kept in setRFParameter.properties.
	 */
	String path = FileNameUtil.getPrjPath();
	PropFileUtil propFileUtil = PropFileUtil
			.getInstance(path + "src/foundation/SurModel/rndForest/setRFParameter.properties");
	int numTrees = Integer.parseInt(propFileUtil.getParameterValue("numTrees"));
	String batchSize = propFileUtil.getParameterValue("batchSize");
	int numDecimalPlaces = Integer.parseInt(propFileUtil.getParameterValue("NumDecimalPlaces"));

	/**
	 * Configures {@code forest} with the properties-file parameters and builds
	 * it on {@code trainData}; the last attribute is used as the class
	 * attribute. Extracted because the three train* methods shared this
	 * sequence verbatim.
	 *
	 * @param forest    the (already created) forest to configure and build
	 * @param trainData training instances; the class index is set here
	 * @throws Exception if Weka fails to build the classifier
	 */
	private void configureAndBuild(RandomForest forest, Instances trainData) throws Exception {
		trainData.setClassIndex(trainData.numAttributes() - 1);
		forest.setNumTrees(numTrees);
		forest.setBatchSize(batchSize);
		forest.setNumDecimalPlaces(numDecimalPlaces);
		forest.buildClassifier(trainData);
	}

	/**
	 * Trains the classifier predicting the processor with maximum utilization
	 * and writes training-set predictions back into the learning-table items.
	 * The model is published in {@link #maxProUtilTrainer} — even when
	 * training failed, matching the original behavior.
	 *
	 * @param mlTbl learning table supplying the training samples
	 */
	public void trainMaxProUtil(MLearnTbl mlTbl) {
		RandomForest forest = new RandomForest();
		try {
			// Training set plus its Weka-internal representation.
			PerfSamResult sampleRlt = mlTbl.getPefSamResult("trainMaxProUtil");
			ArrayList<PerfLearnTblItem> samples = sampleRlt.getSamples();
			Instances trainData = sampleRlt.getInstances();
			configureAndBuild(forest, trainData);

			// Fill each sample's predicted value unless the upper layer has
			// already filled it (it does so after detecting a wrong
			// prediction and resolving the true value).
			for (int i = 0; i < trainData.numInstances(); i++) {
				PerfLearnTblItem sample = samples.get(i);
				if (sample.getPredictMaxUtilProcNo() == -1) {
					int predictMaxUtilProcNo = Math
							.round((float) forest.classifyInstance(trainData.instance(i)));
					sample.setPredictMaxUtilProcNo(predictMaxUtilProcNo);
				}
			}
		} catch (Exception e) {
			// NOTE(review): failure is swallowed and an unbuilt model is still
			// published below; consider propagating once callers can handle it.
			e.printStackTrace();
		}
		maxProUtilTrainer = forest;
	}

	/**
	 * Trains the classifier predicting the processor with minimum utilization;
	 * otherwise structured identically to {@link #trainMaxProUtil(MLearnTbl)}.
	 * The model is published in {@link #minProUtilTrainer}.
	 *
	 * @param mlTbl learning table supplying the training samples
	 */
	public void trainMinProUtil(MLearnTbl mlTbl) {
		RandomForest forest = new RandomForest();
		try {
			PerfSamResult sampleRlt = mlTbl.getPefSamResult("trainMinProUtil");
			ArrayList<PerfLearnTblItem> samples = sampleRlt.getSamples();
			Instances trainData = sampleRlt.getInstances();
			configureAndBuild(forest, trainData);

			// -1 marks "not yet filled in by the upper layer".
			for (int i = 0; i < trainData.numInstances(); i++) {
				PerfLearnTblItem sample = samples.get(i);
				if (sample.getPredictMinUtilProcNo() == -1) {
					int predictMinUtilProcNo = Math
							.round((float) forest.classifyInstance(trainData.instance(i)));
					sample.setPredictMinUtilProcNo(predictMinUtilProcNo);
				}
			}
		} catch (Exception e) {
			e.printStackTrace();
		}
		minProUtilTrainer = forest;
	}

	/**
	 * Trains the response-time regression model and writes training-set
	 * predictions back into the learning-table items. The model is published
	 * in {@link #restTrainer}.
	 *
	 * @param mlTbl learning table supplying the training samples
	 */
	public void trainRest(MLearnTbl mlTbl) {
		RandomForest forest = new RandomForest();
		try {
			PerfSamResult sampleRlt = mlTbl.getPefSamResult("trainRest");
			ArrayList<PerfLearnTblItem> samples = sampleRlt.getSamples();
			Instances trainData = sampleRlt.getInstances();
			configureAndBuild(forest, trainData);

			for (int i = 0; i < trainData.numInstances(); i++) {
				PerfLearnTblItem sample = samples.get(i);
				// <= -1 marks "not yet filled in by the upper layer".
				// NOTE(review): deliberately kept as the original's <= check,
				// unlike the int == -1 guards above — confirm intent.
				if (Float.compare(sample.getPredictResTime(), -1f) <= 0) {
					float predictResTime = (float) forest.classifyInstance(trainData.instance(i));
					sample.setPredictResTime(predictResTime);
				}
			}
		} catch (Exception e) {
			e.printStackTrace();
		}
		restTrainer = forest;
	}

	/**
	 * Trains the reliability model. Not implemented yet.
	 *
	 * @param mlTbl learning table supplying the training samples
	 */
	public void trainRelb(MLearnTbl mlTbl) {
		// TODO: implement; remember to write results back into mlTbl.
	}

	/**
	 * Ad-hoc timing driver: loads samples from CSV into the learning table,
	 * trains the minimum-utilization model several times, then runs a few
	 * predictions on a hand-written feature vector.
	 */
	public static void main(String[] args) {

		List<List<Double>> data;
		try {
			data = DbToCSVForData.readCSV();
		} catch (IOException e) {
			// FIX: the original continued with data == null and crashed with a
			// NullPointerException at data.size(); bail out instead.
			e.printStackTrace();
			return;
		}

		MLearnTbl mlTbl = MLearnTbl.getInstance();
		// Copy CSV rows into the learning table: 14 feature columns followed
		// by the actual response time in column 14.
		for (List<Double> row : data) {
			PerfLearnTblItem item = new PerfLearnTblItem();
			List<StringBuffer> mlCodes = new ArrayList<>();
			for (int j = 0; j < 14; j++) {
				mlCodes.add(new StringBuffer().append(row.get(j)));
			}
			item.setMlDFcodes(mlCodes);
			item.setActualresTime((float) (double) row.get(14));
			mlTbl.addPerfItem(item);
		}

		ArrayList<Integer> strtIdxs = new ArrayList<>();
		strtIdxs.add(3);
		strtIdxs.add(12);
		strtIdxs.add(13);
		mlTbl.setStrtIdxs(strtIdxs);

		int size = 10;
		long trainstart = System.currentTimeMillis();

		RndForestSurModel rnd = RndForestSurModel.getInstance();
		for (int i = 0; i < size; i++) {
			rnd.trainMinProUtil();
		}
		long trainend = System.currentTimeMillis();

		long timetrain = trainend - trainstart;
		// System.out.println("one training run took " + timetrain / size + " ms");

		// Hand-written 14-column feature vector. Boxed values keep the exact
		// text the original produced: Double -> "11.311741", Integer -> "1".
		Object[] featureVals = { 11.311741, 17.981514, 19.593815, 8.7772665,
				1, 1, 1, 1, 1, 4, 4, 4, 1, 1 };
		List<StringBuffer> mlcode = new ArrayList<>();
		for (Object v : featureVals) {
			mlcode.add(new StringBuffer().append(v));
		}

		try {
			long predstar = System.currentTimeMillis();
			for (int i = 0; i < 10; i++) {
				rnd.predictMinProUtil(mlcode);
			}
			long predend = System.currentTimeMillis();
			long predTime = predend - predstar;

			// System.out.println("one prediction took " + predTime + " ms");

		} catch (Exception e) {
			e.printStackTrace();
		}
		// mlTbl.outPerfTblToDB();
		// mlTbl.outPerfSMQultoExcel(mlTbl.outPerfSMQul("", "", "", ""));

	}
}
