package com.fr.football;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.linalg.Vectors;
import org.apache.spark.mllib.regression.LabeledPoint;

import org.apache.spark.mllib.classification.NaiveBayes;
import org.apache.spark.mllib.classification.NaiveBayesModel;

import org.apache.spark.mllib.classification.SVMModel;
import org.apache.spark.mllib.classification.SVMWithSGD;
import org.apache.spark.mllib.evaluation.MulticlassMetrics;

import java.io.FileNotFoundException;
import java.io.FileReader;
import java.util.HashMap;
import java.util.Map;
import org.apache.spark.mllib.tree.DecisionTree;
import org.apache.spark.mllib.tree.model.DecisionTreeModel;

import org.apache.spark.mllib.tree.RandomForest;
import org.apache.spark.mllib.tree.model.RandomForestModel;

import scala.Tuple2;

/**
 * Experiment driver: for each data-set index in {@link #txt}, loads a tab-separated
 * file from F:\data\&lt;n&gt;.txt, splits it 80/10/10, trains a decision-tree
 * classifier (Spark MLlib RDD API) and prints its precision on the held-out test split.
 *
 * <p>NaiveBayes, SVMWithSGD and RandomForest were also tried here previously; see the
 * corresponding mllib classes if those experiments need to be revived.
 */
public class Test {
	/** Indices of the input data files (F:\data\&lt;n&gt;.txt) to evaluate. */
	public static final int [] txt = {1,3,4,8,9, 12};

	@SuppressWarnings("deprecation")
	public static void main(String[] arg) {
		// Build the Spark context: local mode, [*] = use all available CPU cores;
		// "Spark" is the application name; ~2 GB of test memory.
		SparkConf conf = new SparkConf();
		conf.set("spark.testing.memory", "2147480000");
		// try-with-resources: JavaSparkContext is Closeable — the original leaked it.
		try (JavaSparkContext sc = new JavaSparkContext("local[*]", "Spark", conf)) {
			for (int i : txt) {
				JavaRDD<LabeledPoint> data = readData(sc, i);
				// 80% training / 10% cross-validation / 10% test.
				JavaRDD<LabeledPoint>[] splitArray = data.randomSplit(new double[] { 0.8, 0.1, 0.1 });
				JavaRDD<LabeledPoint> training = splitArray[0];
				training.cache();
				JavaRDD<LabeledPoint> cvData = splitArray[1];
				cvData.cache();
				JavaRDD<LabeledPoint> testData = splitArray[2];
				testData.cache();

				// Decision tree.
				int numClasses = 3; // number of label classes (labels are 0/1/2 after remapping, see readData)
				// Empty map: all features are treated as continuous.
				Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
				// Impurity measure: "gini" or "entropy" for classification
				// ("variance" would be used for regression).
				String impurity = "gini";
				int maxDepth = 5; // maximum tree depth
				int maxBins = 32; // maximum number of split bins
				final DecisionTreeModel tree_model = DecisionTree.trainClassifier(training, numClasses,
						categoricalFeaturesInfo, impurity, maxDepth, maxBins);
				System.out.println("决策树分类结果：");
				MulticlassMetrics tree_metrics = getMetrics(tree_model, testData);
				// NOTE(review): precision() is deprecated in Spark 2.x (it equals accuracy());
				// kept for compatibility with the @SuppressWarnings above.
				System.out.println(i + ":" + tree_metrics.precision());
			}
		}
	}

	/**
	 * Loads one tab-separated data file into labeled points.
	 *
	 * <p>Each line is split on tabs: all columns but the last are parsed as double
	 * features, the last column is the class label. Label value 3 is remapped to 2
	 * so labels form the contiguous range expected by the 3-class tree above.
	 *
	 * @param jsc active Spark context used to read the text file
	 * @param j   data-set index; the file read is F:\data\&lt;j&gt;.txt
	 * @return RDD of labeled feature vectors (parsing happens lazily on the executors)
	 */
	public static JavaRDD<LabeledPoint> readData(JavaSparkContext jsc, int j) {
		JavaRDD<String> rawData = jsc.textFile("F:\\data\\" + j + ".txt");

		return rawData.map(line -> {
			String[] values = line.split("\t");
			double[] features = new double[values.length - 1];
			for (int i = 0; i < values.length - 1; i++) {
				// Original skipped assigning 0.0 values — a no-op, since double[]
				// elements already default to 0.0; assign unconditionally.
				features[i] = Double.parseDouble(values[i]);
			}
			Vector featureVector = Vectors.dense(features);
			double label = Double.parseDouble(values[values.length - 1]);
			if (label == 3) {
				label = 2; // collapse class 3 into class 2
			}
			return new LabeledPoint(label, featureVector);
		});
	}

	/**
	 * Scores a decision-tree model on a data set.
	 *
	 * @param model trained decision-tree model
	 * @param data  labeled evaluation data
	 * @return multiclass metrics over (prediction, label) pairs
	 */
	public static MulticlassMetrics getMetrics(DecisionTreeModel model, JavaRDD<LabeledPoint> data) {
		JavaPairRDD<Object, Object> predictionsAndLabels = data.mapToPair(
				example -> new Tuple2<Object, Object>(model.predict(example.features()), example.label()));
		return new MulticlassMetrics(JavaPairRDD.toRDD(predictionsAndLabels));
	}

	/**
	 * Scores a random-forest model on a data set.
	 *
	 * <p>Overload kept separate from the decision-tree variant because the two model
	 * classes share no common predict interface in the RDD-based mllib API.
	 *
	 * @param model trained random-forest model
	 * @param data  labeled evaluation data
	 * @return multiclass metrics over (prediction, label) pairs
	 */
	public static MulticlassMetrics getMetrics(RandomForestModel model, JavaRDD<LabeledPoint> data) {
		JavaPairRDD<Object, Object> predictionsAndLabels = data.mapToPair(
				example -> new Tuple2<Object, Object>(model.predict(example.features()), example.label()));
		return new MulticlassMetrics(JavaPairRDD.toRDD(predictionsAndLabels));
	}
}
