package spark.Task3;


import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.mllib.classification.NaiveBayes;
import org.apache.spark.mllib.classification.NaiveBayesModel;
import org.apache.spark.mllib.linalg.Vectors;
import org.apache.spark.mllib.regression.LabeledPoint;

import scala.Tuple2;


/**
 * Trains a multinomial Naive Bayes classifier on stock data read from a CSV
 * file and prints the classification accuracy on a held-out 20% test split.
 *
 * Usage: Task3 [inputPath] — the optional first argument overrides the
 * default CSV location.
 */
public class Task3 {

	/** Default input path, kept for backward compatibility when no CLI argument is given. */
	private static final String DEFAULT_INPUT = "/home/jinhaitao/stock_data.csv";

	public static void main(String[] args) {
		// Allow the CSV path to be overridden from the command line;
		// falls back to the original hard-coded path so existing invocations still work.
		final String inputPath = (args.length > 0) ? args[0] : DEFAULT_INPUT;

		SparkConf conf = new SparkConf().setAppName("NaiveBayesTest").setMaster("local[*]");
		try (JavaSparkContext sc = new JavaSparkContext(conf)) {
			JavaRDD<String> lines = sc.textFile(inputPath);

			// Parse each CSV row into a LabeledPoint:
			//   label    = column 7
			//   features = [year, month, column 3, column 4, column 5]
			// NOTE(review): column 2 is assumed to be a date string shaped like
			// "yyyy-MM..." so that substring(0,4)/substring(5,7) yield year and
			// month — confirm against the actual data file.
			JavaRDD<LabeledPoint> data = lines.map(new Function<String, LabeledPoint>() {
				private static final long serialVersionUID = 1L;

				@Override
				public LabeledPoint call(String str) throws Exception {
					String[] cols = str.split(",");
					return new LabeledPoint(
							Double.parseDouble(cols[7]),
							Vectors.dense(
									Double.parseDouble(cols[2].substring(0, 4)),
									Double.parseDouble(cols[2].substring(5, 7)),
									Double.parseDouble(cols[3]),
									Double.parseDouble(cols[4]),
									Double.parseDouble(cols[5])));
				}
			});

			// 80/20 train/test split; fixed seed (11L) keeps the split reproducible.
			JavaRDD<LabeledPoint>[] splits = data.randomSplit(new double[] { 0.8, 0.2 }, 11L);
			JavaRDD<LabeledPoint> traindata = splits[0];
			// Cache: testdata is consumed twice below (prediction and count),
			// and caching avoids recomputing the whole parse/split lineage.
			JavaRDD<LabeledPoint> testdata = splits[1].cache();

			// Smoothing parameter 1.0 (Laplace), multinomial model type.
			final NaiveBayesModel model = NaiveBayes.train(traindata.rdd(), 1.0, "multinomial");

			// Pair each test point's predicted label with its true label.
			JavaPairRDD<Double, Double> predictionAndLabel = testdata
					.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
						private static final long serialVersionUID = 1L;

						@Override
						public Tuple2<Double, Double> call(LabeledPoint p) {
							return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
						}
					});

			// Guard against division by zero when the split leaves no test rows.
			long testCount = testdata.count();
			if (testCount == 0) {
				System.out.println("模型精度为：NaN (empty test set)");
				return;
			}

			// Accuracy = (# predictions equal to the true label) / (# test rows).
			double accuracy = predictionAndLabel.filter(new Function<Tuple2<Double, Double>, Boolean>() {
				private static final long serialVersionUID = 1L;

				@Override
				public Boolean call(Tuple2<Double, Double> pl) {
					return pl._1().equals(pl._2());
				}
			}).count() / (double) testCount;

			System.out.println("模型精度为：" + accuracy);
		}
	}
}