package com.zhny.algorithm;

import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import com.google.common.base.Strings;
import org.apache.spark.api.java.JavaPairRDD;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.mllib.classification.NaiveBayes;
import org.apache.spark.mllib.classification.NaiveBayesModel;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.linalg.Vectors;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.mllib.tree.RandomForest;
import org.apache.spark.mllib.tree.model.RandomForestModel;
import org.apache.spark.mllib.util.MLUtils;
import scala.Tuple2;

// Naive Bayes classifier (train / predict with Spark MLlib)
public class NaiveBayesUtil {

    /**
     * Trains a Naive Bayes model from a LibSVM-formatted data file and persists it.
     *
     * @param dataFilePath  path to the LibSVM training data; the method is a no-op
     *                      when this is null or empty
     * @param modelFilePath directory where the trained model is saved
     */
    public static void generatePredictModel(String dataFilePath, String modelFilePath) {
        // Validate input BEFORE spinning up a Spark context; the original checked
        // afterwards and leaked the context on the early return.
        if (Strings.isNullOrEmpty(dataFilePath)) {
            return;
        }

        SparkConf sparkConf = new SparkConf().setAppName("NaiveBayes").setMaster("local[*]");
        sparkConf.set("spark.driver.allowMultipleContexts", "true");

        JavaSparkContext jsc = new JavaSparkContext(sparkConf);
        try {
            JavaRDD<LabeledPoint> inputData =
                    MLUtils.loadLibSVMFile(jsc.sc(), dataFilePath).toJavaRDD();

            // lambda = 1.0 -> additive (Laplace) smoothing.
            NaiveBayesModel model = NaiveBayes.train(inputData.rdd(), 1.0);
            model.save(jsc.sc(), modelFilePath);
        } finally {
            // Release the context even if loading/training/saving throws.
            jsc.stop();
        }
    }

    /**
     * Loads a persisted Naive Bayes model, predicts labels for a LibSVM data file,
     * prints the test error to stdout, and writes one predicted label per line to
     * the result file.
     *
     * @param modelFilePath  directory containing the persisted model
     * @param libSvmDataFile LibSVM-formatted input data to predict on
     * @param resultFilePath output file receiving one prediction per line
     */
    public static void exc(String modelFilePath, String libSvmDataFile, String resultFilePath) {
        SparkConf sparkConf = new SparkConf().setAppName("NaiveBayes").setMaster("local[*]");
        sparkConf.set("spark.driver.allowMultipleContexts", "true");

        JavaSparkContext jsc = new JavaSparkContext(sparkConf);
        try {
            NaiveBayesModel model = NaiveBayesModel.load(jsc.sc(), modelFilePath);
            JavaRDD<LabeledPoint> predictData =
                    MLUtils.loadLibSVMFile(jsc.sc(), libSvmDataFile).toJavaRDD();
            // The RDD is traversed three times below (error filter, count,
            // prediction collect); cache it so the LibSVM file is loaded once.
            predictData.cache();

            JavaPairRDD<Double, Double> predictionAndLabel =
                    predictData.mapToPair(p -> new Tuple2<>(model.predict(p.features()), p.label()));

            double testErr =
                    predictionAndLabel.filter(pl -> !pl._1().equals(pl._2())).count()
                            / (double) predictData.count();
            System.out.println("Test Error: " + testErr);

            JavaRDD<String> result =
                    predictData.map(p -> String.valueOf(model.predict(p.features())));

            // try-with-resources guarantees the writer is flushed and closed even
            // when a write fails; the original leaked the FileWriter on exception.
            try (FileWriter fos = new FileWriter(new File(resultFilePath))) {
                for (String str : result.collect()) {
                    fos.write(str + "\n");
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        } finally {
            jsc.stop();
        }
    }
}
