package com.zhny.algorithm;

import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.spark.api.java.JavaPairRDD;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.mllib.tree.RandomForest;
import org.apache.spark.mllib.tree.model.RandomForestModel;
import org.apache.spark.mllib.util.MLUtils;
import scala.Tuple2;

// Random-forest classification utilities built on Spark MLlib's RDD-based API.
// Both methods create their own local SparkContext, so they are intended to be
// run standalone (not inside an already-running Spark application).
public class RandomForestClassificationUtil {

    /**
     * Trains a random-forest classifier from a LIBSVM-format file and saves the
     * resulting model to {@code modelFilePath}.
     *
     * @param libSVMDataFile path to the training data in LIBSVM format
     * @param modelFilePath  directory path where the trained model is saved
     * @param maxDepth       maximum depth of each tree
     * @param maxBins        maximum number of bins used for splitting features
     * @param seed           random seed for bootstrapping and feature subsets
     * @param numClasses     number of label classes
     * @param numTrees       number of trees in the forest
     */
    public static void generatePredictModel(String libSVMDataFile,
                           String modelFilePath,
                           int maxDepth,
                           int maxBins,
                           int seed,
                           int numClasses,
                           int numTrees) {
        SparkConf sparkConf = new SparkConf().setAppName("RandomForestClassification").setMaster("local[*]");
        sparkConf.set("spark.driver.allowMultipleContexts", "true");
        JavaSparkContext jsc = new JavaSparkContext(sparkConf);

        try {
            JavaRDD<LabeledPoint> data = MLUtils.loadLibSVMFile(jsc.sc(), libSVMDataFile).toJavaRDD();

            // Empty map: every feature is treated as continuous (no categorical features).
            Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();

            String featureSubsetStrategy = "auto"; // let Spark pick based on numTrees
            String impurity = "gini";

            RandomForestModel model = RandomForest.trainClassifier(data,
                    numClasses,
                    categoricalFeaturesInfo,
                    numTrees,
                    featureSubsetStrategy,
                    impurity,
                    maxDepth,
                    maxBins,
                    seed);

            model.save(jsc.sc(), modelFilePath);
        } finally {
            jsc.stop(); // release the context even if training or saving fails
        }
    }

    /**
     * Loads a saved model, predicts every point in a LIBSVM-format file, prints
     * the test error against the file's labels, and writes one prediction per
     * line to {@code resultFilePath}. I/O failures while writing the result
     * file are logged and swallowed (best-effort, matching original behavior).
     *
     * @param modelFilePath  directory path of a previously saved model
     * @param libSvmDataFile path to the data to predict, in LIBSVM format
     * @param resultFilePath path of the text file to write predictions to
     */
    public static void exc(String modelFilePath, String libSvmDataFile, String resultFilePath) {
        SparkConf sparkConf = new SparkConf().setAppName("RandomForestClassification").setMaster("local[*]");
        sparkConf.set("spark.driver.allowMultipleContexts", "true");
        JavaSparkContext jsc = new JavaSparkContext(sparkConf);

        try {
            RandomForestModel model = RandomForestModel.load(jsc.sc(), modelFilePath);
            JavaRDD<LabeledPoint> predictData = MLUtils.loadLibSVMFile(jsc.sc(), libSvmDataFile).toJavaRDD();
            // Three actions below (count via filter, count, collect) reuse this RDD;
            // cache it so the input file is not re-read each time.
            predictData.cache();

            JavaPairRDD<Double, Double> predictionAndLabel =
                    predictData.mapToPair(p -> {
                        // Predict once per point and reuse the value (the original
                        // called model.predict twice per record).
                        double prediction = model.predict(p.features());
                        System.out.println("-------------------");
                        System.out.println(prediction);
                        return new Tuple2<>(prediction, p.label());
                    });

            double testErr =
                    predictionAndLabel.filter(pl -> !pl._1().equals(pl._2())).count() / (double) predictData.count();
            System.out.println("Test Error: " + testErr);

            JavaRDD<String> result =
                    predictData.map(p -> model.predict(p.features()) + "");

            // try-with-resources guarantees the writer is closed (and flushed)
            // even if collect() or write() throws.
            try (FileWriter fos = new FileWriter(new File(resultFilePath))) {
                for (String str : result.collect()) {
                    fos.write(str + "\n");
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        } finally {
            jsc.stop(); // release the context even if prediction fails
        }
    }
}
