package com.zhny.test;

import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.parquet.Strings;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.linalg.Vectors;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.mllib.tree.DecisionTree;
import org.apache.spark.mllib.tree.model.DecisionTreeModel;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

public class DecisionTreeUtil {

    /** Bundled sample data used when the caller supplies no path. */
    private static final String DEFAULT_DATA_PATH = "src/main/resources/data/DecisionTreeData.txt";

    /** Line separator written between report sections. */
    private static final String SECTION_SEPARATOR = "------------------------------------\n";

    /**
     * Trains a DecisionTree regression model on the given data file and writes
     * the learned tree plus sample predictions to {@code resultFilePath}.
     *
     * <p>Input lines are expected as {@code "label,f1 f2 f3 ..."} (label, comma,
     * then space-separated numeric features).
     *
     * @param dataFilePath   path to the training data; when null or empty the
     *                       bundled sample file is used instead
     * @param resultFilePath path of the report file to write
     */
    public static void exc(String dataFilePath, String resultFilePath) {
        SparkConf sparkConf = new SparkConf()
                .setAppName("DecisionTree Algorithm")
                .setMaster("local[*]");
        sparkConf.set("spark.driver.allowMultipleContexts", "true");

        // try-with-resources: the original leaked the SparkContext on every path.
        // JavaSparkContext implements Closeable; close() delegates to stop().
        try (JavaSparkContext jsc = new JavaSparkContext(sparkConf)) {
            String inputPath = Strings.isNullOrEmpty(dataFilePath) ? DEFAULT_DATA_PATH : dataFilePath;
            JavaRDD<LabeledPoint> parsedData = parseLabeledPoints(jsc.textFile(inputPath));

            // Empty map => every feature is treated as continuous.
            Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
            String impurity = "variance";
            int maxDepth = 5;
            // NOTE(review): 10,000,000 bins is unusually large (typical values are <= a few
            // thousand) and inflates memory use — confirm this value is intentional.
            int maxBins = 10000000;

            DecisionTreeModel model = DecisionTree.trainRegressor(
                    parsedData, categoricalFeaturesInfo, impurity, maxDepth, maxBins);
            System.out.println("Learned regression tree model: \n" + model.toDebugString());

            writeReport(jsc, model, resultFilePath);
        }
    }

    /**
     * Parses {@code "label,f1 f2 ..."} lines into {@link LabeledPoint}s.
     * The result is cached because training iterates over it repeatedly.
     */
    private static JavaRDD<LabeledPoint> parseLabeledPoints(JavaRDD<String> data) {
        return data.map(new Function<String, LabeledPoint>() {
            private static final long serialVersionUID = 1L;

            @Override
            public LabeledPoint call(String line) throws Exception {
                String[] parts = line.split(",");
                String[] featureStrs = parts[1].split(" ");
                double[] features = new double[featureStrs.length];
                for (int i = 0; i < featureStrs.length; i++) {
                    features[i] = Double.parseDouble(featureStrs[i]);
                }
                return new LabeledPoint(Double.parseDouble(parts[0]), Vectors.dense(features));
            }
        }).cache();
    }

    /**
     * Writes the model description and predictions for two hard-coded sample
     * vectors to {@code resultFilePath}. The writer is closed on all paths
     * (the original leaked it when a write threw).
     */
    private static void writeReport(JavaSparkContext jsc, DecisionTreeModel model, String resultFilePath) {
        try (FileWriter fos = new FileWriter(new File(resultFilePath))) {
            fos.write(model.toDebugString() + "\n");

            Vector v = Vectors.dense(new double[] {4272, 1418, 8972, 4612});
            Vector v2 = Vectors.dense(new double[] {4272, 8418, 8972, 4612});

            List<Vector> samples = new ArrayList<Vector>();
            samples.add(v);
            samples.add(v2);

            // Broadcast the model once so each executor task reuses the same copy.
            final Broadcast<DecisionTreeModel> bcModel = jsc.broadcast(model);

            JavaRDD<String> result = jsc.parallelize(samples).map(new Function<Vector, String>() {
                // Explicit UID for the serializable closure (the original omitted it here
                // but declared one on the parsing function above).
                private static final long serialVersionUID = 1L;

                @Override
                public String call(Vector features) throws Exception {
                    return bcModel.value().predict(features) + features.toString();
                }
            });

            // Collect once: the original called collect() per element, re-running
            // the whole Spark job for every line written.
            List<String> predictions = result.collect();
            for (String prediction : predictions) {
                fos.write(SECTION_SEPARATOR);
                fos.write(prediction + "\n");
            }
            fos.write(SECTION_SEPARATOR);
            // Plain cast replaces deprecated new Double(...).longValue() boxing.
            fos.write(String.valueOf((long) model.predict(v)));

            fos.flush();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
