package cn.lgwen.spark.ml.learning.kaggle;

import org.apache.spark.ml.evaluation.BinaryClassificationEvaluator;
import org.apache.spark.ml.feature.VectorAssembler;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

/**
 * 2020/3/19
 * aven.wu
 * danxieai258@163.com
 */
public class TitanicUtil {

    /** Feature columns assembled into the "features" vector (shared by train and test). */
    private static final String[] FEATURE_COLS =
            {"Sex", "SibSp", "Cabin", "Embarked", "Fare", "Age", "Pclass", "Parch"};

    /** Utility class — not instantiable. */
    private TitanicUtil() {
    }

    /**
     * Loads {@code train.csv} from the classpath root and appends an assembled
     * "features" vector column.
     *
     * @param spark active session used to read the CSV
     * @return the training rows plus a "features" column
     */
    public static Dataset<Row> trainData(SparkSession spark) {
        return loadAndAssemble(spark, "train.csv");
    }

    /**
     * Loads {@code test.csv} from the classpath root and appends an assembled
     * "features" vector column.
     *
     * @param spark active session used to read the CSV
     * @return the test rows plus a "features" column
     */
    public static Dataset<Row> testData(SparkSession spark) {
        return loadAndAssemble(spark, "test.csv");
    }

    /**
     * Reads a Titanic CSV (header + inferred schema) and runs it through a
     * {@link VectorAssembler} over {@link #FEATURE_COLS}. Shared by
     * {@link #trainData} and {@link #testData} to avoid duplicating the pipeline.
     *
     * <p>NOTE(review): "Sex", "Cabin" and "Embarked" are string columns in the raw
     * Titanic CSV; VectorAssembler accepts only numeric/boolean/vector inputs, so
     * this presumably relies on the caller having indexed them first — confirm
     * against the calling pipeline.
     *
     * @param spark    active session used to read the CSV
     * @param fileName CSV file name relative to the classpath root
     * @return the loaded rows plus a "features" column
     */
    private static Dataset<Row> loadAndAssemble(SparkSession spark, String fileName) {
        // NOTE(review): getResource("/").getPath() returns a URL-encoded path and
        // breaks if the classpath directory contains spaces; prefer resolving the
        // resource directly (e.g. getResource("/" + fileName)) — kept as-is here.
        Dataset<Row> data = spark.read().format("csv")
                .option("header", true)
                .option("inferSchema", true)
                .load(TitanicRandomForestClass.class.getResource("/").getPath() + fileName);

        VectorAssembler assembler = new VectorAssembler()
                .setInputCols(FEATURE_COLS)
                .setOutputCol("features")
                .setHandleInvalid("keep"); // keep rows with null/NaN feature values instead of failing
        return assembler.transform(data);
    }

    /**
     * Evaluates predictions against the "Survived" label (default metric:
     * areaUnderROC).
     *
     * <p>NOTE(review): the raw-prediction column is set to "prediction" (the hard
     * 0/1 class label) rather than "rawPrediction"; AUC computed over hard labels
     * is degenerate — confirm whether "rawPrediction" was intended.
     *
     * @param rowDataset dataset containing "Survived" and "prediction" columns
     * @return the evaluator's metric value
     */
    public static double evaluate(Dataset<Row> rowDataset) {
        BinaryClassificationEvaluator evaluator = new BinaryClassificationEvaluator()
                .setLabelCol("Survived")
                .setRawPredictionCol("prediction");
        return evaluator.evaluate(rowDataset);
    }
}
