package cn.lgwen.spark.ml.learning.example;

import cn.lgwen.spark.ml.learning.kaggle.TitanicRandomForestClass;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.ml.feature.*;
import org.apache.spark.ml.linalg.Vector;
import org.apache.spark.ml.linalg.VectorUDT;
import org.apache.spark.ml.linalg.Vectors;
import org.apache.spark.ml.regression.LinearRegression;
import org.apache.spark.ml.regression.LinearRegressionModel;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.catalyst.encoders.RowEncoder;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;

import java.util.ArrayList;
import java.util.List;

/**
 * Linear regression example (2021/4/12).
 *
 * <p>Loads a small CSV of (x, y) pairs, min-max scales the feature, fits an
 * elastic-net linear regression, and predicts on two hand-built test points.
 *
 * @author aven@didiglobal.com
 */
public class LineRegression {

    /**
     * Entry point: trains a linear regression on {@code line_regression.csv}
     * (classpath root, header row; column 0 = feature x, column 1 = label y),
     * then predicts for raw x values 3.0 and 4.0.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        SparkConf conf = new SparkConf()
                .setAppName("LinearRegressionExample") // was a copy-paste leftover ("ParallelizeCollection")
                .setMaster("local[*]");
        SparkSession spark = SparkSession.builder().config(conf).getOrCreate();
        try {
            JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());

            // Training-frame schema: double label plus a feature vector.
            StructType schema = DataTypes.createStructType(new StructField[]{
                    new StructField("label", DataTypes.DoubleType, false, Metadata.empty()),
                    new StructField("features", new VectorUDT(), false, Metadata.empty())
            });

            // Anchor the classpath lookup on THIS class instead of the unrelated
            // TitanicRandomForestClass, so the example stands on its own.
            Dataset<Row> origin = spark.read().format("csv").option("header", true)
                    .load(LineRegression.class.getResource("/")
                            .getPath() + "line_regression.csv");

            // CSV values arrive as strings: column 0 is x, column 1 is y.
            Dataset<Row> map = origin.map((MapFunction<Row, Row>) value -> {
                Vector vector = Vectors.dense(Double.parseDouble(value.getString(0)));
                return RowFactory.create(Double.parseDouble(value.getString(1)), vector);
            }, RowEncoder.apply(schema));
            map.show(10);

            // Min-max scaling is not strictly required; it only matters when
            // outlier samples appear in the data.
            MinMaxScaler scaler = new MinMaxScaler()
                    .setInputCol("features")
                    .setOutputCol("scaledFeatures");
            MinMaxScalerModel scalerModel = scaler.fit(map);
            Dataset<Row> trainData = scalerModel.transform(map)
                    .select("label", "scaledFeatures")
                    .withColumnRenamed("scaledFeatures", "features");

            // Elastic-net regularized linear regression.
            LinearRegression linearRegression = new LinearRegression()
                    .setMaxIter(10).setRegParam(0.3).setElasticNetParam(0.8);
            LinearRegressionModel model = linearRegression.fit(trainData);

            // Build a tiny test frame with raw x values 3.0 and 4.0.
            List<Double> doubleList = new ArrayList<>();
            doubleList.add(3.0);
            doubleList.add(4.0);
            JavaRDD<Row> numberRDD = sc.parallelize(doubleList).map(RowFactory::create);
            StructType schema1 = DataTypes.createStructType(new StructField[]{
                    new StructField("x", DataTypes.DoubleType, false, Metadata.empty())});
            Dataset<Row> dataFrame = spark.createDataFrame(numberRDD, schema1);

            Dataset<Row> features = dataFrame.map((MapFunction<Row, Row>) value -> {
                Vector vector = Vectors.dense(value.getDouble(0));
                return RowFactory.create(vector);
            }, RowEncoder.apply(new StructType(
                    new StructField[]{new StructField("features", new VectorUDT(), false, Metadata.empty())}
            )));

            // Reuse the SAME fitted scaler so test features share the training scale.
            Dataset<Row> dataset = scalerModel.transform(features)
                    .select("scaledFeatures")
                    .withColumnRenamed("scaledFeatures", "features");

            // Predict and show the results.
            Dataset<Row> res = model.transform(dataset);
            res.show();
        } finally {
            // The original leaked the session; always release Spark resources.
            spark.stop();
        }
    }
}
