package cn.lgwen.spark.ml.data;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.*;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.junit.Test;

import java.util.ArrayList;
import java.util.List;
import java.util.Random;

/**
 * 2021/4/12
 *
 * @author aven@didiglobal.com
 */
/**
 * Generates a small synthetic dataset for linear-regression experiments.
 *
 * <p>Each row is {@code (x, y)} where {@code y = 2 * x + noise}, with
 * {@code x} drawn uniformly from [0, 1000) and {@code noise} uniformly
 * from (-1, 1). The result is written as a headered CSV to the directory
 * {@code line_regression_train.csv} (Spark writes a directory of part files).
 */
public class DataGenerator {

    /**
     * Builds 1000 noisy (x, y) samples on a local Spark session and writes
     * them out as CSV, overwriting any previous run.
     */
    @Test
    public void createDate() {
        Random rm = new Random();

        // Pre-generate the x values; presized to avoid resizing.
        List<Float> param = new ArrayList<>(1000);
        for (int i = 0; i < 1000; i++) {
            param.add(rm.nextFloat() * 1000);
        }

        SparkConf conf = new SparkConf()
                .setAppName("ParallelizeCollection")
                .setMaster("local[1]");

        // Create the session and a JavaSparkContext view over its context.
        SparkSession spark = SparkSession.builder().config(conf).getOrCreate();
        JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());

        try {
            JavaRDD<Row> numberRDD = sc.parallelize(param).map(x -> {
                // Uniform noise in (-1, 1): flip the sign half the time.
                float v = rm.nextFloat();
                if (rm.nextBoolean()) {
                    v = -v;
                }
                float y = x * 2 + v;
                return RowFactory.create(x, y);
            });
            StructType schema = DataTypes.createStructType(new StructField[]{
                    new StructField("x", DataTypes.FloatType, false, Metadata.empty()),
                    new StructField("y", DataTypes.FloatType, false, Metadata.empty()),
            });
            Dataset<Row> dataset = spark.createDataFrame(numberRDD, schema);
            dataset.write().mode(SaveMode.Overwrite).option("header", true).csv("line_regression_train.csv");
        } finally {
            // BUGFIX: the original called spark.cloneSession(), which creates
            // and discards a NEW session instead of shutting this one down.
            // stop() releases the session and its underlying SparkContext,
            // so a separate sc.close() is unnecessary.
            spark.stop();
        }
    }

}
