package cn.lgwen.spark.ml.learning.example;

import cn.lgwen.spark.ml.learning.kaggle.TitanicRandomForestClass;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.ml.linalg.VectorUDT;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

/**
 * Example: read a local CSV directory into a {@code Dataset<Row>} with an
 * explicit schema and print the row count.
 *
 * 2021/5/8
 *
 * @author aven@didiglobal.com
 */
public class ReadFileExample {

    /**
     * Reads a CSV directory into a {@code Dataset<Row>} using an explicit
     * three-column string schema and prints the row count.
     *
     * @param args unused command-line arguments
     * @throws IOException declared for callers; Spark I/O failures surface as
     *                     unchecked exceptions at {@code count()}
     */
    public static void main(String[] args) throws IOException {
        SparkConf conf = new SparkConf()
                .setAppName("ParallelizeCollection")
                .setMaster("local[*]");
        SparkSession spark = SparkSession.builder().config(conf).getOrCreate();
        JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());
        try {
            // Explicit schema: without it Spark would infer generic column
            // names (_c0, _c1, ...) instead of a/b/c.
            StructType schema = DataTypes.createStructType(new StructField[]{
                    new StructField("a", DataTypes.StringType, false, Metadata.empty()),
                    new StructField("b", DataTypes.StringType, false, Metadata.empty()),
                    new StructField("c", DataTypes.StringType, false, Metadata.empty())
            });
            // Bug fix: the schema was previously built but never applied to
            // the reader; pass it via .schema(...) so the columns are named.
            Dataset<Row> trainData = spark.read()
                    .format("csv")
                    .schema(schema)
                    .load("/Users/didi/Develop/package/spark-2.4.7-bin-hadoop2.7/conf/tmp");
            long count = trainData.count();
            System.out.println(count);
        } finally {
            // Ensure Spark resources are released even if the job fails;
            // previously sc.close() was skipped on exception and the
            // SparkSession was never stopped.
            sc.close();
            spark.stop();
        }
    }

}
