package com.hngy.java.sql;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

/**
 * 需求：load和save的使用
 */
/**
 * Demonstrates Spark SQL generic load/save: reads a JSON file into a
 * {@code Dataset<Row>} and writes the selected columns back out as CSV.
 *
 * <p>Usage: {@code LoadAndSaveOpJava [inputPath] [outputPath]} — both
 * arguments are optional; when omitted, the original demo paths are used,
 * so existing zero-argument invocations behave exactly as before.
 */
public class LoadAndSaveOpJava {

    // Defaults preserved byte-for-byte from the original hard-coded paths.
    private static final String DEFAULT_INPUT =
            "F:\\BaiduNetdiskDownload\\hadoop\\source\\bigdata_course_materials\\spark2\\student.json";
    private static final String DEFAULT_OUTPUT =
            "D:\\cache\\bigdata\\spark2\\loadAndSaveOpJava";

    public static void main(String[] args) {
        // Allow paths to be overridden from the command line while staying
        // backward compatible with argument-less runs.
        String inputPath = args.length > 0 ? args[0] : DEFAULT_INPUT;
        String outputPath = args.length > 1 ? args[1] : DEFAULT_OUTPUT;

        SparkConf conf = new SparkConf();
        conf.setMaster("local");

        // SparkSession bundles SparkContext and SQLContext. It is
        // AutoCloseable (close() delegates to stop()), so try-with-resources
        // guarantees the session is stopped even when the read or write
        // throws — the original only called stop() on the success path.
        try (SparkSession sparkSession = SparkSession.builder()
                .appName("SqlDemoJava")
                .config(conf)
                .getOrCreate()) {

            // Load the JSON source into a DataFrame.
            Dataset<Row> stuDf = sparkSession.read().format("json").load(inputPath);

            // Save only the name/age columns in CSV format. The save target
            // may also be an HDFS URI, e.g.:
            //stuDf.select("name","age").write().format("csv").save("hdfs://hadoop001:9001/out-save002");
            stuDf.select("name", "age").write().format("csv").save(outputPath);
        }
    }
}
