package com.spark.WordCount.sql

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

object LoadAndSaveByScala {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("local") // run locally with a single worker thread
    // Create the SparkSession; it wraps both the SparkContext and the SQLContext
    val sparkSession = SparkSession.builder()
      .appName("LoadAndSaveByScala")
      .config(conf)
      .getOrCreate()
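    // Alternatively (a minimal sketch of the same setup), the master can be set
    // directly on the builder, with no separate SparkConf:
    //   val sparkSession = SparkSession.builder()
    //     .master("local")
    //     .appName("LoadAndSaveByScala")
    //     .getOrCreate()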
    val giftRecordDf = sparkSession.read
      .format("json") // input data format: JSON
      .load("datas/gift_record.log") // path of the input file
    studentDf.select("uid","vid","gold")
      .write //只有DataFrame对象或者DataSet
      .format("csv")//指定生成的文件类型
      .save("hdfs://hadoop101:9000/out-save001") //指定保存路径 --- HDFS路径
    sparkSession.stop()
  }

}
