package com.bw.sparksql1.job3

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * Demonstrates loading DataFrames from different data sources
  * (Parquet, JSON, CSV) via `spark.read.load`.
  */
object Job8 {
    def main(args: Array[String]): Unit = {
      // Silence Spark's verbose INFO logging; only errors are printed.
      Logger.getLogger("org").setLevel(Level.ERROR)

      val spark = SparkSession
        .builder()
        .master("local")
        .appName("Spark SQL basic example")
        .getOrCreate()

      // Format 1: Parquet. Calling load() without an explicit format
      // uses Spark's default data source, which is Parquet.
      val df: DataFrame = spark.read.load("users.parquet")
      // Register under a distinct view name: the original code registered
      // this as "users" and then re-registered "users" from the CSV frame
      // below, silently shadowing this view and making the parquet read dead.
      df.createOrReplaceTempView("users_parquet")
//      spark.sql("select * from users_parquet").show()

//      // Format 2: JSON
//      val df1 = spark.read.format("json").load("person.json")
//      df1.createOrReplaceTempView("people1")
//      spark.sql("select * from people1").show()
//
//      // Format 3: CSV
//      val df2 = spark.read.format("csv").option("header","true").option("delimiter",",").load("users.csv")

      // option("header","true")    treat the first row as column names
      // option("delimiter",",")    specify the field separator

      val df2 = spark.read.format("csv")
        .option("header", "false")
        .option("delimiter", ",")
        .load("users2.csv")

      // A headerless CSV arrives with synthetic column names _c0, _c1, ...
      // withColumnRenamed is the idiomatic (and cheaper) equivalent of the
      // original withColumn(col("_c1")).drop("_c1") dance.
      val df4 = df2
        .withColumnRenamed("_c0", "user_id")
        .withColumnRenamed("_c1", "local_id")

      df4.createOrReplaceTempView("users")
      spark.sql("select * from users").show()

      // Release the local SparkContext (the original never stopped it).
      spark.stop()
    }
}