package doit20.sparksql

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.types.{DataTypes, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Dataset, Encoders, Row, SparkSession}

/**
 * @author 涛哥
 * @nick_name "deep as the sea"
 * @contact qq:657270652 wx:doit_edu
 * @site www.doitedu.cn
 * @date 2021-04-09
 * @desc Load data from files (json / csv / parquet) to obtain a DataFrame
 */

/**
 * Record type matching one row of the student csv files (data/stu.txt).
 * Marked `final`: case classes should not be extended.
 *
 * @param id       numeric row id
 * @param nickName display name
 * @param age      age in years
 * @param score    exam score
 * @param gender   gender flag as found in the source file
 */
final case class Soldier(id: Int, nickName: String, age: Int, score: Float, gender: String)

/**
 * Demos for loading data from files (csv / json / parquet) into a
 * DataFrame / Dataset with Spark SQL.
 *
 * NOTE(review): the method names keep the original "Paruqet" spelling
 * (should be "Parquet") so existing callers are not broken.
 */
object Demo4 {

  /** Entry point: uncomment the demo you want to run. */
  def main(args: Array[String]): Unit = {
    // Side-effecting 0-arity methods are called with explicit parentheses.
    // loadJson()
    // genParuqetFile()
    loadParuqet2Dataframe()
  }

  /**
   * Loads the parquet files under data/parquetfile/ (as produced by
   * [[genParuqetFile]]) into a DataFrame and prints it.
   * Parquet is self-describing, so no schema has to be supplied.
   */
  def loadParuqet2Dataframe(): Unit = {
    val spark = SparkSession.builder()
      .appName("")
      .master("local")
      .getOrCreate()

    val df: DataFrame = spark.read.parquet("data/parquetfile/")
    df.show()

    spark.close()
  }

  /**
   * Reads a csv file (first line taken as the header) and re-writes its
   * content as parquet files under data/parquetfile/.
   */
  def genParuqetFile(): Unit = {
    val spark = SparkSession.builder()
      .appName("")
      .master("local")
      .getOrCreate()

    val df = spark.read.option("header", true).csv("data/stu2.txt")

    // Save the df as parquet files
    df.write.parquet("data/parquetfile/")

    spark.close()
  }

  /**
   * Loads a json file with nested objects and prints the inferred
   * (nested) schema plus the data.
   */
  def loadJson(): Unit = {

    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)

    val spark = SparkSession.builder()
      .appName("")
      .master("local")
      .getOrCreate()

    // TODO can this kind of nested json be loaded as a flat table structure?
    /**
     * root
     * |-- friend: struct (nullable = true)
     * |    |-- fage: long (nullable = true)
     * |    |-- fname: string (nullable = true)
     * |    |-- fsex: string (nullable = true)
     * |    |-- info: struct (nullable = true)
     * |    |    |-- addr: string (nullable = true)
     * |    |    |-- job: string (nullable = true)
     * |-- name: string (nullable = true)
     *
     * name,fname,fage,fsex,faddr,fjob
     */
    val df = spark.read.json("data/p2.txt")
    df.printSchema()
    df.show()

    spark.close()
  }

  /**
   * Demonstrates the different ways of loading csv files:
   *  1. with an explicit schema,
   *  2. converting the resulting DataFrame to a typed Dataset[Soldier],
   *  3. with header=true / a custom separator,
   *  4. with automatic schema inference.
   */
  def loadCsv(): Unit = {
    val spark = SparkSession.builder()
      .appName("")
      .master("local")
      .getOrCreate()

    // Create an explicit schema for the headerless file data/stu.txt
    val schema = StructType(
      Seq(
        StructField("id", DataTypes.IntegerType),
        StructField("nickName", DataTypes.StringType),
        StructField("age", DataTypes.IntegerType),
        StructField("score", DataTypes.FloatType),
        StructField("gender", DataTypes.StringType)
      )
    )

    // Load the csv file with the schema above
    val df: Dataset[Row] = spark.read.schema(schema).csv("data/stu.txt")

    // Convert the untyped DataFrame into a typed Dataset[Soldier]
    import spark.implicits._
    // val ds:Dataset[Soldier] = df.as[Soldier](Encoders.product)
    val ds: Dataset[Soldier] = df.as[Soldier]

    // With option header=true, the first line of the file is used as the
    // column names instead of data
    val df2 = spark.read.option("header", "true").option("sep", ",").schema(schema).csv("data/stu2.txt")
    df2.show()

    // With inferSchema=true the column types are inferred automatically
    // (at the cost of one extra full scan of the file)
    val df3 = spark.read.options(Map("header" -> "true", "inferSchema" -> "true")).csv("data/stu2.txt")
    df3.printSchema()

    /**
     * For more csv load options, see org.apache.spark.sql.DataFrameReader
     */

    spark.close()
  }

}
