package com.xiaoxu.spark.DateFrameWithDataset

import org.apache.hadoop.classification.InterfaceAudience.Public
import org.apache.spark.sql.SparkSession

/**
 * Demo: read a CSV file into a DataFrame, then convert it to a typed
 * Dataset[Sales] via a case class and `spark.implicits`.
 *
 * Illustrates the key difference: DataFrame column references are strings
 * checked only at runtime, while Dataset field access is checked at
 * compile time.
 */
object DatasetDemo {

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("DatasetApp")
      .master("local[2]").getOrCreate()

    // Note: the implicit encoders are required for `df.as[Sales]` and for
    // the Encoder that `dataset.map` needs below.
    import spark.implicits._

    val path = "/data/sales.csv"

    // Parse the CSV: treat the first line as a header and infer column
    // types (inferSchema forces an extra pass over the data).
    val df = spark
      .read
      .option("header", "true")
      .option("inferSchema", "true")
      .csv(path)

    df.show()

    // Convert DataFrame -> Dataset[Sales]. The CSV header names must match
    // the case class field names, otherwise this fails at runtime.
    val dataset = df.as[Sales]

    // Typed access: misspelling `itemId` here would be a compile error,
    // unlike the string-based select below.
    dataset.map(line => line.itemId).show()

    // BUG FIX: was select("nname") — a misspelled column name, which makes
    // Spark throw AnalysisException as soon as the plan is analyzed.
    // Select a column that the Sales schema actually contains.
    df.select("itemId").show()

    spark.stop()
  }

  /** Schema of one row of sales.csv; field names must match the CSV header. */
  case class Sales(transactionId: Int, customerId: Int, itemId: Int, amountPaid: Double)

  /** Name of the tuning-notes file. (Currently unused; kept for compatibility.) */
  private def a(): String = "5.1TuningSpark.txt"

}
