package chapter04

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{BooleanType, IntegerType, LongType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

/**
 * Demonstrates the two ways of converting an RDD to a DataFrame:
 *   1. schema inference via a case class + `toDF()` (`runInferSchema`)
 *   2. programmatic schema via `Row` + `StructType` (`runProgrammaticSchema`)
 *
 * NOTE(review): the input path is hard-coded to a local Windows file; adjust
 * `PeopleFile` (or pass a different path) when running elsewhere.
 */
object InteroperatingRDDApp {

  // Single source of truth for the sample data location (was duplicated in both methods).
  private val PeopleFile = "file:///D:\\JAVApros\\spark_pk\\data\\people.txt"

  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession.builder()
      // Fixed: appName previously said "DataFrameAPIApp" (copy-paste leftover).
      .appName("InteroperatingRDDApp")
      .master("local")
      .getOrCreate()

    try {
      //runInferSchema(spark)
      runProgrammaticSchema(spark)
    } finally {
      // Fixed: the session was never stopped, leaking the local Spark context.
      spark.stop()
    }
  }

  /**
   * Second approach: build the DataFrame programmatically by pairing an
   * `RDD[Row]` with an explicitly constructed `StructType` schema.
   *
   * Expects each input line to be "name,age" (age parsed as Int; a malformed
   * age will throw `NumberFormatException` at action time).
   *
   * @param spark active SparkSession
   */
  def runProgrammaticSchema(spark: SparkSession): Unit = {
    // Fixed: removed unused `import spark.implicits._` — createDataFrame(RDD[Row], schema)
    // does not require implicit encoders.
    val peopleRDD: RDD[String] = spark.sparkContext.textFile(PeopleFile)

    val peopleRowRDD: RDD[Row] = peopleRDD.map(_.split(","))
      // trim removes the space after the comma before the numeric parse
      .map(x => Row(x(0), x(1).trim.toInt))

    val structType: StructType = StructType(
      StructField("name", StringType, true) ::
        StructField("age", IntegerType, false) :: Nil)

    val peopleDF: DataFrame = spark.createDataFrame(peopleRowRDD, structType)
    peopleDF.show()
  }

  /**
   * First approach: infer the schema by mapping each line to the [[People]]
   * case class and calling `toDF()` (requires `spark.implicits._`).
   *
   * Registers the result as a temp view and runs a SQL filter on age.
   *
   * @param spark active SparkSession
   */
  def runInferSchema(spark: SparkSession): Unit = {
    import spark.implicits._

    val peopleRDD: RDD[String] = spark.sparkContext.textFile(PeopleFile)

    // RDD[String] ==> DataFrame via case-class reflection
    val peopleDF: DataFrame = peopleRDD.map(_.split(","))
      // trim removes the space after the comma before the numeric parse
      .map(x => People(x(0), x(1).trim.toInt))
      .toDF()
    //peopleDF.show(false)

    peopleDF.createOrReplaceTempView("peopleDF")
    val queryDF: DataFrame = spark.sql(
      """
        |SELECT
        |     name,
        |     age
        |FROM peopleDF
        |     WHERE age BETWEEN 19 AND 29
        |""".stripMargin)
    // Fixed: the query result was computed but never displayed (dead code).
    queryDF.show()
    // Alternative row-access styles:
    // queryDF.map(x => "name:" + x(0)).show()              // by index
    // queryDF.map(x => "name:" + x.getAs[String]("name")).show() // by column name
  }

  /**
   * Schema-inference model for a "name,age" record. Must be declared at
   * object level (not inside a method) so Spark's reflection can see it.
   */
  case class People(name: String, age: Int)

}
