package com.jinghang.spark_base._020_SQL

import org.apache.spark.sql.SparkSession

/**
  * Schema inference example: building a DataFrame (or Dataset) from an RDD
  * by reflecting on a case class.
  *
  * RDD --> Dataset
  * RDD --> DataFrame
  *
  * The implicit conversions come from SQLImplicits (via `import spark.implicits._`).
  */
object _030_InferSchemaExample {

  /** Schema for one parsed line of people.txt ("name, age"). */
  case class Person(name: String, age: Long)

  /**
    * Entry point: creates a local SparkSession, runs the schema-inference
    * example, and always stops the session afterwards.
    */
  def main(args: Array[String]): Unit = {

    val sparkSession = SparkSession.builder()
      .appName("_030_InferSchemaExample") // was "_010_BasicDataFrameExample" — copy-paste from another example
      .master("local[1]")
      .getOrCreate()

    // Suppress Spark's verbose INFO logging so the example output is readable.
    sparkSession.sparkContext.setLogLevel("ERROR")

    try {
      runInferSchemaExample(sparkSession)
    } finally {
      // Release the local Spark context and its resources.
      sparkSession.stop()
    }
  }

  /**
    * Demonstrates schema inference: maps a text file to an RDD of `Person`,
    * converts it to a DataFrame, registers a temp view, and queries it with SQL.
    *
    * @param spark the active SparkSession
    */
  private def runInferSchemaExample(spark: SparkSession): Unit = {
    // For implicit conversions from RDDs to DataFrames (toDF/toDS, row encoders).
    import spark.implicits._

    // Read "name, age" lines, split on the comma, and build a Person per line;
    // the case-class fields drive the inferred DataFrame schema.
    val peopleDF = spark.sparkContext
      .textFile("data/practiceOperator/people.txt")            // RDD[String]
      .map(_.split(","))                                       // RDD[Array[String]], e.g. ["Michael", " 29"]
      .map(fields => Person(fields(0), fields(1).trim.toLong)) // RDD[Person]
      .toDF()

    // Equivalent Dataset variant:
    /*val peopleDS = spark.sparkContext
      .textFile("data/practiceOperator/people.txt")
      .map(_.split(","))
      .map(fields => Person(fields(0), fields(1).trim.toLong))
      .toDS()*/

    // Register the DataFrame as a temporary view so it can be queried via SQL.
    peopleDF.createOrReplaceTempView("people")

    // SQL statements can be run through the sql method provided by SparkSession.
    val teenagersDF = spark.sql("SELECT name, age FROM people WHERE age BETWEEN 13 AND 19")

    // The columns of a result row can be accessed by field index ...
    teenagersDF.map(teenager => "Name: " + teenager(0)).show()
    teenagersDF.select("name").show()
    // +------------+
    // |       value|
    // +------------+
    // |Name: Justin|
    // +------------+

    // ... or by field name.
    teenagersDF.map(teenager => "Name: " + teenager.getAs[String]("name")).show()
    // +------------+
    // |       value|
    // +------------+
    // |Name: Justin|
    // +------------+

    // No pre-defined encoder exists for Dataset[Map[K, V]]; define one
    // explicitly (Kryo serialization) so the map below compiles.
    implicit val mapEncoder: org.apache.spark.sql.Encoder[Map[String, Any]] =
      org.apache.spark.sql.Encoders.kryo[Map[String, Any]]

    // row.getValuesMap[T] retrieves multiple columns at once into a Map[String, T].
    val teenagerMaps = teenagersDF
      .map(teenager => teenager.getValuesMap[Any](List("name", "age")))
      .collect()
    teenagerMaps.foreach(m => println(m.toString))
    // e.g. Map("name" -> "Justin", "age" -> 19)
  }

}
