package cn.itcast.spark.sql

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.{SparkConf, SparkContext}
import org.junit.Test

class Intro {

  /**
   * Word count with the low-level RDD API, as a baseline to compare
   * against the Dataset/DataFrame versions below.
   */
  @Test
  def rddIntro(): Unit = {
    val conf = new SparkConf().setMaster("local[6]").setAppName("rdd intro")
    val sc = new SparkContext(conf)

    try {
      sc.textFile("dataset/wordcount.txt")
        .flatMap(_.split(" "))
        .map((_, 1))
        .reduceByKey(_ + _)
        .collect()
        .foreach(println)
    } finally {
      // Stop the context so later tests can create their own SparkContext.
      sc.stop()
    }
  }

  /** Filtering and projecting with the typed Dataset API. */
  @Test
  def dsIntro(): Unit = {
    val spark = SparkSession.builder()
      .appName("ds intro")
      .master("local[6]")
      .getOrCreate()

    import spark.implicits._

    val sourceRDD = spark.sparkContext.parallelize(Seq(Person("zhangsan", 10), Person("lisi", 15)))

    val personDS = sourceRDD.toDS()

    // 'age / 'name are Symbol-based column references resolved against
    // Person's schema; .as[String] converts back to a typed Dataset.
    val resultDS = personDS.where('age > 10)
      .where('age < 20)
      .select('name)
      .as[String]

    resultDS.show()

    spark.stop()
  }

  /** The same query expressed via a DataFrame registered as a SQL view. */
  @Test
  def dfIntro(): Unit = {
    val spark = SparkSession.builder()
      .appName("df intro")
      .master("local[6]")
      .getOrCreate()

    import spark.implicits._

    val sourceRDD = spark.sparkContext.parallelize(Seq(Person("zhangsan", 10), Person("lisi", 15)))

    val df = sourceRDD.toDF()
    // Register the DataFrame so it can be queried with plain SQL.
    df.createOrReplaceTempView("person")

    val resultDF = spark.sql("select name from person where age > 10 and age < 20")

    resultDF.show()

    spark.stop()
  }

  /**
   * A Dataset accepts the same predicate in four equivalent styles:
   * typed lambdas, Symbol columns, $-interpolated columns, and raw SQL
   * expression strings.
   */
  @Test
  def dataset1(): Unit = {
    // 1. Create the SparkSession.
    val spark = SparkSession.builder()
      .appName("dataset1")
      .master("local[6]")
      .getOrCreate()

    // 2. Import the implicit conversions bound to this session
    //    (toDS/toDF, Symbol and $ column syntax).
    import spark.implicits._

    // 3. Build a sample Dataset.
    val sourceRDD = spark.sparkContext.parallelize(Seq(Person("zhangsan", 10), Person("lisi", 15)))
    val dataset = sourceRDD.toDS()

    // 4. The same filter in each API flavour.
    // Strongly typed (RDD-like) API:
    dataset.filter(item => item.age > 10).show()
    // Weakly typed column APIs:
    dataset.filter('age > 10).show()
    dataset.filter($"age" > 10).show()
    // SQL expression string:
    dataset.filter("age > 10").show()

    spark.stop()
  }

  /**
   * Whatever element type a Dataset holds, the RDD inside its physical
   * execution plan is always an RDD[InternalRow].
   */
  @Test
  def dataset2(): Unit = {
    // 1. Create the SparkSession.
    val spark = SparkSession.builder()
      .appName("dataset2")
      .master("local[6]")
      .getOrCreate()

    // 2. Import the implicit conversions bound to this session.
    import spark.implicits._

    // 3. Build a sample Dataset.
    val sourceRDD = spark.sparkContext.parallelize(Seq(Person("zhangsan", 10), Person("lisi", 15)))
    val dataset = sourceRDD.toDS()

//    dataset.explain(true)
    // The analysed plan exposes the data as InternalRow regardless of
    // the Dataset's element type.
    val executionRDD: RDD[InternalRow] = dataset.queryExecution.toRdd
    println(executionRDD.toDebugString)

    spark.stop()
  }

  /**
   * Two ways to get an RDD back out of a Dataset: the raw
   * RDD[InternalRow] from the execution plan, and the typed RDD decoded
   * back to the Dataset's element type.
   */
  @Test
  def dataset3(): Unit = {
    // 1. Create the SparkSession.
    val spark = SparkSession.builder()
      .appName("dataset3")
      .master("local[6]")
      .getOrCreate()

    // 2. Import the implicit conversions (provides the Encoder needed
    //    by createDataset).
    import spark.implicits._

    // 3. Build the Dataset directly from a local collection.
    val dataset: Dataset[Person] = spark.createDataset(Seq(Person("zhangsan", 10), Person("lisi", 15)))

    // The already-analysed plan: untyped InternalRow representation.
    val executionRDD: RDD[InternalRow] = dataset.queryExecution.toRdd

    // dataset.rdd decodes the underlying RDD[InternalRow] back into the
    // element type via the Dataset's Encoder.
    val typeRDD: RDD[Person] = dataset.rdd

    println(executionRDD.toDebugString)
    println()
    println()
    println(typeRDD.toDebugString)

    spark.stop()
  }

  /** Basic DataFrame creation and column operations. */
  @Test
  def dataframe1(): Unit = {
    // 1. Create the SparkSession.
    val spark = SparkSession.builder()
      .appName("dataframe1")
      .master("local[6]")
      .getOrCreate()

    // 2. Create the DataFrame.
    import spark.implicits._

    val dataFrame: DataFrame = Seq(Person("zhangsan", 15), Person("lisi", 20)).toDF()

    // 3. Operate on it with untyped column expressions.
    dataFrame.where('age > 10)
      .select('name)
      .show()

    spark.stop()
  }

  /** Three ways of creating a DataFrame: toDF, createDataFrame, read. */
  @Test
  def dataframe2(): Unit = {
    // 1. Create the SparkSession.
    val spark = SparkSession.builder()
      .appName("dataframe2")
      .master("local[6]")
      .getOrCreate()

    // 2. Import the implicit conversions bound to this session.
    import spark.implicits._

    val personList = Seq(Person("zhangsan", 15), Person("lisi", 20))

    // 1. toDF on a local collection or an RDD (via implicits).
    val df1 = personList.toDF()
    val df2 = spark.sparkContext.parallelize(personList).toDF()

    // 2. createDataFrame on the session.
    val df3 = spark.createDataFrame(personList)

    // 3. Reading an external data source.
    val df4 = spark.read.csv("dataset/BeijingPM20100101_20151231_noheader.csv")
    df4.show()

    spark.stop()
  }

  /**
   * Small analysis of the Beijing PM2.5 dataset: count non-NA
   * PM_Dongsi readings per (year, month).
   */
  @Test
  def dataframe3(): Unit = {
    // 1. Create the SparkSession.
    val spark = SparkSession.builder()
      .appName("pm an")
      .master("local[6]")
      .getOrCreate()

    import spark.implicits._

    // 2. Read the dataset (the first CSV line is the header).
    val sourceDF = spark.read
      .option("header", value = true)
      .csv("dataset/BeijingPM20100101_20151231.csv")
//    sourceDF.show()
//      sourceDF.printSchema()

    // 3. Processing steps:
    //    1. select the columns
    //    2. filter out NA readings
    //    3. group by year and month
    //    4. aggregate
    // DSL equivalent of the SQL query below:
//    sourceDF.select('year, 'month, 'PM_Dongsi)
//      .where('PM_Dongsi =!= "NA")
//      .groupBy('year, 'month)
//      .count()
//      .show()

    // Same query with plain SQL:
    // 1. register the DataFrame as a temporary view
    sourceDF.createOrReplaceTempView("pm")

    // 2. run the query
    spark.sql("select year, month, count(PM_Dongsi) from pm where PM_Dongsi != 'NA' group by year, month")
      .show()

    spark.stop()
  }

  /**
   * DataFrame (weakly typed) vs Dataset (strongly typed): the DF
   * mistake only fails at runtime, the DS mistake fails to compile.
   */
  @Test
  def dataframe4(): Unit = {
    // 1. Create the SparkSession.
    val spark = SparkSession.builder()
      .appName("dataframe4")
      .master("local[6]")
      .getOrCreate()

    // 2. Create the sample data.
    import spark.implicits._

    val personList = Seq(Person("zhangsan", 15), Person("lisi", 20))

    // DataFrame rows are untyped; mapping needs an explicit RowEncoder.
    val df = personList.toDF()
    df.map((row: Row) => Row(row.get(0), row.getAs[Int](1) * 2))(RowEncoder.apply(df.schema))
      .show()

    // Weakly typed operations are NOT checked at compile time: the line
    // below compiles but throws AnalysisException at runtime because
    // "name, school" is not a column — kept commented out so the test
    // itself passes.
//    df.groupBy("name, school")

    // Dataset operations are strongly typed.
    val ds = personList.toDS()
    ds.map((person: Person) => Person(person.name, person.age * 2))
      .show()

    // Strongly typed operations ARE checked at compile time: the line
    // below does not compile (String is not a Boolean predicate).
//    ds.filter(person => person.name)

    spark.stop()
  }

  /** How to create, read, and pattern-match a Row. */
  @Test
  def row(): Unit = {
    // 1. Creating a Row: unlike the case class next to it, a Row only
    //    gains column names when paired with a schema.
    val p = Person("zhangsan", 15)
    val row = Row("zhangsan", 15)

    // 2. Reading fields by position and type.
    row.getString(0)
    row.getInt(1)

    // 3. Row provides an extractor, so it can be pattern matched.
    row match {
      case Row(name, age) => println(name, age)
    }
  }

}

/**
 * Sample domain object used by the Dataset/DataFrame demos in this file.
 *
 * @param name the person's name
 * @param age  the person's age in years
 */
case class Person(name: String, age: Int)





















