package SQL_L

import org.apache.spark.sql.{DataFrame, Dataset, KeyValueGroupedDataset, Row, SparkSession}
import org.apache.spark.sql.types.{FloatType, IntegerType, StringType, StructField, StructType}
import org.junit.Test

import java.lang

/**
 * Typed (strongly-typed) transformation operations on Dataset.
 */
class TypedTransformation {
  // SparkSession shared by all tests in this class; local[6] = 6 worker threads.
  val spark = SparkSession.builder()
    .master("local[6]")
    .appName("transformation")
    .getOrCreate()
  // Implicit encoders and conversions (toDS, 'col symbol syntax).
  import spark.implicits._

  @Test
  def trans(): Unit = {
    // flatMap: each input row may produce several output rows.
    val ds = Seq("hello spark", "hello hadoop").toDS()
      .flatMap(_.split(" "))
    // map: one-to-one transformation of every element.
    val ds2 = Seq(Person("zhangsan", 15), Person("lisi", 15)).toDS()
    ds2.map(person => Person(person.name, person.age * 2))


    // mapPartitions
    /**
     * mapPartitions receives all rows of one partition as an iterator;
     * we transform every element and hand back a new iterator for the
     * whole partition (the function runs once per partition, not once
     * per row).
     */
    ds2.mapPartitions(iter => {
      val result = iter.map(person => Person(person.name, person.age * 2))
      result
    }).show()
  }

  @Test
  def trans1(): Unit = {
    // Generates ids 0 to 4 (the end of range is exclusive).
    val ds = spark.range(5)
    // transform applies a whole-Dataset-to-Dataset function, keeping fluent chains readable.
    ds.transform(dataset => dataset.withColumn("doubled", 'id * 2))
      .show()
  }

  /**
   * Converts a DataFrame (untyped rows) into a Dataset (strongly typed) via as[T].
   */
  @Test
  def as(): Unit = {
    // Schema for the tab-separated student file.
    val structType = StructType(
      Seq(
        StructField("name", StringType),
        StructField("age", IntegerType),
        StructField("gpa", FloatType)
      )
    )
    val df: DataFrame = spark.read
      .schema(structType)
      .option("delimiter", "\t")
      .csv("data/studenttab10k")
    // Convert: Student's field names and types must line up with the schema above.
    val ds: Dataset[Student] = df.as[Student]
    ds.show()
  }

  @Test
  def filter(): Unit = {
    val ds = Seq(Person("zhangsan", 19), Person("lisi", 21)).toDS()
    // Keep only persons older than 20.
    ds.filter(_.age > 20)
      .show()
  }

  @Test
  def groupByKey(): Unit = {
    val ds = Seq(Person("zhangsan", 19), Person("lisi", 21), Person("lisi", 22)).toDS()
    // Group by name, then count the rows under each key.
    val group: KeyValueGroupedDataset[String, Person] = ds.groupByKey(_.name)
    val result: Dataset[(String, Long)] = group.count()
    result.show()
  }

  @Test
  def randomSplit(): Unit = {
    val ds = spark.range(15)
    // Weights are normalized to sum to 1; the result has one Dataset per weight (4 here).
    val datasets = ds.randomSplit(Array(0.1, 0.2, 0.3, 0.4))
    datasets.foreach(dataset => dataset.show())
    println(datasets.size)
  }


  @Test
  def sample(): Unit = {
    val ds = spark.range(15)
    // Sample roughly 40% of the rows, without replacement.
    ds.sample(withReplacement = false, fraction = 0.4).show()
  }

  @Test
  def orderBy(): Unit = {
    val ds = Seq(Person("zhangsan", 12), Person("zhangsan", 8), Person("lisi", 15)).toDS()
    ds.orderBy("age").show() // ascending, by column name
    ds.orderBy('age.desc).show() // descending, via a column expression
  }

  @Test
  def sort(): Unit = {
    val ds = Seq(Person("zhangsan", 12), Person("zhangsan", 8), Person("lisi", 15)).toDS()
    // sort behaves the same as orderBy here.
    ds.sort('age.desc).show()
  }

  // Deduplicate, comparing only the listed column(s).
  @Test
  def dropDuplicates(): Unit = {
    val ds = spark.createDataset(Seq(Person("zhangsan", 15), Person("zhangsan", 15), Person("lisi", 15)))
    ds.dropDuplicates("age").show()
  }

  @Test
  def distinct(): Unit = {
    // distinct compares entire rows (all columns).
    val ds = spark.createDataset(Seq(Person("zhangsan", 15), Person("zhangsan", 15), Person("lisi", 15)))
    ds.distinct().show()
  }

  @Test
  def collection(): Unit = {
    val ds1 = spark.range(1, 10)
    val ds2 = spark.range(5, 15)
    // Difference: rows in ds1 but not in ds2.
    ds1.except(ds2).show()
    // Intersection.
    ds1.intersect(ds2).show()
    // Union (keeps duplicates, like SQL UNION ALL).
    ds1.union(ds2).show()
    // limit caps the number of rows returned.
    ds1.limit(3).show()
  }
}

/** Row type for the student CSV: (name, age, gpa) — must match the schema in `as()`. */
final case class Student(name: String, age: Int, gpa: Float)