package SQL

import org.apache.spark.sql
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.junit.Test

class TypedTransformation {

  // Local SparkSession shared by every test in this class.
  val spark = new sql.SparkSession.Builder()
    .master("local[6]")
    .appName("DataSet")
    .getOrCreate()

  // Bring in implicit conversions: toDS(), encoders, Symbol -> Column.
  import spark.implicits._

  /** Demonstrates flatMap, map and mapPartitions on typed Datasets. */
  @Test
  def trans(): Unit = {
    // flatMap: split each sentence into words.
    // FIX: the delimiter was "  " (two spaces) and never matched the
    // single-space input, so the sentences were emitted unsplit.
    val ds1 = Seq("hello spark", "hello hadoop").toDS()
      .flatMap(item => item.split(" "))
    ds1.show()
    // map over a Dataset of case-class rows.
    val ds2 = Seq(person("zs", 15), person("ww", 18)).toDS()
    //ds2.map(p => person(p.name, p.age * 2))
    //  .show()
    // mapPartitions: transform one whole partition's iterator at a time,
    // useful to amortize per-partition setup cost.
    ds2.mapPartitions { iter =>
      iter.map(item => (item.name, item.age * 2))
    }.show()
  }

  /** transform: derive one Dataset from another with a reusable function. */
  @Test
  def transform(): Unit = {
    // FIX: show() returns Unit, so binding the result to a val was pointless.
    spark.range(10)
      .transform(dataset => dataset.withColumn("double", 'id * 2))
      .show()
  }

  /** as[T]: attach a case-class view to an untyped DataFrame. */
  @Test
  def as(): Unit = {
    // Explicit schema matching the Temp case class.
    val schema = StructType(
      Seq(
        StructField("id", IntegerType),
        StructField("date", StringType),
        StructField("temp", StringType)
      )
    )
    // Read the CSV with the schema above.
    val df: DataFrame = spark.read
      .schema(schema)
      .option("delimiter", ",")
      .csv("D:\\MP\\wendu\\input\\data.csv")
    // Convert the untyped DataFrame into a typed Dataset[Temp].
    val ds: sql.Dataset[Temp] = df.as[Temp]
    // Output.
    ds.show()
  }

  /** Typed filter on a Dataset. */
  @Test
  def filter(): Unit = {
    // Keep only people strictly older than 15.
    Seq(person("zs", 15), person("ww", 18), person("ls", 10)).toDS()
      .filter(item => item.age > 15)
      .show()
  }

  /** groupByKey + count on a typed Dataset. */
  @Test
  def groupByKey(): Unit = {
    val grouped = Seq(person("zs", 15), person("ww", 18), person("ww", 10)).toDS()
      .groupByKey(p => p.name)

    // count() yields a Dataset[(name, count)].
    // FIX: the original wrapped this foreach in an outer println, which
    // printed "()" (the Unit result of foreach) instead of just the counts.
    grouped.count().foreach(println(_))
  }

  /** randomSplit and sample. */
  @Test
  def split(): Unit = {
    val ds = spark.range(15)
    // randomSplit: cut into three parts with relative weights 3 : 3 : 4.
    val array = ds.randomSplit(Array(3, 3, 4))
    array.foreach(_.show())
    // sample: roughly 50% of the rows, without replacement.
    ds.sample(false, 0.5).show()
  }

  /** orderBy / sort with column ordering expressions. */
  @Test
  def sort(): Unit = {
    val ds = Seq(person("zs", 15), person("ww", 18), person("ww", 10)).toDS()
    // Sort by name descending, with nulls placed last.
    ds.orderBy('name.desc_nulls_last).show()
    ds.sort('age.asc).show()
  }

  /** distinct vs dropDuplicates. */
  @Test
  def distinct(): Unit = {
    val ds = spark.createDataset(Seq(person("zs", 15), person("ww", 18), person("ww", 15)))
    // distinct(): rows must match in EVERY column to count as duplicates.
    ds.distinct().show()
    // dropDuplicates: only the listed columns are compared; same age = duplicate.
    ds.dropDuplicates("age").show()
  }

  /** Set-like operations between Datasets. */
  @Test
  def collection(): Unit = {
    val ds1 = spark.range(1, 10)
    val ds2 = spark.range(5, 15)
    // except: rows of ds1 that are absent from ds2 (set difference).
    ds1.except(ds2).show()
    // intersect: rows present in both ds1 and ds2.
    ds1.intersect(ds2).show()
    // union: behaves like SQL UNION ALL — duplicates are KEPT.
    // (The original comment claimed it deduplicates; chain .distinct() for that.)
    ds1.union(ds2).show()
    // limit: take only the first 5 rows.
    ds1.limit(5).show()
  }
}

/** One temperature record parsed from the CSV (id, date, temperature as raw strings). */
final case class Temp(id: Int, date: String, temp: String)