import java.lang

import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.junit.Test

class 有类型的转换类操作 {
  // Shared SparkSession for every test; local[6] = run locally with 6 worker threads.
  val spark: SparkSession = SparkSession.builder()
    .master("local[6]")
    .appName("Test")
    .getOrCreate()

  import spark.implicits._

  /**
    * Typed transformations: flatMap / map / mapPartitions.
    */
  @Test
  def trans(): Unit = {
    // flatMap: one input row may expand into many output rows
    val ds1 = Seq("hello spark", "hello hadoop").toDS()
    ds1.flatMap(item => item.split(" ")).show()
    // map: one-to-one transformation of each element
    val ds2 = Seq(Person("zhangsan", 10), Person("lisi", 15)).toDS()
    ds2.map(person => Person(person.name, person.age * 2)).show()
    // mapPartitions: transform a whole partition's iterator at once
    // (cheaper than map when per-partition setup, e.g. a connection, is needed)
    ds2.mapPartitions(
      iter => {
        val result = iter.map(person => Person(person.name, person.age * 2))
        result
      }
    ).show() // FIX: transformations are lazy — without an action this never executed
  }

  /**
    * transform: apply a Dataset => Dataset function, useful for chaining
    * reusable pipeline stages fluently.
    */
  @Test
  def transformTest(): Unit = {
    // ids 0 through 9
    val ds = spark.range(10)
    // add a "double" column computed as id * 2
    ds.transform(dataset => dataset.withColumn("double", 'id * 2))
      .show()
  }

  /**
    * as: convert an untyped DataFrame into a typed Dataset[Student].
    */
  @Test
  def asTest(): Unit = {
    /**
      * Sample input line: 1500100001,施笑槐,22,女,文科六班
      */
    // Explicit schema so the CSV columns get the correct names and types
    val schema = StructType(
      Seq(
        StructField("id", IntegerType),
        StructField("name", StringType),
        StructField("age", IntegerType),
        StructField("sex", StringType),
        StructField("className", StringType)
      )
    )
    // NOTE(review): hard-coded absolute Windows path — only runs on that machine
    val df: DataFrame = spark.read
      // apply the schema instead of inferring it
      .schema(schema)
      .option("delimiter", ",")
      .csv("C:\\Users\\HR\\Desktop\\students.txt")
    // DataFrame -> Dataset[Student]; column names must match the case class fields
    val ds: Dataset[Student] = df.as[Student]
    ds.show()
  }

  /**
    * filter: keep only rows matching a predicate.
    */
  @Test
  def filterTest(): Unit = {
    val ds = Seq(Person("zhangsan", 19), Person("lisi", 30)).toDS()
    ds.filter('age < 20)
      .show()
  }

  /**
    * groupByKey: typed aggregation keyed by a function of each element.
    */
  @Test
  def groupbykeyTest(): Unit = {
    val ds = Seq(Person("zhangsan", 19), Person("lisi", 30), Person("zhangsan", 26)).toDS()
    // key each row by the person's name, then count rows per key
    ds.groupByKey(person => person.name)
      .count()
      .show() // FIX: was `val result = group.count().show()`, binding Unit to an unused val
  }

  /**
    * randomSplit / sample: partition or subsample a Dataset.
    */
  @Test
  def splitTest(): Unit = {
    val ds = spark.range(15)
    // randomSplit: split into as many parts as there are weights;
    // weights 5, 2, 3 are normalized to proportions 50% / 20% / 30%
    val datasets: Array[Dataset[lang.Long]] = ds.randomSplit(Array(5, 2, 3))
    datasets.foreach(_.show())
    // sample: ~40% of the rows, without replacement
    ds.sample(withReplacement = false, fraction = 0.4).show()
  }

  /**
    * Sorting operators.
    */
  @Test
  def sort(): Unit = {
    val ds = Seq(Person("zhangsan", 19), Person("lisi", 30), Person("wangwu", 26)).toDS()
    ds.orderBy('name.desc).show() // same as SQL ORDER BY
    ds.sort('name.asc).show() // alias of orderBy
  }

  /**
    * Deduplication operators.
    */
  @Test
  def distinctTest(): Unit = {
    val ds = Seq(Person("zhangsan", 19), Person("lisi", 30), Person("zhangsan", 19)).toDS()
    // distinct: a row is a duplicate only if EVERY column matches
    ds.distinct().show()
    // dropDuplicates: deduplicate on specific column(s) only
    ds.dropDuplicates("age").show()
  }

  /**
    * Set-like operations on Datasets.
    */
  @Test
  def collect(): Unit = {
    val ds1 = spark.range(1, 10)
    val ds2 = spark.range(5, 15)
    // difference: rows in ds1 that are not in ds2
    ds1.except(ds2).show()
    // intersection
    ds1.intersect(ds2).show()
    // union (does NOT deduplicate, unlike SQL UNION)
    ds1.union(ds2).show() // FIX: result was discarded, so the union never executed
    // limit: first 3 rows
    ds1.limit(3).show()
  }
}

case class Student(id: Int, name: String, age: Int, sex: String, className: String)