package 数据预处理

import org.apache.spark.sql.SparkSession

/**
 * @author 35192
 * @date 2021-05-21 15:27
 */
object Task01 {

  /**
   * Demonstrates three deduplication tasks on a small in-memory dataset:
   *   1. count/drop fully duplicated rows,
   *   2. find rows that differ only in `id`,
   *   3. assign a fresh surrogate id to the deduplicated rows.
   */
  def main(args: Array[String]): Unit = {

    // Create a local Spark session (6 worker threads).
    val spark = SparkSession.builder()
      .appName("task")
      .master("local[6]")
      .getOrCreate()
    // Implicit conversions and built-in SQL functions (col, count, ...).
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Sample dataset: (id, weight, height, age, gender).
    // Rows for ids 3 appear twice verbatim; ids 1/4 share identical
    // attribute values but differ in id (exercises task 2).
    val listData = List(
      (1, 144.5, 5.9, 33, "M"),
      (2, 167.2, 5.4, 45, "M"),
      (3, 124.1, 5.2, 23, "F"),
      (4, 144.5, 5.9, 33, "M"),
      (5, 133.2, 5.7, 54, "F"),
      (3, 124.1, 5.2, 23, "F"),
      (5, 129.2, 5.3, 42, "M")
    )
    // FIX: `val` instead of `var` — the reference is never reassigned.
    val df = spark.createDataFrame(listData).toDF("id", "weight", "height", "age", "gender")
    df.show()

    // Task 1: detect fully duplicated rows.
    // Approach 1: compare the raw count against the distinct count.
    val counts = df.count()
    val only = df.distinct().count()
    println(s"共有${counts}条数据，实际存在${only}条数据。")
    val df0 = df.dropDuplicates()
    df0.show()
    // Approach 2: aggregate count vs. countDistinct over `id`.
    // FIX: use col("id") — Symbol column syntax ('id) is deprecated.
    val df1 = df.agg(
      count(col("id")) as "count",
      countDistinct(col("id")) as "count_distinct_id")
    df1.show()

    // Task 2: find rows whose non-id columns coincide.
    // Approach 1: project away `id`, then count distinct projections.
    val df2 = df0.select(col("weight"), col("height"), col("age"), col("gender"))
      .distinct()
    df2.show()
    val otherCount = df2.count()
    println(s"存在id不同但是其他数据相同的情况去重后记录数为$otherCount")
    // Approach 2: dropDuplicates keyed on the non-id columns only.
    val df3 = df0.dropDuplicates(Seq("weight", "height", "age", "gender"))
    df3.show()

    // Task 3: assign a new surrogate id.
    // NOTE: monotonically_increasing_id() is unique and increasing per
    // partition, but NOT guaranteed to be consecutive.
    val df4 = df3.withColumn("new_id", monotonically_increasing_id())
    df4.show()

    // FIX: release Spark resources when the job finishes.
    spark.stop()
  }
}
