package dataframe

import org.apache.spark.SparkConf
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.functions.{avg, floor, lit, sqrt, udf}
import org.apache.spark.sql.{DataFrame, SparkSession}

object DataFrame_FinalDemo05 {

  /** Demo ETL pipeline: loads employee data from an in-memory collection and two
    * JSON files, joins them into one DataFrame, cleans missing salary values
    * (drop vs. mean-fill), and replaces salary outliers that lie more than two
    * standard deviations from the mean with the mean salary.
    *
    * Runs on a local Spark master; expects `data/salary.json` and
    * `data/designation.json` relative to the working directory.
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local[*]")
    conf.setAppName("DataFrame_FinalDemo05")

    val spark: SparkSession = SparkSession
      .builder()
      .config(conf)
      .getOrCreate()
    import spark.implicits._

    // 1. Data loading: bring in data from different sources (in-memory collection, files).
    val employeeDF: DataFrame = spark.sparkContext.parallelize(List(
      (1, "陈柯宇", 25), (2, "陶心瑶", 35), (3, "楼一萱", 24),
      (4, "张希", 28), (5, "王心凌", 26), (6, "庄妮", 35),
      (7, "何洁", 38), (8, "成方圆", 32), (9, "孙玉", 29),
      (10, "刘珂矣", 29), (11, "林忆莲", 28), (12, "蓝琪儿", 25), (13, "白安", 31)
    )).toDF("emp_id", "name", "age")

    val salaryDF: DataFrame = spark.read.json("data/salary.json")

    val roleDF: DataFrame = spark.read.json("data/designation.json")

    // 2. Data integration: join the three sources on the employee id.
    val employeeDataDF: DataFrame = employeeDF
      .join(salaryDF, employeeDF("emp_id") === salaryDF("e_id"))
      .join(roleDF, employeeDF("emp_id") === roleDF("id"))
      .select(
        employeeDF("emp_id").alias("emp_id"),
        employeeDF("name").as("name"),
        employeeDF("age").as("age"),
        salaryDF("salary").as("salary"),
        roleDF("role").as("role")
      )
    employeeDataDF.printSchema()
    employeeDataDF.show()

    // 3a. Cleaning, option A: simply drop every row that contains a missing value.
    val droppedDataDF: DataFrame = employeeDataDF.na.drop()
    droppedDataDF.show()

    // 3b. Cleaning, option B: fill missing salaries with the (floored) mean salary.
    // floor(avg(...)) produces a BIGINT (LongType) column, so read it with
    // getLong instead of a fragile toString-based round trip.
    val meanSalary: Double = employeeDataDF
      .select(floor(avg("salary")))
      .first()
      .getLong(0)
      .toDouble
    println(meanSalary)
    val cleanDataDF: DataFrame = employeeDataDF.na.fill(Map("salary" -> meanSalary))
    cleanDataDF.show()

    // 4. Outlier handling.
    // 4a. Squared deviation of every salary from the mean.
    val devs: DataFrame = cleanDataDF.select(
      (($"salary" - meanSalary) * ($"salary" - meanSalary)).alias("deviation")
    )
    // 4b. Standard deviation = sqrt of the mean squared deviation.
    val stddev: Double = devs
      .select(sqrt(avg("deviation")))
      .first()
      .getDouble(0)

    // 4c. UDF: a salary farther than 2 standard deviations from the mean is
    // treated as an outlier and replaced by the mean; otherwise it is kept.
    val replaceOutlier: UserDefinedFunction = udf((value: Long, mean: Double) => {
      if (math.abs(value - mean) > 2 * stddev) mean else value.toDouble
    })

    // 4d. Show only the rows whose salary was actually replaced.
    cleanDataDF
      .withColumn("updated_salary", replaceOutlier($"salary", lit(meanSalary)))
      .filter($"salary" =!= $"updated_salary")
      .show()

    spark.stop()
  }
}
