package com.scala.business

import com.google.gson.Gson
import org.apache.spark.sql.functions.{col, round}
import org.apache.spark.sql.{Dataset, Row, SaveMode, SparkSession}

import java.util.Properties

/**
 * Elite-point / basic-point ratio per membership tier (persisted).
 * - Computes, for every membership tier, the average elite points (`epSum`)
 *   and basic points (`bpSum`), derives their ratio, and appends the result
 *   to a MySQL table via JDBC.
 * - The metric shows how elite vs. basic points are distributed across the
 *   different membership tiers.
 */
object Take02 {
  // NOTE(review): unused within this file; kept because it is a public member
  // of the object that other code may reference.
  val gson: Gson = new Gson()

  // External endpoints and credentials.
  // TODO(review): move to configuration / environment — hard-coding the DB
  // password in source is a security liability.
  private val InputPath = "hdfs://192.168.88.33:9000/air_data/*"
  private val JdbcUrl =
    "jdbc:mysql://localhost:3306/hadoop?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=GMT"
  private val JdbcTable = "percentage_of_member_points"

  def main(args: Array[String]): Unit = {
    // Build the SparkSession (local mode).
    val ss = SparkSession.builder
      .appName("不同会员级别的精英积分和基础积分比例")
      .master("local")
      .getOrCreate

    try {
      // Read every JSON file of the air_data dataset from HDFS.
      val df: Dataset[Row] = ss.read.json(InputPath)

      // Group by membership tier, average age / elite points / basic points,
      // round the averages, and derive the elite-to-basic ratio.
      // NOTE(review): with Spark's default (non-ANSI) SQL semantics the
      // division yields NULL when avg(bpSum) is 0 — presumably acceptable
      // downstream; confirm with the table's consumers.
      val res: Dataset[Row] = df.groupBy("ffpTier")
        .avg("age", "epSum", "bpSum")
        .withColumn("Avg-epSum", round(col("avg(epSum)"), 3))
        .withColumn("Avg-bpSum", round(col("avg(bpSum)"), 3))
        .withColumn("Avg-age", round(col("avg(age)"), 2))
        .withColumn("epSum / bpSum", round(col("avg(epSum)")
          .divide(col("avg(bpSum)")), 3))
        .drop("avg(age)", "avg(epSum)", "avg(bpSum)")

      // Show the full (untruncated) result for manual inspection.
      res.show(false)

      // JDBC connection properties; setProperty is the string-typed idiom
      // (Properties.put is the raw untyped Hashtable method).
      val properties: Properties = new Properties
      properties.setProperty("user", "root")
      properties.setProperty("password", "123456")

      // Append so repeated runs accumulate rather than fail on an existing
      // table (SaveMode.ErrorIfExists would abort on the second run).
      res.write
        .mode(SaveMode.Append)
        .jdbc(JdbcUrl, JdbcTable, properties)
    } finally {
      // Release the SparkSession even when the job fails mid-way; previously
      // an exception in read/aggregate/write leaked the session.
      ss.stop()
    }
  }

}
