package DataAnalysis_hzl

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.DoubleType

import java.util.Properties

object t4 {
  /**
   * Reads pre-cleaned product data from MySQL, derives each item's price drop
   * (`jiangjia` = old_price - price), and writes the per-label counts of items
   * above vs. below the average price drop back to MySQL table `r4`.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("数据分析第一题")
      .enableHiveSupport()
      .getOrCreate()

    // JDBC connection properties.
    // NOTE(review): credentials are hard-coded; externalize to config/env
    // for anything beyond a classroom exercise.
    val conn = new Properties()
    conn.setProperty("user", "root")
    conn.setProperty("password", "123456")
    conn.setProperty("driver", "com.mysql.jdbc.Driver")

    // Read the already-cleaned data.
    val data = spark.read
      .jdbc("jdbc:mysql://192.168.40.110:3306/hzl?useSSL=false", "clean", conn)

    // Analyse the proportion of items above/below the average price drop.

    // Add the price-drop column, rounded to 1 decimal place.
    val data2 = data.withColumn("jiangjia", round(col("old_price") - col("price"), 1))

    // Scalar average price drop. The aggregate is null on an empty table and
    // Row.getDouble(0) would throw there, so fall back to 0.0 in that case.
    val avgRow = data2.select(round(avg(col("jiangjia")), 1)).first()
    val avg_jiangjia = if (avgRow.isNullAt(0)) 0.0 else avgRow.getDouble(0)

    // Label each row, then count per label. groupBy().count() already yields
    // exactly one row per key, so the original trailing .distinct() was a
    // redundant extra shuffle and has been removed.
    val r1 = data2.withColumn(
        "panduan",
        when(col("jiangjia") > lit(avg_jiangjia), "高于平均降价").otherwise(lit("低于平均降价"))
      )
      .groupBy("panduan")
      .count()

    r1.show()

    // Persist the result, replacing any previous run's output table.
    r1.write.mode("overwrite")
      .jdbc("jdbc:mysql://192.168.40.110:3306/hzl?useSSL=false", "r4", conn)

    spark.stop()
  }

}
