package DataAnalysis_hzl

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.DoubleType

import java.util.Properties

object t3 {
  /**
   * Compares each trip's distance against the overall average distance and
   * writes the per-label counts (above / not above average) back to MySQL.
   *
   * Reads the pre-cleaned data from the `clean` table and overwrites the
   * result table `r3`.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("数据分析第一题")
      .enableHiveSupport()
      .getOrCreate()

    // JDBC connection settings.
    // NOTE(review): credentials are hard-coded; consider moving them to
    // configuration or environment variables.
    val conn = new Properties()
    conn.setProperty("user", "root")
    conn.setProperty("password", "123456")
    conn.setProperty("driver", "com.mysql.jdbc.Driver")

    // Single source of truth for the JDBC URL (was duplicated for read/write).
    val jdbcUrl = "jdbc:mysql://192.168.40.110:3306/hzl?useSSL=false"

    // Read the already-processed data from the `clean` table.
    val data = spark.read.jdbc(jdbcUrl, "clean", conn)

    // Analyze the proportion of trips above vs. below the average distance.

    // Average of all trip distances; cast to double because `distance`
    // may be stored as a string in the source table.
    val avgDistance = data
      .select(avg(col("distance").cast(DoubleType)))
      .first()
      .getDouble(0)

    // Label each row relative to the average, then count per label.
    // `distance` is cast to double so the comparison is consistent with how
    // the average was computed. The redundant `.distinct()` that followed
    // groupBy/count was removed: groupBy already yields one row per key.
    val r1 = data
      .withColumn(
        "panduan",
        when(col("distance").cast(DoubleType) > lit(avgDistance), lit("高于平均路程"))
          .otherwise(lit("低于平均路程"))
      )
      .groupBy("panduan")
      .count()

    r1.show()

    // Persist the result to the `r3` table, replacing any previous run.
    r1.write.mode("overwrite").jdbc(jdbcUrl, "r3", conn)

    spark.close()
  }
}
