package Data_Analysis_llp

import org.apache.spark.sql.SparkSession

import java.util.Properties

object DataAnalysis2 {

  /**
   * DataAnalysis2: for each location, compute the number of data-analysis job
   * postings and the average salary, sorted by average salary descending.
   * (Salary outliers — rows holding the raw min/max — were already replaced
   * by the median during the cleansing stage that produced the `cleanse` table.)
   *
   * Example output row: Shanghai  158  11433
   */
  def main(args: Array[String]): Unit = {
    // Build the SparkSession (local mode, all available cores).
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("DataAnalysis2")
      .getOrCreate()

    // Single source of truth for the JDBC endpoint: the original duplicated
    // this URL at the read and write sites, which lets them drift apart.
    val jdbcUrl = "jdbc:mysql://192.168.40.110:3306/llp?useSSL=false"

    // JDBC connection properties.
    // NOTE(review): credentials are hard-coded — move them to config/env before
    // this runs anywhere but a lab box.
    // NOTE(review): "com.mysql.jdbc.Driver" is the legacy Connector/J 5.x class
    // name; 8.x still accepts it but logs a deprecation warning suggesting
    // "com.mysql.cj.jdbc.Driver". Kept as-is to avoid breaking a 5.x classpath.
    val mysqlConnect = new Properties()
    mysqlConnect.setProperty("user", "root")
    mysqlConnect.setProperty("password", "123456")
    mysqlConnect.setProperty("driver", "com.mysql.jdbc.Driver")

    // Load the cleansed data and expose it to Spark SQL as a temp view.
    spark.read.jdbc(jdbcUrl, "cleanse", mysqlConnect)
      .createOrReplaceTempView("cleanse")

    // One aggregation per location: posting count and the ceiling of the
    // average salary. The original query wrapped this in two redundant
    // subquery layers (a bare projection, then a re-derivation of avg_money);
    // a single GROUP BY produces the identical result.
    // ceil: round up to the next integer.
    val result = spark.sql(
      """
        |select
        |  position,
        |  count(position) as position_number,
        |  ceil(sum(money) / count(position)) as avg_money
        |from cleanse
        |group by position
        |order by avg_money desc
        |""".stripMargin)

    // Persist the result back to MySQL; "overwrite" replaces the table each run.
    result.write.mode("overwrite")
      .jdbc(jdbcUrl, "DataAnalysis2", mysqlConnect)

    // Also write to HDFS as CSV; repartition(1) collapses the output into a
    // single part file under the given directory.
    result.repartition(1).write.mode("overwrite").csv("/llp/DataAnalysis_all/DataAnalysis2.csv")

    // Release Spark resources (close() merely delegates to stop()).
    spark.stop()
  }

}
