package Data_Analysis_llp

import org.apache.spark.sql.SparkSession

import java.util.Properties

object DataAnalysis4 {
  /*
      DataAnalysis4: for each city's job postings, count the postings grouped
      by the required work experience, e.g.:
        Shanghai   student/new-grad   130
   */
  def main(args: Array[String]): Unit = {
    // Local SparkSession that backs all of the SQL work below.
    val session = SparkSession.builder()
      .appName("DataAnalysis4")
      .master("local[*]")
      .getOrCreate()

    // Single JDBC endpoint shared by both the read and the write below.
    val jdbcUrl = "jdbc:mysql://192.168.40.110:3306/llp?useSSL=false"

    // MySQL connection settings.
    // NOTE(review): "com.mysql.jdbc.Driver" is the legacy Connector/J 5.x
    // class; Connector/J 8.x renamed it to com.mysql.cj.jdbc.Driver —
    // confirm which connector jar is on the classpath.
    val props = new Properties()
    props.setProperty("driver", "com.mysql.jdbc.Driver")
    props.setProperty("user", "root")
    props.setProperty("password", "123456")

    // Load the previously cleansed data and expose it as a temp view.
    session.read
      .jdbc(jdbcUrl, "cleanse", props)
      .createOrReplaceTempView("cleanse")

    // Group by city and by the first comma-separated token of the
    // experience requirement, then count postings per group.
    val stats = session.sql(
      """
        |select
        |position,
        |split(demand,",")[0] as demand01,
        |count(*) as number
        |from cleanse
        |group by position,demand01
        |""".stripMargin)

    // Persist the aggregate back to MySQL, replacing any previous run.
    stats.write
      .mode("overwrite")
      .jdbc(jdbcUrl, "DataAnalysis4", props)

    // Also write to HDFS; repartition(1) collapses the output into a
    // single part file under the target directory.
    stats.repartition(1)
      .write
      .mode("overwrite")
      .csv("/llp/DataAnalysis_all/DataAnalysis4.csv")

    // Tear down the Spark environment.
    session.close()
  }
}
