package Data_Analysis_llp

import org.apache.spark.sql.SparkSession

import java.util.Properties

object DataAnalysis6 {

  /**
   * DataAnalysis6: statistics over the job-posting requirements placed on
   * students — technical skills, work-experience and education demands.
   *
   * Reads the cleansed postings from MySQL, splits the comma-separated
   * `demand` column into individual requirements, counts how often each
   * requirement occurs, and writes the ranked counts back to MySQL and to
   * HDFS as a single CSV output.
   */
  def main(args: Array[String]): Unit = {
    // Build the Spark SQL environment (local mode, all available cores).
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("DataAnalysis6")
      .getOrCreate()

    // Ensure the SparkSession is released even if any stage of the job
    // fails (the original code leaked the session on failure).
    try {
      // JDBC connection properties for the MySQL instance.
      // NOTE(review): credentials are hard-coded — consider moving them to
      // args/config before running outside a lab environment.
      val mysqlConnect = new Properties()
      mysqlConnect.setProperty("user", "root")
      mysqlConnect.setProperty("password", "123456")
      mysqlConnect.setProperty("driver", "com.mysql.jdbc.Driver")

      // Load the cleansed data and register it as a temp view for Spark SQL.
      spark.read
        .jdbc("jdbc:mysql://192.168.40.110:3306/llp?useSSL=false", "cleanse", mysqlConnect)
        .createOrReplaceTempView("cleanse")

      // Analysis: the inner query uses explode(split(...)) to turn each
      // comma-separated requirement into its own row; the outer query groups
      // by requirement and ranks the counts from most to least frequent.
      val result = spark.sql(
        """
          |select
          |demand,
          |count(*) as number
          |from(
          |select
          |explode(split(demand,",")) as demand
          |from cleanse
          |) as t1
          |group by demand
          |order by number desc
          |""".stripMargin)

      // Persist the ranking to MySQL (target table replaced on each run).
      result.write.mode("overwrite")
        .jdbc("jdbc:mysql://192.168.40.110:3306/llp?useSSL=false", "DataAnalysis6", mysqlConnect)

      // Persist to HDFS; repartition(1) forces a single output partition so
      // the CSV is written as one part file under the given directory.
      result.repartition(1).write.mode("overwrite").csv("/llp/DataAnalysis_all/DataAnalysis6.csv")
    } finally {
      // Always tear down the Spark SQL environment.
      spark.close()
    }
  }

}
