package Data_Analysis_llp

import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.functions._

import java.util.Properties

object DataAnalysis5 {
  def main(args: Array[String]): Unit = {
    // DataAnalysis5: group and count the company-description traits attached
    // to each job posting, e.g. '5000-10000人' -> 55.

    // Build the SparkSession (local mode, all available cores).
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("DataAnalysis5")
      .getOrCreate()

    // JDBC connection properties for MySQL.
    val jdbcProps = new Properties()
    jdbcProps.setProperty("user", "root")
    jdbcProps.setProperty("password", "123456")
    jdbcProps.setProperty("driver", "com.mysql.jdbc.Driver")

    // Single JDBC URL shared by both the read below and the write at the end.
    val jdbcUrl = "jdbc:mysql://192.168.40.110:3306/llp?useSSL=false"

    // Load the already-cleansed data and expose it as a temp view for SQL.
    spark.read.jdbc(jdbcUrl, "cleanse", jdbcProps)
      .createOrReplaceTempView("cleanse")

    // Inner query: explode(split(trait, ",")) turns each comma-separated
    // trait value into its own row; the outer query then groups per trait,
    // counts occurrences, and sorts by frequency descending.
    val result = spark.sql(
      """
        |select
        |gonsi_jieshao,
        |count(*) as number
        |from(
        |select
        |explode(split(trait,",")) as gonsi_jieshao
        |from cleanse
        |) as t1
        |group by gonsi_jieshao
        |order by number desc
        |""".stripMargin)

    // Persist the aggregation back to MySQL; the table is replaced each run.
    result.write.mode("overwrite")
      .jdbc(jdbcUrl, "DataAnalysis5", jdbcProps)

    // Also write to HDFS as CSV; repartition(1) forces a single output part file.
    result.repartition(1).write.mode("overwrite").csv("/llp/DataAnalysis_all/DataAnalysis5.csv")

    // Tear down the Spark session.
    spark.close()
  }
}
