package com.jscloud.spark.dbopt

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.commons.lang3.StringUtils
import org.apache.spark.rdd.{JdbcRDD}

import java.sql.{Connection, DriverManager, PreparedStatement}

/**
 * Reads job-detail rows from MySQL via Spark's JdbcRDD, then computes
 * per-search-key job counts (persisted back to MySQL) and the
 * highest-salary job per search key (printed to stdout).
 */

/**
 * One row of the `jobdetail` MySQL table. All columns are read as raw strings
 * (including `job_salary`, which is parsed on demand, e.g. "10k-20k").
 */
final case class Job_Detail(job_id: String, job_name: String, job_url: String, job_location: String, job_salary: String,
                            job_company: String, job_experience: String, job_class: String, job_given: String,
                            job_detail: String, company_type: String, company_person: String,
                            search_key: String, city: String)

object JdbcForMysql {

  /**
   * Connection factory handed to [[JdbcRDD]]; invoked on each executor to open
   * its own MySQL connection (a Connection is not serializable, so it must be
   * created lazily via this function).
   */
  val getConn: () => Connection = () => {
    DriverManager.getConnection("jdbc:mysql://bigdata03:3306/mydb?useSSL=false&characterEncoding=utf-8", "root", "jsCloud123")
  }

  /**
   * Parses the upper bound of a salary string shaped like "10k-20k" into an Int (20).
   * Returns 0 for null/empty input or any value that does not match that shape,
   * including non-integer bounds such as "10.5k-20.5k" (the original `.toInt`
   * would have thrown NumberFormatException and failed the whole Spark task).
   */
  private def upperSalary(job_salary: String): Int = {
    if (StringUtils.isNotEmpty(job_salary) && job_salary.contains("k") && job_salary.contains("-")) {
      val bounds: Array[String] = job_salary.replace("k", "").split("-")
      if (bounds.length >= 2) scala.util.Try(bounds(1).trim.toInt).getOrElse(0) else 0
    } else {
      0
    }
  }

  def main(args: Array[String]): Unit = {
    // Spark entry point: local mode, WARN logging to keep output readable.
    val sparkConf: SparkConf = new SparkConf().setAppName("JdbcForMysql").setMaster("local[*]")
    val sparkContext = new SparkContext(sparkConf)
    sparkContext.setLogLevel("WARN")

    // Build a JdbcRDD that partitions the jobdetail table by job_id (1..75000)
    // into 8 partitions; each partition runs the bounded query independently.
    val jdbcRDD: JdbcRDD[Job_Detail] = new JdbcRDD[Job_Detail](
      sparkContext,
      getConn,
      "select * from jobdetail where job_id >= ? and job_id  <= ? ",
      1,
      75000,
      8,
      rs => Job_Detail(
        rs.getString(1), rs.getString(2), rs.getString(3), rs.getString(4),
        rs.getString(5), rs.getString(6), rs.getString(7), rs.getString(8),
        rs.getString(9), rs.getString(10), rs.getString(11), rs.getString(12),
        rs.getString(13), rs.getString(14))
    )

    // jdbcRDD feeds two independent Spark jobs below; cache it so the MySQL
    // table is scanned only once instead of once per job.
    jdbcRDD.cache()

    // Requirement 1: count jobs per search key and persist the counts to MySQL.
    val resultRDD: RDD[(String, Int)] = jdbcRDD
      .map(x => (x.search_key, 1))
      .reduceByKey(_ + _)
      .filter(x => x._1 != null)

    // Shrink the partition count after filtering so we don't open one MySQL
    // connection per near-empty partition.
    val resultRdd2: RDD[(String, Int)] = resultRDD.coalesce(2)

    resultRdd2.foreachPartition(iter => {
      // One connection and one batched, single-transaction insert per partition.
      val conn: Connection = DriverManager.getConnection("jdbc:mysql://bigdata03:3306/mydb?useSSL=false&characterEncoding=UTF-8", "root", "jsCloud123")
      try {
        conn.setAutoCommit(false)
        val statement: PreparedStatement = conn.prepareStatement("insert into job_count(search_name,job_num)  values(?,?)")
        try {
          iter.foreach { record =>
            statement.setString(1, record._1)
            statement.setInt(2, record._2)
            statement.addBatch()
          }
          statement.executeBatch()
          conn.commit()
        } finally {
          // Close the statement before the connection (the original closed the
          // connection first, and leaked both if the batch threw).
          statement.close()
        }
      } finally {
        conn.close()
      }
    })

    // Requirement 2: for each search key, pick the job whose salary range has
    // the highest upper bound and print it.
    val maxJobDetail: RDD[Job_Detail] = jdbcRDD
      .groupBy(_.search_key)
      .map { case (_, jobs) => jobs.maxBy(j => upperSalary(j.job_salary)) }

    maxJobDetail.collect().foreach { x =>
      println(x.job_id + "\t" + x.job_salary + "\t" + x.search_key + "\t" + x.job_company)
    }

    sparkContext.stop()
  }

}

