package com.bd03.streaminglearn.day0402.test

import java.sql.DriverManager

import com.bd03.streaminglearn.day0402.test.IpUtil.ip2Long
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ ConsumerStrategies, KafkaUtils, LocationStrategies}

/**
 * Spark Streaming job that consumes access-log records from Kafka topic
 * `test04`, resolves each record's source IP to a province (via a Redis-backed
 * lookup), and maintains cumulative per-province hit counts in a MySQL table.
 *
 * @author Ablue
 * @version 1.0.0
 * @since 2020-04-01
 */
object StreamingProcessConsumer {
  def main(args: Array[String]): Unit = {
    // Suppress verbose framework logging; only WARN and above are shown.
    Logger.getLogger("org").setLevel(Level.WARN)
    val conf = new SparkConf().setMaster("local[*]").setAppName(this.getClass.getSimpleName)
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer") // use the Kryo serialization library
    val ssc = new StreamingContext(conf, Seconds(2)) // Seconds(...) is the micro-batch interval

    // Kafka consumer configuration.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "hdp01:9092,hdp02:9092,hdp03:9092,hdp04:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "streaming_consumer",
      "auto.offset.reset" -> "earliest", // in principle each record should be consumed exactly once; avoid re-consumption
      // Offsets may be managed locally, by Spark, or by Kafka. Auto-commit is
      // disabled here, so without a manual commit every restart re-reads from the start.
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    val stream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](List("test04"), kafkaParams))

    // Record values are CSV; field 0 is an IP address. Convert it to its numeric
    // form and count occurrences per IP.
    val res = stream.map(record => {
      val arr = record.value().split(",")
      // Do NOT acquire connections here: this closure runs once per record, so
      // a connection per record would starve the pool when no shuffle occurs
      // and fail outright when one does.
      ip2Long(arr(0)).toString
    }).map((_, 1)).reduceByKey(_ + _).repartition(1)
    // Because of writer concurrency, without repartition(1) the MySQL table
    // can end up with duplicate province rows.

    res.foreachRDD(rdd => {

      rdd.foreachPartition(partition => {
        // The correct place to acquire connections is here, inside
        // foreachRDD -> foreachPartition: once per partition, on the executor.
        // Get a Jedis connection from the pool (mind the pool's configuration).
        val jedis = JedisPoolDemo.getResource()
        // Get a JDBC connection.
        val url = "jdbc:mysql://hdp03:3306/spark?useUnicode=true&characterEncoding=utf-8"
        val con = DriverManager.getConnection(url,"root","root")
        try {
          // SQL statements to execute. PreparedStatement precompiles the SQL,
          // which also prevents SQL injection.
          val sql = "insert into t_province values(?,?)"
          val sqlSerach = "select t_count from t_province where province = ?"
          val sqlUpdate = "update t_province  set t_count = ? where province = ?"
          val ps = con.prepareStatement(sql)
          val ps2 = con.prepareStatement(sqlSerach)
          val ps3 = con.prepareStatement(sqlUpdate)
          try {
            // Write each aggregated (ip, count) pair to the database.
            partition.foreach { case (ipKey, count) =>
              // Resolve the numeric IP to a province name; binarySearch takes
              // the explicit jedis handle so the partition's connection is reused.
              val province = WriteDemo.binarySearch(ipKey, jedis)
              ps2.setString(1, province)
              val resSerach = ps2.executeQuery()
              try {
                if (resSerach.next()) {
                  // Province already present: add the new hits to the stored total.
                  val newCount = resSerach.getInt("t_count") + count
                  ps3.setInt(1, newCount)
                  ps3.setString(2, province)
                  ps3.executeUpdate()
                } else {
                  // Province not yet in the table: insert a fresh row.
                  ps.setString(1, province)
                  ps.setInt(2, count)
                  ps.executeUpdate()
                }
              } finally {
                resSerach.close() // ResultSet was previously never closed
              }
            }
          } finally {
            ps.close()
            ps2.close()
            ps3.close()
          }
        } finally {
          // BUG FIX: previously the JDBC connection leaked on any exception
          // (close() was not in a finally) and the pooled Jedis resource was
          // never returned at all. Release both unconditionally; jedis.close()
          // returns the instance to its pool.
          con.close()
          jedis.close()
        }
      })
    })
    ssc.start()
    ssc.awaitTermination()
  }
}
