package com.atguigu.qzpoint.streaming

import java.lang
import java.sql.ResultSet

import com.atguigu.constan.Constan
import com.atguigu.qzpoint.acc.RegNumAcc
import com.atguigu.util._
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010._

import scala.collection.mutable

/**
 * description ：注册模块 SparkStreaming 指标统计
 * author      ：剧情再美终是戏 
 * mail        : 13286520398@163.com
 * date        ：Created in 2020/3/11 15:42
 * modified By ：
 * version:    : 1.0
 */
object RegistetApp extends BaseApp {

  // Streaming configuration: local mode, Kryo serialization, and a cap of
  // 100 records/partition/batch so downstream MySQL is not overwhelmed.
  val conf = new SparkConf()
    .setMaster("local[*]")
    .setAppName(this.getClass.getName)
    .set("spark.streaming.kafka.maxRatePerPartition", "100")
    .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
  //      .set("spark.streaming.backpressure.enabled", "true")
  //      .set("spark.streaming.stopGracefullyOnShutdown", "true")
  super.setConf(conf)

  private val groupid = "register_group_test4"
  private val topics = Array(Constan.KAFKA_TOPIC_REGISTER)

  /*
   Requirement 1: count registered users in real time (3-second batches),
                  keeping running totals (accumulator-based here).
   Requirement 2: every 6 seconds, count registrations within the last minute,
                  no history needed (reduceByKeyAndWindow) - currently disabled.
   Requirement 3: observe the ingested data and tune as needed.
   */
  override def doSomething(ssc: StreamingContext) = {
    // Restore this consumer group's committed offsets from MySQL so the job
    // resumes exactly where the previous run stopped.
    val offsetMap = new mutable.HashMap[TopicPartition, Long]()
    val offsetConn = DataSourceUtil.getConnection()
    try {
      SqlProxy.executeQuery(offsetConn,
        "select groupid, topic, `partition`, untiloffset from `offset_manager` where groupid=?",
        Array(groupid),
        new QueryCallback {
          override def process(rs: ResultSet) = {
            while (rs.next()) {
              val partition = new TopicPartition(rs.getString("topic"), rs.getInt("partition"))
              // FIX: Kafka offsets are 64-bit; getInt silently truncated large offsets.
              offsetMap.put(partition, rs.getLong("untiloffset"))
            }
          }
        })
    } finally {
      // FIX: the original never released this connection - leak on every startup.
      DataSourceUtil.closeResource(null, null, offsetConn)
    }

    // Kafka consumer parameters. Auto-commit is disabled because offsets are
    // committed manually to MySQL, in the same transaction as the aggregates,
    // so a crash can neither lose nor double-count a batch.
    val kafkaMap: Map[String, Object] = Map[String, Object](
      "bootstrap.servers" -> ConfigManager.getProperty("kafka.bootstrap.servers"),
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> groupid,
      "auto.offset.reset" -> "earliest",
      "enable.auto.commit" -> (false: lang.Boolean)
    )

    // Subscribe from the stored offsets when we have them, otherwise let
    // "auto.offset.reset" decide the starting point.
    val source: InputDStream[ConsumerRecord[String, String]] =
      if (offsetMap.isEmpty) {
        KafkaUtils.createDirectStream(ssc, LocationStrategies.PreferConsistent,
          ConsumerStrategies.Subscribe[String, String](topics, kafkaMap))
      } else {
        KafkaUtils.createDirectStream(ssc, LocationStrategies.PreferConsistent,
          ConsumerStrategies.Subscribe[String, String](topics, kafkaMap, offsetMap))
      }

    // ETL: keep only well-formed 3-field records and map the platform flag
    // (field 1) to a readable registration source.
    val etlSource: DStream[(String, Int)] = source
      .filter(record => {
        val fields = record.value().split("\\t")
        null != fields && fields.length == 3
      })
      .map(record => {
        val appName = record.value().split("\\t")(1) match {
          case "1" => "PC"
          case "2" => "APP"
          case _ => "Other"
        }
        (appName, 1)
      })

    // Cached because the (currently disabled) windowed query below would
    // re-read the same batch.
    etlSource.cache()

    // Requirement 2 (disabled): 1-minute window, 6-second slide, no history.
    //    etlSource.reduceByKeyAndWindow((x: Int, y: Int) => x + y, Seconds(60), Seconds(6)).print()

    // Requirement 1: accumulate per-source registration counts.
    val regAcc = new RegNumAcc().getInstance(null)
    ssc.sparkContext.register(regAcc)

    source.foreachRDD(rdd => {
      // The accumulator is cleared at the start of every batch, so the
      // `replace into reg_num` below rewrites each source with this batch's
      // count only.
      // NOTE(review): confirm this matches requirement 1's "historic + current
      // total" wording - `replace into` overwrites any previous value.
      regAcc.reset()

      // FIX: the original indexed field 1 without validating the field count;
      // a single malformed record would fail the whole batch. Reuse the same
      // guard as the ETL stream above.
      rdd
        .filter(record => {
          val fields = record.value().split("\\t")
          null != fields && fields.length == 3
        })
        .map(record => {
          val appName = record.value().split("\\t")(1) match {
            case "1" => "PC"
            case "2" => "APP"
            case _ => "Other"
          }
          (appName, 1)
        })
        .foreach { case (appName, cnt) => regAcc.add(appName, cnt) }

      // Offsets reached by this batch; committed atomically with the counts.
      val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      val connection = DataSourceUtil.getConnection()
      try {
        SqlProxy.beginTransaction(connection)

        // Persist the per-source registration counts.
        for (elem <- regAcc.value) {
          SqlProxy.executeUpdate(connection,
            "replace into reg_num (reg_source, reg_num) VALUES(?, ?)",
            Array(elem._1, elem._2))
        }

        // Persist the offsets in the same transaction (manual offset management).
        for (or <- offsetRanges) {
          SqlProxy.executeUpdate(connection,
            "replace into `offset_manager` (groupid,topic,`partition`,untilOffset) values(?,?,?,?)",
            Array(groupid, or.topic, or.partition.toString, or.untilOffset))
        }

        SqlProxy.commitTransaction(connection)
      } catch {
        case e: Exception =>
          // Roll back so counts and offsets stay consistent; the batch will be
          // re-read from the last committed offsets on restart.
          SqlProxy.rollback(connection)
          e.printStackTrace()
      } finally {
        // FIX: the original closed the connection only on success - the
        // failure path leaked it. Always return it to the pool.
        DataSourceUtil.closeResource(null, null, connection)
      }
    })
  }
}
