package cn.tedu.spark

import java.util

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010._
import org.apache.kafka.clients.consumer.{Consumer, ConsumerRecord, KafkaConsumer}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import java.util.{ArrayList, Calendar, Properties}

import cn.tedu.dao.HBaseUtil
import cn.tedu.pojo.LogBean

/**
 * Consumes messages from the Kafka topic "weblog", computes real-time
 * traffic metrics (pv / uv / vv / newip / newcust) from each record,
 * and persists the parsed log record to HBase.
 */
object Driver {

  /**
   * Sums the end ("latest") offsets of every partition of the given topic.
   *
   * Kafka background: producers and consumers only interact with a partition's
   * leader; the other replicas act as followers and copy data from the leader.
   * Each partition has one broker acting as leader (handling all reads and
   * writes) while followers only replicate; a single broker may host several
   * partitions of one topic.
   *
   * @param topic topic whose end offsets are summed
   * @param props Kafka consumer configuration (bootstrap servers, deserializers, ...)
   * @return the sum of end offsets across all partitions, or 0 when the topic
   *         is unknown or the cluster reports no topics
   */
  def getTopicOffset(topic: String, props: Map[String, Object]): Long = {
    val prop = new Properties
    // Copy the Scala map into java.util.Properties. foreach, not map:
    // we only want the side effect, not a transformed collection.
    props.foreach { case (k, v) => prop.put(k, v) }

    val consumer = new KafkaConsumer[String, Object](prop)
    try {
      val topicMap = consumer.listTopics()
      if (topicMap == null || topicMap.isEmpty) return 0L

      // FIX: get() returns null when the topic does not exist on the cluster;
      // the original code would NPE on partitionInfos.forEach in that case.
      val partitionInfos = topicMap.get(topic)
      if (partitionInfos == null || partitionInfos.isEmpty) return 0L

      val topicPartitionList = new ArrayList[TopicPartition]()
      partitionInfos.forEach { item =>
        topicPartitionList.add(new TopicPartition(topic, item.partition()))
      }

      var sum = 0L
      consumer.endOffsets(topicPartitionList).forEach { (_, v) => sum += v }
      sum
    } finally {
      // FIX: the consumer was previously never closed (socket/thread leak).
      consumer.close()
    }
  }

  def main(args: Array[String]): Unit = {
    // Consuming Kafka needs at least two threads: one drives Spark Streaming
    // itself, another receives the Kafka data.
    val conf = new SparkConf().setMaster("local[5]").setAppName("kafkasource")
    val sc = new SparkContext(conf)
    // The batch interval is not "the smaller the better" -- ideally the next
    // batch arrives exactly when the previous one has just finished processing.
    val ssc = new StreamingContext(sc, Seconds(2))

    val kafkaParams: Map[String, Object] = Map[String, Object](
      "bootstrap.servers" -> "mycentos8:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "1910kafka",
      "auto.offset.reset" -> "latest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )
    val topics = Array("weblog")
    val offsetNumber: Long = getTopicOffset("weblog", kafkaParams)

    // Start consuming partition 0 from the topic's current end offset.
    val offsets: Map[TopicPartition, Long] = Map(new TopicPartition("weblog", 0) -> offsetNumber)

    /** Subscribe reads all partitions of the topic but lets us pin the starting offset of specific partitions. */
    val stream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,
      Subscribe[String, String](topics, kafkaParams, offsets)
    )

    HBaseUtil.sparkContext = sc

    stream.foreachRDD { lineRDD =>
      if (!lineRDD.isEmpty()) {
        val offsetRanges: Array[OffsetRange] = lineRDD.asInstanceOf[HasOffsetRanges].offsetRanges
        lineRDD.foreachPartition { iter =>
          iter.foreach { record =>
            // FIX: the original println(a, b, c, d) relied on (deprecated)
            // auto-tupling and printed a tuple; interpolate a single string.
            println(s"partition = ${record.partition()} key = ${record.key()} value = ${record.value()} offset = ${record.offset()}")

            // Step1: parse the pipe-separated log line into business fields.
            val line = record.value()
            val info = line.split("\\|")
            val url = info(0)
            val urlname = info(1)
            val uvid = info(13)
            // info(14) is "ssid_sscount_sstime"; split once instead of 3 times.
            val sessionInfo = info(14).split("_")
            val ssid = sessionInfo(0)
            val sscount = sessionInfo(1)
            val sstime = sessionInfo(2)
            val cip = info(15)
            // Step2: wrap the business fields into the bean.
            val logBean = LogBean(url, urlname, uvid, ssid, sscount, sstime, cip)
            // Step3: real-time metrics: (1)pv (2)uv (3)vv (4)newip (5)newcust
            // 3.1 pv: each page visit counts as 1.
            val pv = 1
            // 3.2 uv: 1 when this uvid has NOT appeared yet today, else 0.
            // "Today" = [midnight of the record's day, record timestamp sstime].
            val endTime = sstime.toLong
            val calendar = Calendar.getInstance
            calendar.setTimeInMillis(endTime)
            // FIX: Calendar.HOUR is the 12-hour-clock field; for afternoon
            // timestamps the old code left the range start at 12:00 noon
            // instead of 00:00. HOUR_OF_DAY is the correct 24-hour field.
            calendar.set(Calendar.HOUR_OF_DAY, 0)
            calendar.set(Calendar.MINUTE, 0)
            calendar.set(Calendar.SECOND, 0)
            calendar.set(Calendar.MILLISECOND, 0)

            // Midnight (00:00:00.000) of the day of the current record.
            val startTime = calendar.getTimeInMillis
            // HBase row keys appear to start with "<timestamp>_<uvid>_...", so
            // a row-key regex filter over the time range answers "seen today?".
            val uvRegex = "^\\d+_" + uvid + ".*$"
            val uvResultRDD = HBaseUtil.queryByRangeAndRegex(startTime, endTime, uvRegex)
            val uv = if (uvResultRDD.size() == 0) 1 else 0

            // 3.3 vv: 1 when this session id (ssid) has not been seen today.
            val vvRegex = "^\\d+_\\d+_" + ssid + ".*$"
            val vvResultRDD = HBaseUtil.queryByRangeAndRegex(startTime, endTime, vvRegex)
            val vv = if (vvResultRDD.size() == 0) 1 else 0

            // 3.4 newip: 1 when this client ip never appeared in all history
            // (range starts at epoch 0 instead of today's midnight).
            val newipRegex = "^\\d+_\\d+_\\d+_" + cip + ".*$"
            val newipResultRDD = HBaseUtil.queryByRangeAndRegex(0, endTime, newipRegex)
            val newip = if (newipResultRDD.size() == 0) 1 else 0

            // 3.5 newcust: same idea as newip, but keyed on uvid over history.
            val newcustResultRDD = HBaseUtil.queryByRangeAndRegex(0, endTime, uvRegex)
            val newcust = if (newcustResultRDD.size() == 0) 1 else 0

            // TODO Step4: wrap the metrics in a bean and write them to MySQL:
            // val tongjiBean = TongjiBean(sstime, pv, uv, vv, newip, newcust)
            // MysqlUtil.saveToMysql(tongjiBean)

            // Step5: persist the raw logBean into the HBase table.
            HBaseUtil.saveToHBase(logBean)
          }
        }
        // Commit offsets only after the batch was processed successfully,
        // giving at-least-once semantics with manual commits.
        stream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
      }
    }

    ssc.start()
    ssc.awaitTermination()
  }
}
