package com

import Utils.{RequirementAnalyze, TimeUtils}
import com.alibaba.fastjson.JSON
import com.typesafe.config.ConfigFactory
import kafka.common.TopicAndPartition
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import kafka.utils.{ZKGroupTopicDirs, ZkUtils}
import org.I0Itec.zkclient.ZkClient
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka.{HasOffsetRanges, KafkaUtils, OffsetRange}
import org.apache.spark.streaming.{Duration, StreamingContext}

import scala.tools.nsc.interpreter.InputStream


/**
  * Spark Streaming job that consumes recharge-notification messages from Kafka
  * using the direct (receiver-less) connection approach, aggregates recharge
  * metrics per day/hour/minute/province, and persists consumed offsets to ZooKeeper.
  */
object KafkaStreamingRedis {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName(this.getClass.getName)
      .master("local[*]")
      .getOrCreate()
    // One micro-batch every 5 seconds.
    val ssc = new StreamingContext(spark.sparkContext, Duration(5000))

    val load = ConfigFactory.load()
    // Consumer group name.
    val groupId = load.getString("groupid")
    // Topic name.
    val topic = load.getString("topicid")
    // Kafka broker list (streaming tasks connect directly to Kafka partitions
    // using the low-level consumer API — no receivers).
    val brokerList = load.getString("broker.list")
    // ZooKeeper quorum used to persist consumed offsets.
    val zkQuorum = load.getString("zookeeper.list")

    val topics: Set[String] = Set(topic)
    // ZooKeeper directory layout for this consumer group / topic; offsets live
    // under its consumerOffsetDir (e.g. /consumers/<group>/offsets/<topic>).
    val topicDirs = new ZKGroupTopicDirs(groupId, topic)
    val zkTopicPath = s"${topicDirs.consumerOffsetDir}"

    // Kafka consumer parameters.
    val kafkas = Map(
      "metadata.broker.list" -> brokerList,
      "group.id" -> groupId,
      // Start from the earliest available offset when no saved offset exists.
      "auto.offset.reset" -> kafka.api.OffsetRequest.SmallestTimeString
    )

    // ZooKeeper client used to read previously saved offsets and to write new ones.
    val zkClient = new ZkClient(zkQuorum)
    // Number of partitions that already have a stored offset (0 on first run).
    val clientOffset = zkClient.countChildren(zkTopicPath)

    // Resume from the stored offsets when they exist; otherwise start a fresh
    // direct stream. A single val expression replaces the original null var.
    val kafkaStream: InputDStream[(String, String)] =
      if (clientOffset > 0) {
        var fromOffsets: Map[TopicAndPartition, Long] = Map()
        for (i <- 0 until clientOffset) {
          val partitionOffset = zkClient.readData[String](s"$zkTopicPath/${i}")
          fromOffsets += (TopicAndPartition(topic, i) -> partitionOffset.toLong)
        }
        val messageHandler = (mmd: MessageAndMetadata[String, String]) => (
          mmd.key(), mmd.message()
        )
        KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](ssc, kafkas, fromOffsets, messageHandler)
      } else {
        KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkas, topics)
      }

    // Province code -> province name mapping, broadcast to every executor.
    // FIXME: the input path is empty — supply the real province file path
    // (ideally via the config, next to broker.list/zookeeper.list).
    val provinceInfo = spark.sparkContext
      .textFile("")
      .collect()
      .map(line => {
        val arr = line.split(" ")
        (arr(0), arr(1))
      }).toMap
    val provinceInfoBroadcast = spark.sparkContext.broadcast(provinceInfo)

    kafkaStream.foreachRDD { rdd =>
      // Capture this batch's offset ranges BEFORE any transformation, while the
      // RDD is still the KafkaRDD that carries them.
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      // Element-wise sum of two equal-length metric lists; shared by the four
      // reduceByKey calls below (function literal, hence serializable).
      val mergeMetrics = (list1: List[Double], list2: List[Double]) =>
        list1.zip(list2).map(t => t._1 + t._2)

      val baseData = rdd.map(t => JSON.parseObject(t._2)
      ).filter(_.getString("serviceName").equalsIgnoreCase("reChargeNotifyReq"))
        .map(jsobj => {
          val rechargeRes = jsobj.getString("bussinessRst") // recharge result code; "0000" means success
          val succeeded = rechargeRes.equals("0000")
          val fee: Double = if (succeeded) jsobj.getDouble("chargefee") else 0.0 // recharge amount
          // BUGFIX: was `!fee.equals(0.0)` — boxed equality on a primitive Double.
          val feeCount = if (fee != 0.0) 1 else 0 // successful recharges with a non-zero amount
          val starttime = jsobj.getString("requestId") // recharge start time (timestamp encoded in requestId)
          val recivcetime = jsobj.getString("receiveNotifyTime") // recharge finish time
          val pcode = jsobj.getString("provinceCode") // province code
          // BUGFIX: Map.get returns an Option, so `.get(pcode).toString` produced
          // "Some(...)"/"None" strings. Fall back to the raw code when unmapped.
          val province = provinceInfoBroadcast.value.getOrElse(pcode, pcode)
          // Success count for this record.
          val isSucc = if (succeeded) 1 else 0
          // Recharge duration (only meaningful for successful recharges).
          val costtime = if (succeeded) TimeUtils.costtime(starttime, recivcetime) else 0

          (starttime.substring(0, 8), // yyyyMMdd
            starttime.substring(0, 10), // yyyyMMddHH
            List[Double](1, fee, isSucc, costtime.toDouble, feeCount), // (order count, fee, success count, duration, non-zero-fee success count)
            province, // province name
            starttime.substring(0, 12), // yyyyMMddHHmm
            (starttime.substring(0, 10), province) // (yyyyMMddHH, province)
          )
        }).cache() // reused by the four metrics below

      // Metric 1: per-day totals.
      val result1 = baseData.map(t => (t._1, t._3)).reduceByKey(mergeMetrics)
      RequirementAnalyze.requirement01(result1)

      // Metric 2: per-(hour, province) totals.
      val result2 = baseData.map(t => (t._6, t._3)).reduceByKey(mergeMetrics)
      RequirementAnalyze.requirement02(result2)

      // Metric 3: per-province totals.
      val result3 = baseData.map(t => (t._4, t._3)).reduceByKey(mergeMetrics)
      RequirementAnalyze.requirement03(result3)

      // Metric 4: per-minute totals.
      val result4 = baseData.map(t => (t._5, t._3)).reduceByKey(mergeMetrics)
      RequirementAnalyze.requirement04(result4)

      // All four metrics have been computed; release the cached batch.
      baseData.unpersist()

      // Persist each partition's end offset to ZooKeeper so the next batch (or
      // a restart) resumes exactly where this one finished.
      for (o <- offsetRanges) {
        // e.g. /group01/offset/recharge/0
        val zkpath = s"${topicDirs.consumerOffsetDir}/${o.partition}"
        // e.g. /group01/offset/recharge/0 -> 88889
        ZkUtils.updatePersistentPath(zkClient, zkpath, o.untilOffset.toString)
      }
    }

    ssc.start()
    ssc.awaitTermination()
  }

}
