package com.air.antispider.stream.rulecompute

import com.air.antispider.stream.commom.bean.{AntiCalculateResult, FlowCollocation, ProcessedData}
import com.air.antispider.stream.commom.util.jedis.{JedisConnectionUtil, PropertiesUtil}
import com.air.antispider.stream.commom.util.kafka.KafkaOffsetUtil
import com.air.antispider.stream.dataprocess.businessprocess.QueryDBUtil
import com.air.antispider.stream.rulecompute.businessprocess.{ComputeFlowScore, QueryDataPackage, RuleComputeUtils}
import kafka.common.TopicAndPartition
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import org.I0Itec.zkclient.ZkClient
import org.apache.commons.lang3.StringUtils
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import redis.clients.jedis.JedisCluster

import scala.collection.mutable.ArrayBuffer

/**
  * 反爬虫实时计算入口程序
  */
object RuleComputeStreamingApp {

  def main(args: Array[String]): Unit = {
    // Ensure the in-flight batch finishes before the application stops.
    System.setProperty("spark.streaming.stopGracefullyOnShutdown", "true")

    // Spark configuration; JVM metrics source enables executor monitoring.
    val conf: SparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("RuleComputeStreamingApp")
      .set("spark.metrics.conf.executor.source.jvm.class", "org.apache.spark.metrics.source.JvmSource")

    // StreamingContext with a 3-second batch interval.
    val ssc: StreamingContext = new StreamingContext(conf, Seconds(3))
    val sc: SparkContext = ssc.sparkContext

    // Load critical-page matching rules from the DB into a broadcast variable.
    // @volatile because the reference is swapped on the driver inside foreachRDD.
    val criticalPagesList: ArrayBuffer[String] = QueryDBUtil.queryCriticalPages()
    @volatile var criticalPagesBroadcast: Broadcast[ArrayBuffer[String]] = sc.broadcast(criticalPagesList)

    // Load flow configurations (type 0) from the DB into a broadcast variable.
    val flowList: ArrayBuffer[FlowCollocation] = QueryDBUtil.queryFlow(0)
    @volatile var flowListBroadcast: Broadcast[ArrayBuffer[FlowCollocation]] = sc.broadcast(flowList)

    // ZooKeeper connection parameters for manually-managed Kafka offsets.
    val zkHost: String = PropertiesUtil.getStringByKey("zkHosts", "zookeeperConfig.properties")
    val zkClient = new ZkClient(zkHost, 300000, 30000)
    val zkPath: String = PropertiesUtil.getStringByKey("rulecompute.antispider.zkPath", "zookeeperConfig.properties")

    // MUST be a val: with `def`, every reference would build a brand-new Kafka
    // direct stream, so the stream whose offsets are saved below would differ
    // from the stream whose data is processed (duplicate consumption, offsets
    // committed for unprocessed batches).
    val inputStream: InputDStream[(String, String)] = createInputStream(ssc, sc, zkClient, zkHost, zkPath)

    // Kafka records are (key, value); the key is unused, keep only the message body.
    val dataStream: DStream[String] = inputStream.map(_._2)

    // Re-assemble raw message strings into ProcessedData beans.
    val processedDataDStream: DStream[ProcessedData] = QueryDataPackage.queryDataLoadAndPackage(dataStream)

    val jedis: JedisCluster = JedisConnectionUtil.getJedisCluster

    // Per-batch processing (runs on the driver; RDD operations run on executors).
    processedDataDStream.foreachRDD(processedDataRDD => {
      // Refresh the critical-page rules when Redis flags them as dirty.
      var criticalPagesFlag: String = jedis.get("criticalPagesFlag")
      if (StringUtils.isBlank(criticalPagesFlag)) {
        // First run: initialise the flag so the rules are loaded once.
        criticalPagesFlag = "true"
        jedis.set("criticalPagesFlag", criticalPagesFlag)
      }
      if (criticalPagesFlag.toBoolean) {
        val newCriticalPagesList: ArrayBuffer[String] = QueryDBUtil.queryCriticalPages()
        // Drop the stale broadcast before re-broadcasting the fresh rules.
        criticalPagesBroadcast.unpersist()
        criticalPagesBroadcast = sc.broadcast(newCriticalPagesList)
        criticalPagesFlag = "false"
        jedis.set("criticalPagesFlag", criticalPagesFlag)
      }

      // Refresh the flow-configuration broadcast the same way.
      var flowFlag: String = jedis.get("FlowFlag")
      if (StringUtils.isBlank(flowFlag)) {
        flowFlag = "true"
        jedis.set("FlowFlag", flowFlag)
      }
      if (flowFlag.toBoolean) {
        flowListBroadcast.unpersist()
        val newFlowList: ArrayBuffer[FlowCollocation] = QueryDBUtil.queryFlow(0)
        flowListBroadcast = sc.broadcast(newFlowList)
        flowFlag = "false"
        jedis.set("FlowFlag", flowFlag)
      }

      // Indicator computations (each returns a driver-side map keyed by IP or IP-derived key):
      // 1. hits per IP block (first two octets)
      val ipBlockMap: collection.Map[String, Int] = RuleComputeUtils.ipBlock(processedDataRDD)
      // 2. hits per IP
      val ipMap: collection.Map[String, Int] = RuleComputeUtils.ipCount(processedDataRDD)
      // 3. critical-page hits per IP
      val ipCriticalPagesMap: collection.Map[String, Int] = RuleComputeUtils.ipCriticalPagesCount(processedDataRDD, criticalPagesBroadcast.value)
      // 4. distinct User-Agents per IP
      val ipUAMap: collection.Map[String, Int] = RuleComputeUtils.ipUACount(processedDataRDD)
      // 5. minimum interval between critical-page visits per IP
      val ipCriticalPagesMinTimeMap: collection.Map[String, Int] = RuleComputeUtils.ipCriticalPagesMinTime(processedDataRDD, criticalPagesBroadcast.value)
      // 6. critical-page queries under the minimum-interval threshold, per (IP, URL)
      val ipCriticalPagesMinNumTimeMap: collection.Map[(String, String), Int] = RuleComputeUtils.ipCriticalPagesMinNumTime(processedDataRDD, criticalPagesBroadcast.value)
      // 7. distinct itineraries queried per IP
      val ipArrCountMap: collection.Map[String, Int] = RuleComputeUtils.ipArrCount(processedDataRDD)
      // 8. distinct cookies on critical pages per IP
      val ipCookieCountMap: collection.Map[String, Int] = RuleComputeUtils.ipCookieCount(processedDataRDD, criticalPagesBroadcast.value)

      // Combine the eight indicators with the flow rules into per-record scores.
      // NOTE(review): no Spark action is ever invoked on this RDD, so the
      // computeFlowScore transformation is never actually executed — presumably
      // a downstream sink (e.g. write to Redis/HDFS) is still to be added; confirm.
      val antiCalculateResultRDD: RDD[AntiCalculateResult] = ComputeFlowScore.computeFlowScore(
        processedDataRDD,
        ipBlockMap,
        ipMap,
        ipCriticalPagesMap,
        ipUAMap,
        ipCriticalPagesMinTimeMap,
        ipCriticalPagesMinNumTimeMap,
        ipArrCountMap,
        ipCookieCountMap,
        flowListBroadcast.value
      )
    })

    // Persist consumed offsets to ZooKeeper after each batch. Uses the SAME
    // stream instance as the processing pipeline above (see the `val` note).
    inputStream.foreachRDD(rdd => KafkaOffsetUtil.saveOffsets(zkClient, zkHost, zkPath, rdd))

    ssc.start()
    ssc.awaitTermination()
  }

  /**
    * Creates the Kafka direct stream, restoring consumer offsets from ZooKeeper
    * when they exist (manual offset management).
    *
    * @param ssc      streaming context the stream is attached to
    * @param sc       spark context (unused here; kept for interface compatibility)
    * @param zkClient ZooKeeper client used to read stored offsets
    * @param zkHost   ZooKeeper connection string
    * @param zkPath   ZooKeeper node holding the offsets
    * @return direct stream of (key, message) pairs
    */
  def createInputStream(ssc: StreamingContext, sc: SparkContext, zkClient: ZkClient, zkHost: String, zkPath: String): InputDStream[(String, String)] = {
    // Kafka broker list and consumer parameters.
    val brokerList: String = PropertiesUtil.getStringByKey("default.brokers", "kafkaConfig.properties")
    val kafkaParams: Map[String, String] = Map("metadata.broker.list" -> brokerList)

    // Topic(s) to consume.
    val topic: String = PropertiesUtil.getStringByKey("source.query.topic", "kafkaConfig.properties")
    val topics: Set[String] = Set(topic)

    // Previously stored offsets, if any.
    val topicAndPartition: Option[Map[TopicAndPartition, Long]] = KafkaOffsetUtil.readOffsets(zkClient, zkHost, zkPath, topic)

    // The match is an expression: no mutable null-initialised placeholder needed.
    topicAndPartition match {
      case Some(storedOffsets) =>
        // Offsets found: resume consumption exactly where the last run stopped.
        val messageHandler = (mam: MessageAndMetadata[String, String]) => (mam.key(), mam.message())
        KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](ssc, kafkaParams, storedOffsets, messageHandler)
      case None =>
        // First run: start from the topic's default offsets.
        KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)
    }
  }

}
