package com.air.antispider.stream.dataprocess

import com.air.antispider.stream.common.bean.{AnalyzeRule, BookRequestData, ProcessedData, QueryRequestData, RequestType}
import com.air.antispider.stream.common.util.jedis.{JedisConnectionUtil, PropertiesUtil}
import com.air.antispider.stream.dataprocess.businessprocess.{AnalyzeBookRequest, AnalyzeRequest, AnalyzeRuleDB, BusinessProcess, DataPackage, DataSend, DataSplit, EncryptedData, FilterURL, IpOperation, RequestTypeClassifier, SparkStreamingMonitor, TravelTypeClassifier}
import com.air.antispider.stream.dataprocess.constants.BehaviorTypeEnum
import com.air.antispider.stream.dataprocess.constants.TravelTypeEnum.TravelTypeEnum
import kafka.serializer.StringDecoder
import org.apache.commons.lang3.StringUtils
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
import redis.clients.jedis.JedisCluster

import scala.collection.mutable.ArrayBuffer

/**
  * Entry point of the data pre-processing streaming job.
  *
  * Consumes raw nginx access-log lines from Kafka, filters them, encrypts
  * sensitive fields, classifies and parses each request into a structured
  * [[ProcessedData]] record, and writes the result back to Kafka.
  *
  * Rule sets (URL filter rules, request-classification rules, query/book
  * analyze rules and the high-frequency/black IP list) are loaded from MySQL
  * into broadcast variables, and hot-reloaded at the start of a batch whenever
  * the corresponding change flag in Redis reads "true".
  */
object DataProcessApp {

  def main(args: Array[String]): Unit = {

    // When the application is stopped, let the in-flight batch finish first.
    System.setProperty("spark.streaming.stopGracefullyOnShutdown", "true")

    // Build the Spark execution environment.
    val conf: SparkConf = new SparkConf()
      .setMaster("local[*]")
      // Enable the executor JVM metrics source so Spark performance can be monitored.
      .set("spark.metrics.conf.executor.source.jvm.class", "org.apache.spark.metrics.source.JvmSource")
      .setAppName("DataProcessApp")

    val sc: SparkContext = new SparkContext(conf)

    // After creating the SparkContext and before consuming Kafka, load the rule
    // configuration from MySQL into broadcast variables.
    // @volatile: the driver re-assigns these vars between batches while the
    // streaming closures read them, so publication must be safe.
    val filterRuleList: ArrayBuffer[String] = AnalyzeRuleDB.queryFilterRule()
    @volatile var filterRuleBroadcast: Broadcast[ArrayBuffer[String]] = sc.broadcast(filterRuleList)
    // Request-classification rules (e.g. domestic/international query vs. booking).
    val classifyRuleMap: Map[String, ArrayBuffer[String]] = AnalyzeRuleDB.queryClassifyRule()
    @volatile var classifyRuleBroadcast: Broadcast[Map[String, ArrayBuffer[String]]] = sc.broadcast(classifyRuleMap)
    // Parsing rules for query requests and for booking requests.
    val queryAnalyzeRuleList: List[AnalyzeRule] = AnalyzeRuleDB.queryRule(BehaviorTypeEnum.Query.id)
    val bookAnalyzeRuleList: List[AnalyzeRule] = AnalyzeRuleDB.queryRule(BehaviorTypeEnum.Book.id)
    @volatile var queryAnalyzeRuleBroadcast: Broadcast[List[AnalyzeRule]] = sc.broadcast(queryAnalyzeRuleList)
    @volatile var bookAnalyzeRuleBroadcast: Broadcast[List[AnalyzeRule]] = sc.broadcast(bookAnalyzeRuleList)
    // High-frequency (black-listed) IPs from MySQL.
    val blackIPList: ArrayBuffer[String] = AnalyzeRuleDB.queryBlackIPList()
    @volatile var blackIPListBroadcast: Broadcast[ArrayBuffer[String]] = sc.broadcast(blackIPList)

    val ssc: StreamingContext = new StreamingContext(sc, Seconds(2))

    // Kafka cluster address and source topic.
    val brokerList: String = PropertiesUtil.getStringByKey("default.brokers", "kafkaConfig.properties")
    val topic: String = PropertiesUtil.getStringByKey("source.nginx.topic", "kafkaConfig.properties")
    // Kafka connection parameters.
    val kafkaParams = Map[String, String](
      "metadata.broker.list" -> brokerList
    )
    val topics: Set[String] = Set[String](topic)

    // Consume Kafka with its own StringDecoder serialization (more efficient
    // than default Java serialization).
    val sourceStream: InputDStream[(String, String)] = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)

    // Keep only the message payload (drop the Kafka key).
    val sourceMessage: DStream[String] = sourceStream.map(_._2)

    // Redis connection, used on the driver to read/write rule-change flags.
    val jedis: JedisCluster = JedisConnectionUtil.getJedisCluster

    /**
      * Reads the rule-change flag stored in Redis under `flagKey`.
      * A missing/blank flag is initialized to "true" so the rules are
      * (re)loaded on first use.
      */
    def ruleChanged(flagKey: String): Boolean = {
      var flag: String = jedis.get(flagKey)
      if (StringUtils.isBlank(flag)) {
        flag = "true"
        jedis.set(flagKey, flag)
      }
      flag.toBoolean
    }

    /** Resets the change flag in Redis once the broadcast has been refreshed. */
    def markRuleRefreshed(flagKey: String): Unit = jedis.set(flagKey, "false")

    sourceMessage.foreachRDD(messageRDD => {
      // This closure runs on the driver once per batch, so re-assigning the
      // broadcast vars and talking to Redis here is safe.

      // Refresh the URL filter rules when flagged.
      if (ruleChanged("FilterChangeFlag")) {
        filterRuleBroadcast.unpersist()
        filterRuleBroadcast = sc.broadcast(AnalyzeRuleDB.queryFilterRule())
        markRuleRefreshed("FilterChangeFlag")
      }
      // Refresh the request-classification rules when flagged.
      if (ruleChanged("ClassifyRuleChangeFlag")) {
        classifyRuleBroadcast.unpersist()
        classifyRuleBroadcast = sc.broadcast(AnalyzeRuleDB.queryClassifyRule())
        markRuleRefreshed("ClassifyRuleChangeFlag")
      }
      // Query and booking analyze rules share one flag: refresh both together.
      if (ruleChanged("AnalyzeRuleChangeFlag")) {
        queryAnalyzeRuleBroadcast.unpersist()
        bookAnalyzeRuleBroadcast.unpersist()
        queryAnalyzeRuleBroadcast = sc.broadcast(AnalyzeRuleDB.queryRule(BehaviorTypeEnum.Query.id))
        bookAnalyzeRuleBroadcast = sc.broadcast(AnalyzeRuleDB.queryRule(BehaviorTypeEnum.Book.id))
        markRuleRefreshed("AnalyzeRuleChangeFlag")
      }
      // Refresh the high-frequency/black IP list when flagged.
      // NOTE(review): this key is lowerCamelCase unlike the others; kept as-is
      // because external writers set the very same key.
      if (ruleChanged("blackListChangeFlag")) {
        blackIPListBroadcast.unpersist()
        blackIPListBroadcast = sc.broadcast(AnalyzeRuleDB.queryBlackIPList())
        markRuleRefreshed("blackListChangeFlag")
      }

      // Link statistics: per-server message counts for this batch.
      val serverCountRDD: RDD[(String, Int)] = BusinessProcess.linkCount(messageRDD)
      // Drop messages whose URL matches a filter rule.
      val filterRDD: RDD[String] = messageRDD.filter(message => FilterURL.filterRule(message, filterRuleBroadcast.value))

      // Encrypt sensitive user data: phone numbers first, then ID-card numbers.
      val encryptedPhoneRDD: RDD[String] = EncryptedData.encryptedPhoneNum(filterRDD)
      val encryptedRDD: RDD[String] = EncryptedData.encryptedIDNum(encryptedPhoneRDD)

      // Turn each raw log line into a structured ProcessedData record.
      val processedDataRDD: RDD[ProcessedData] = encryptedRDD.map(message => {
        // Split the raw log line into its individual fields.
        val (request, //requested URL
        requestMethod, //HTTP method
        contentType, //content type
        requestBody, //request body
        httpReferrer, //referrer
        remoteAddr, //client IP
        httpUserAgent, //user agent
        timeIso8601, //timestamp
        serverAddr, //server address
        cookiesStr, //raw cookie string
        cookieValue_JSESSIONID, //session ID extracted from the cookie
        cookieValue_USERID) = DataSplit.dataSplit(message)

        // Tag the request type (domestic/international query or booking, ...).
        val requestType: RequestType = RequestTypeClassifier.classifyByRequest(request, classifyRuleBroadcast.value)
        // Tag the travel type (one-way / round-trip) from the referrer.
        val travelType: TravelTypeEnum = TravelTypeClassifier.travelTypeClassifier(httpReferrer)

        // Parse request parameters with the rules matching the tags above.
        val queryRequestData: Option[QueryRequestData] = AnalyzeRequest.analyzeQueryRequest(requestType, requestMethod, contentType, request, requestBody, travelType, queryAnalyzeRuleBroadcast.value)
        val bookRequestData: Option[BookRequestData] = AnalyzeBookRequest.analyzeBookRequest(requestType, requestMethod, contentType, request, requestBody, travelType, bookAnalyzeRuleBroadcast.value)
        // Flag whether the client IP is on the high-frequency (black) list.
        val isFreIP: Boolean = IpOperation.isFreIP(remoteAddr, blackIPListBroadcast.value)

        // Assemble the structured record.
        val processedData: ProcessedData = DataPackage.dataPackage("",
          requestMethod,
          request,
          remoteAddr,
          httpUserAgent,
          timeIso8601,
          serverAddr,
          isFreIP,
          requestType,
          travelType,
          cookieValue_JSESSIONID,
          cookieValue_USERID,
          queryRequestData, //parsed query-request parameters
          bookRequestData, //parsed booking-request parameters
          httpReferrer)

        processedData
      })

      // Cache: processedDataRDD is consumed by TWO actions below (the Kafka
      // send and the debug print); without caching, the whole
      // filter -> encrypt -> parse pipeline would execute twice per batch.
      processedDataRDD.cache()

      // Publish the structured records to Kafka.
      DataSend.sendDataToKafka(processedDataRDD)

      // Spark performance monitoring for this batch.
      SparkStreamingMonitor.streamMonitor(serverCountRDD, sc)

      // Debug output (println runs on the executors).
      processedDataRDD.foreach(println)

      // Both actions are done; release the cached partitions.
      processedDataRDD.unpersist()
    })

    // Start the streaming job and block until termination.
    ssc.start()
    ssc.awaitTermination()
  }
}
