package com.spark.steaming.kafka

/**
  * @Author: 吴敬超
  * @Date: 2019/9/7 21:18
  *
  * City-wide monitoring; no traffic-enforcement-camera (电警) filtering.
  * Distance is computed from checkpoint (卡口) coordinates and then used to
  * derive vehicle speed. Kafka security authentication is performed inside
  * the code. Intended to run on YARN.
  */


//82079 ?        Sl   8314:38 /opt/hadoopclient/JDK/jdk1.8.0_162/bin/java -Djava.security.krb5.conf=/opt/hadoopclient/KrbClient/kerberos/var/krb5kdc/krb5.conf -Dzookeeper.server.principal=zookeeper/hadoop.hadoop.com -Djava.security.auth.login.config=/opt/hadoopclient/Spark2x/spark/conf/jaas.conf -Dzookeeper.kinit=/opt/hadoopclient/KrbClient/kerberos/bin/kinit -cp /opt/hadoopclient/Spark2x/spark/conf/:/opt/hadoopclient/Spark2x/spark/jars/*:/opt/hadoopclient/Spark2x/spark/conf/:/opt/hadoopclient/Spark2x/spark/jars/*:/opt/hadoopclient/Spark2x/spark/conf/:/opt/hadoopclient/Yarn/config/ -Xmx24G -Dlog4j.configuration=file:/opt/hadoopclient/Spark2x/spark/conf/log4j.properties -Djetty.version=x.y.z -Dzookeeper.server.principal=zookeeper/hadoop.hadoop.com -Djava.security.krb5.conf=/opt/hadoopclient/KrbClient/kerberos/var/krb5kdc/krb5.conf -Djava.security.auth.login.config=/opt/hadoopclient/Spark2x/spark/conf/jaas.conf -Dorg.xerial.snappy.tempdir=/opt/hadoopclient/Spark2x/tmp -Dcarbon.properties.filepath=/opt/hadoopclient/Spark2x/spark/conf/carbon.properties -Djava.io.tmpdir=/opt/hadoopclient/Spark2x/tmp org.apache.spark.deploy.SparkSubmit --master yarn --deploy-mode client --class com.ehualu.liaocheng.realTimekafka12 /root/sptemqlc/realtimemonitor-1.0-SNAPSHOT-jar-with-dependencies.jar


import com.login.kafka.security
import com.typesafe.config.ConfigFactory
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.KafkaUtils
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.{Seconds, StreamingContext}

object realTimeshiqvkafkatest {

  /**
    * Entry point: consumes vehicle-passage records ("GSPASSINFO") from Kafka via
    * the Spark Streaming direct API, parses each CSV record, and prints plates
    * that appear more than once within a 5-second window.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {

    // Kafka security (Kerberos) authentication — must run before any Kafka access.
    security.run()

    val conf = new SparkConf().setAppName("realTimeMonitoringtest2")
      // NOTE(review): hard-coded local master overrides `spark-submit --master yarn`;
      // the file header says this runs on YARN — confirm this is intended for local testing only.
      .setMaster("local[2]")
    // StreamingContext with a 5-second batch interval.
    val ssc = new StreamingContext(conf, Seconds(5))

    ssc.sparkContext.setLogLevel("WARN")
    // Load Kafka connection settings ("kafka" config resource) via Typesafe Config.
    val load = ConfigFactory.load("kafka")

    // Kafka consumer parameters.
    val kafkaParams = Map[String, Object](
      // Broker address list
      "bootstrap.servers" -> load.getString("kafka.brokers"),
      // Consumer group this consumer joins
      "group.id" -> "odoaqwefng12344",
      // Key/value deserializer classes
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      // Auto offset commit disabled; this job never commits offsets manually either,
      // so consumption restarts from the group's last committed position (if any).
      "enable.auto.commit" -> (false: java.lang.Boolean),
      // Integer.valueOf replaces the deprecated `new Integer(...)` constructor.
      "heartbeat.interval.ms" -> Integer.valueOf(100),
      "session.timeout.ms" -> Integer.valueOf(20000)
    )

    // Topic list to subscribe to.
    val topics = Array("GSPASSINFO")

    // Direct-stream consumption.
    val stream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](ssc,
      PreferConsistent,
      // Subscribe strategy (a regex subscription, e.g. my-orders-.*, would use SubscribePattern instead)
      Subscribe[String, String](topics, kafkaParams))

    println("vvvvvvvvvvvvvvvvvvvvvvvvvvvv")
    //3715000530-3715000545
    // Parse each CSV record into (plateNumber, (checkpointId, time, checkpointName, recordId)).
    // Field positions used: 0 = record id, 4 = time, 5 = checkpoint id,
    // 6 = checkpoint name, 9 = plate number.
    val WindowsData: DStream[(String, (String, String, String, String))] = stream.map(
      line => {

        print("777777777777value777777777777：" + line.value())

        // NOTE(review): no length check — a malformed record with fewer than 10
        // fields will throw ArrayIndexOutOfBoundsException and fail the task.
        val arr: Array[String] = line.value().split(",")
        // Passage time
        val sj = arr(4)
        // Plate number (号牌)
        val hphm = arr(9)
        // Checkpoint id (卡口编号)
        val kkbh = arr(5)
        // Checkpoint name (卡口名称)
        val kkmc = arr(6)
        // Record id — `val`: never reassigned (was `var`)
        val dataid = arr(0)

        (hphm, (kkbh, sj, kkmc, dataid))
      }
    ).window(Seconds(5))

    // Keep only plates with more than one passage record within the window.
    val filterHphmGroupedRDD = WindowsData.groupByKey()
      .filter(_._2.size > 1)

    // Print every matching group, per batch, on the executors.
    filterHphmGroupedRDD.foreachRDD(rdd => {
      rdd.foreachPartition(part => {
        part.foreach(println)
      })
    })

    ssc.start()
    // Block until the streaming job is stopped.
    ssc.awaitTermination()
  }

}
