package ApplicationTest.Example.KafKa.Consumer

import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.KafkaUtils
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.{Milliseconds, StreamingContext}

object KafkaToRedisConsumer {

  // Local-mode Spark configuration for this test application.
  private val conf = new SparkConf().setMaster("local[*]").setAppName("Scala Spark Test Application")

  /**
   * Entry point: consumes string records from Kafka in 5-second micro-batches,
   * increments a per-value counter in a Redis hash for every record, and prints
   * each batch as a DataFrame sorted by key.
   */
  def main(args: Array[String]): Unit = {

    // Silence noisy framework loggers so application output stays readable.
    Logger.getLogger("org.apache.spark").setLevel(Level.ERROR)
    Logger.getLogger("org.apache.hadoop").setLevel(Level.ERROR)
    Logger.getLogger("org.apache.zookeeper").setLevel(Level.WARN)
    Logger.getLogger("org.apache.hive").setLevel(Level.WARN)

    // 5000 ms batch interval. Plain `val` — the original `lazy val` bought
    // nothing because the context is used unconditionally on the next line.
    val stream = new StreamingContext(conf, Milliseconds(5000))

    stream.sparkContext.setLogLevel("WARN")

    val groupId = "group1"
    val bootstrapServers = "master:9092,spark02:9092,spark03:9092"

    // Kafka consumer settings; auto-commit is disabled, so offsets are not
    // committed automatically by the consumer.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> bootstrapServers, // broker list for the local cluster
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> groupId, // consumer group this application joins
      "auto.offset.reset" -> "latest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    val topics = Array("helloword") // topic name fixed by the local cluster setup

    // `val` instead of `var`: the stream reference is never reassigned.
    val kafkaStream = KafkaUtils.createDirectStream[String, String](
      stream,
      PreferConsistent,
      Subscribe[String, String](topics, kafkaParams)
    )

    val dbIndex = 1                          // Redis logical database to select
    val clickHashKey = "app::user::click"    // Redis hash holding per-value counters

    // For each micro-batch: update Redis counters per record, then show the batch.
    kafkaStream.foreachRDD { rdd =>

      val words: RDD[KafkaInfoStand] =
        rdd.mapPartitions { records =>
          records.map { record =>
            val time = record.timestamp() // record timestamp
            val key = record.key()
            val value = record.value()
            val partition = record.partition()
            val offset = record.offset()
            val topic = record.topic()

            // Borrow a Jedis connection from the pool. The `finally` block
            // guarantees the connection is returned even when a Redis call
            // throws — the original leaked the connection on failure.
            val jedis = RedisClient.pool.getResource
            try {
              jedis.select(dbIndex)
              // NOTE(review): incrementing by the Kafka offset (not by 1)
              // reproduces the original behavior — confirm this is intended.
              jedis.hincrBy(clickHashKey, value, offset)
            } finally {
              RedisClient.pool.returnResource(jedis)
            }

            println(s"$time - $key - $value - $partition - $offset - $topic")

            KafkaInfoStand(key, value, time, partition, offset, topic)
          }
        }

      // Reuse (or lazily create) the singleton SparkSession for SQL on this batch.
      val spark = SparkSession.builder.config(rdd.sparkContext.getConf).getOrCreate()

      import spark.implicits._

      // Column-name typo fixed: "parttion" -> "partition". Safe within this
      // block because the temp view below is only queried with SELECT *.
      val wordsDataFrame = words.toDF("key", "value", "timestamp", "partition", "offset", "topic")

      // Register the batch as a temporary view and query it with SQL.
      wordsDataFrame.createOrReplaceTempView("words")

      val wordCountsDataFrame = spark.sql("SELECT * FROM words").sort("key")

      // A `wordCountsDataFrame.write` sink could be attached here to persist
      // each batch instead of (or in addition to) printing it.
      wordCountsDataFrame.show()
    }

    stream.start()
    stream.awaitTermination()
  }

}
