package ApplicationTest.Example.KafKa.Consumer

import java.io.IOException

import scala.util.control.NonFatal

import com.twitter.bijection.Injection
import com.twitter.bijection.avro.GenericAvroCodecs
import org.apache.avro.Schema
import org.apache.avro.generic.GenericRecord
import org.apache.commons.httpclient.methods.GetMethod
import org.apache.commons.httpclient.{HttpClient, HttpStatus, MultiThreadedHttpConnectionManager}
import org.apache.http.HttpException
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.KafkaUtils
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.{Milliseconds, StreamingContext}

/**
  * Spark Streaming consumer that reads Avro-encoded records from Kafka,
  * increments per-click counters in Redis, and prints each micro-batch as a
  * sorted DataFrame.
  */
object KafkaAvroStream {
  // Local-mode Spark configuration for this test application.
  private val conf = new SparkConf().setMaster("local[*]").setAppName("Scala Spark Test Application")

  def main(args: Array[String]): Unit = {
    val stream = new StreamingContext(conf, Milliseconds(5000))
    val groupId = "group1"

    // Kafka consumer configuration; auto-commit is disabled so offsets are
    // managed by the application, and consumption starts from the latest offset.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "master:9092,spark02:9092,spark03:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> groupId,
      "auto.offset.reset" -> "latest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )
    val topics = Array("helloword") // topic name as created on the local cluster
    val kafkaStream = createCustomDirectStreamKafkaStreamAvro(stream, kafkaParams, topics)

    // Broadcast the Avro injection once so every executor can deserialize
    // record values without re-fetching the schema from the registry.
    val broadcastInjection: Broadcast[Injection[GenericRecord, Array[Byte]]] =
      stream.sparkContext.broadcast(getRecordInjection(topics.last))

    kafkaStream.foreachRDD { rdd =>
      val words: RDD[KafkaInfoStand] =
        rdd.mapPartitions { records =>
          records.map { record =>
            val time = record.timestamp()
            val key = record.key()
            val bytes = record.value() // raw Avro binary payload

            // Deserialize the Avro payload. `.get` rethrows if the bytes are
            // not valid Avro for the broadcast schema, failing the task.
            val avroRecord: GenericRecord = broadcastInjection.value.invert(bytes).get
            val value = avroRecord.get("click").asInstanceOf[String] // click identifier

            val partition = record.partition()
            val offset = record.offset()
            val topic = record.topic()

            // Track click counts in Redis. FIX: the pooled connection is now
            // returned in a finally block so it is never leaked on error.
            val jedis = RedisClient.pool.getResource
            try {
              jedis.select(1)
              jedis.hincrBy("app::user::click", value, offset)
            } finally {
              RedisClient.pool.returnResource(jedis)
            }

            println(s"$time - $key - $value - $partition - $offset - $topic")

            KafkaInfoStand(key, value, time, partition, offset, topic)
          }
        }

      val spark = SparkSession.builder.config(rdd.sparkContext.getConf).getOrCreate()

      import spark.implicits._

      // FIX: column name corrected from "parttion" to "partition"; the only
      // downstream query is SELECT *, so no column reference breaks.
      val wordsDataFrame = words.toDF("key", "value", "timestamp", "partition", "offset", "topic")

      // Expose the batch as a temporary view and print it sorted by key.
      wordsDataFrame.createOrReplaceTempView("words")
      val wordCountsDataFrame = spark.sql("SELECT * FROM words").sort("key")
      wordCountsDataFrame.show()
    }

    // FIX: the streaming context was never started, so the foreachRDD job was
    // registered but no micro-batch would ever execute.
    stream.start()
    stream.awaitTermination()
  }

  /**
    * Create a direct Kafka stream whose values are the raw Avro-encoded bytes.
    *
    * @param scc         streaming context to attach the stream to
    * @param kafkaParams Kafka consumer configuration
    * @param topics      topics to subscribe to
    * @return input stream of (String key, Avro byte-array value) records
    */
  def createCustomDirectStreamKafkaStreamAvro(scc: StreamingContext, kafkaParams: Map[String, Object], topics: Array[String]): InputDStream[ConsumerRecord[String, Array[Byte]]] = {
    KafkaUtils.createDirectStream[String, Array[Byte]](scc, PreferConsistent, Subscribe[String, Array[Byte]](topics, kafkaParams))
  }

  /** Build a binary codec (GenericRecord <-> bytes) for the topic's latest registered schema. */
  def getRecordInjection(topic: String): Injection[GenericRecord, Array[Byte]] = {
    val avroSchema = getLatestSchemaByTopic(topic)
    GenericAvroCodecs.toBinary(avroSchema)
  }

  /**
    * Fetch the latest Avro schema for a Kafka topic from the schema registry.
    *
    * @param topicName topic name
    * @return parsed Avro schema
    * @throws RuntimeException if the HTTP call itself fails
    * @throws Exception if the registry returns a non-200 status or the body cannot be parsed
    */
  def getLatestSchemaByTopic(topicName: String): Schema = {
    val url = Constants.KAFKA_SCHEMA_REGISTRY_URL
    val httpClient: HttpClient = new HttpClient(new MultiThreadedHttpConnectionManager)
    // NOTE(review): "lastest" looks like a typo for "latest" — left untouched
    // because it must match the registry's actual route; confirm server-side.
    val schemaUrl = url + "/" + topicName + "/lastest"
    val get = new GetMethod(schemaUrl)
    var statusCode: Integer = 0
    var responseBody: String = ""
    try {
      statusCode = httpClient.executeMethod(get)
      responseBody = get.getResponseBodyAsString()
    } catch {
      case ioE: IOException =>
        throw new RuntimeException(ioE)
      case httpE: HttpException =>
        throw new RuntimeException(httpE)
    } finally {
      // Always release the connection back to the manager's pool.
      get.releaseConnection()
    }
    if (statusCode != HttpStatus.SC_OK) {
      throw new Exception(s"latest schema for topic $topicName cannot be retrieved. Status code = $statusCode ")
    }
    // The registry response may carry a prefix before the JSON schema body;
    // parse from the first '{' onward.
    try {
      val schemaString = responseBody.substring(responseBody.indexOf("{"))
      new Schema.Parser().parse(schemaString)
    } catch {
      // FIX: was `case t: Throwable`, which swallowed fatal errors (OOM etc.)
      // and dropped the cause; NonFatal lets fatal errors propagate and the
      // original exception is now chained for diagnosis.
      case NonFatal(e) =>
        throw new Exception(s"latest schema for topic $topicName cannot be retrieved", e)
    }
  }
}
