import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, HasOffsetRanges, KafkaUtils, LocationStrategies, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}

import java.util.Properties


/**
 * Exercise 1:
 * Send the data from sample.log to Kafka, process it with Spark Streaming,
 * transforming each record into the format:
 *    commandid | houseid | gathertime | srcip | destip | srcport | destport |
 *    domainname | proxytype | proxyip | proxytype | title | content | url | logid
 * and send the result to another Kafka topic.
 * Requirements:
 *    1. sample.log => read the file and send its lines to a Kafka topic
 *    2. consume from that topic (the 0.10 API does not manage offsets for us),
 *       reformat the data
 *    3. send the processed data to another Kafka topic
 */

object Subject1 {
  def main(args: Array[String]): Unit = {
    Logger.getLogger("org").setLevel(Level.ERROR)
    val conf: SparkConf = new SparkConf().setAppName(this.getClass.getCanonicalName)
      .setMaster("local[*]")
    val ssc = new StreamingContext(conf, Seconds(10))

    // Kafka consumer parameters
    val groupid = "group0"
    val topics: Array[String] = Array("topic01")
    val kafkaParams: Map[String, Object] = getKafkaConsumerParameters(groupid)

    // Restore previously committed offsets from Redis — we manage offsets
    // manually because auto-commit is disabled in the consumer config.
    val offsets: Map[TopicPartition, Long] = OffsetsWithRedisUtils.getOffsetsFromRedis(topics, groupid)
    offsets.foreach(println)

    // Create the direct stream, starting from the restored offsets
    val dstream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream(
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](topics, kafkaParams, offsets)
    )

    // Producer config used to forward the transformed records to the output topic
    val brokers = "linux121:9092"
    val topic = "topic02"
    val prop = new Properties()

    prop.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers)
    prop.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])
    prop.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])

    // Transform and forward each batch
    dstream.foreachRDD { (rdd, time) =>
      if (!rdd.isEmpty()) {
        // Capture the batch's offset ranges before any transformation
        val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
        // Log every partition's range (the original printed only the first one)
        offsetRanges.foreach { range =>
          println(s"${range.topic} ${range.partition} ${range.fromOffset} ${range.untilOffset}")
        }
        rdd.foreachPartition { iter =>
          // One producer per partition — NOT one per record as before — and
          // close it when the partition is done, so we neither leak connections
          // nor pay producer start-up cost for every message.
          val producer = new KafkaProducer[String, String](prop)
          try {
            iter
              .map { record =>
                // Reformat: drop the "<<<!>>>" markers and re-join the
                // comma-separated fields with '|'
                record.value
                  .replace("<<<!>>>", "")
                  .split(",")
                  .mkString("|")
              }
              .foreach { line =>
                producer.send(new ProducerRecord[String, String](topic, line))
              }
          } finally {
            producer.close()
          }
        }
        // Persist the processed offsets so a restart resumes where we left off
        OffsetsWithRedisUtils.saveOffsetsToRedis(offsetRanges, groupid)
      }
    }

    // Start the streaming job and block until termination
    ssc.start()
    ssc.awaitTermination()
  }

  /**
   * Kafka consumer configuration for the given consumer group:
   * string key/value deserialization, auto-commit disabled (offsets are
   * stored in Redis by the caller).
   */
  def getKafkaConsumerParameters(groupid: String): Map[String, Object] = {
    Map[String, Object](
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "linux121:9092",
      ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> classOf[StringDeserializer],
      ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> classOf[StringDeserializer],
      ConsumerConfig.GROUP_ID_CONFIG -> groupid,
      ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG -> (false: java.lang.Boolean)
    )
  }

}
