package com.zhaosc.spark.stream

import org.apache.spark.streaming.StreamingContext
import org.apache.spark.SparkConf
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.kafka.KafkaUtils
import kafka.serializer.StringDecoder
import com.zhaosc.spark.constant.SysConst

/**
 * Parallelism notes:
 * 1. Each RDD inside the direct-stream DStream has the same number of partitions
 *    as the Kafka topic it reads from (1:1 Kafka partition -> RDD partition).
 * 2. The DStream read from Kafka can be repartitioned with repartition(numPartitions)
 *    if more downstream parallelism is needed.
 * @author root
 */
object SparkStreamingOnKafkaDirected {

  /**
   * Entry point: builds a Spark Streaming context, attaches a Kafka direct
   * stream, registers an output operation, and runs until terminated.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    // Kryo serialization is more compact/faster than default Java serialization.
    val conf = new SparkConf()
      .setMaster("local[1]")
      .setAppName("NetworkWordCount")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    // Micro-batch interval: process one batch of messages every 5 seconds.
    val ssc = new StreamingContext(conf, Seconds(5))

    val params = Map[String, String](
      "metadata.broker.list" -> SysConst.KAFA_METADATA_BROKER_LIST,
      "group.id" -> SysConst.KAFA_PRS_USERFACE_GROUP_ID,
      // Allow individual Kafka messages up to 5 MiB per fetch request.
      "fetch.message.max.bytes" -> String.valueOf(5 * 1024 * 1024))

    // NOTE(review): "topic" is hard-coded while broker list and group id come from
    // SysConst — this literal is likely a placeholder; confirm the real topic name.
    val directStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, params, Set("topic"))

    // BUG FIX: Spark Streaming requires at least one output operation to be
    // registered before start(); without one, ssc.start() throws
    // IllegalArgumentException ("No output operations registered, so nothing to
    // execute"). print() registers a minimal output op that dumps the first
    // elements of each batch to stdout — replace with real processing as needed.
    directStream.print()

    ssc.start()
    ssc.awaitTermination()
  }
}