package com.gizwits.kafkaStream

import consumer.kafka.ReceiverLauncher
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
object kafkaStreamConsumer {

  /**
   * Entry point: launches a receiver-based Spark Streaming job that consumes
   * the "gizwits_raw" Kafka topic via the low-level consumer's
   * [[consumer.kafka.ReceiverLauncher]] and prints
   * (consumer, offset, payload-as-String) tuples for every 1-second batch.
   *
   * Blocks forever in `ssc.awaitTermination()`.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    import org.apache.log4j.{Level, Logger}

    // Silence framework logging so only application output reaches the console.
    Logger.getLogger("org").setLevel(Level.OFF)
    Logger.getLogger("akka").setLevel(Level.OFF)

    // Create SparkContext.
    // NOTE(review): spark.storage.memoryFraction = "1" dedicates the whole
    // storage region to caching, leaving nothing for shuffle/execution in
    // pre-unified-memory Spark — confirm this is intentional.
    val conf = new SparkConf()
      .setAppName("LowLevelKafkaConsumer")
      .set("spark.executor.memory", "1g")
      .set("spark.rdd.compress", "true")
      .set("spark.storage.memoryFraction", "1")
      .set("spark.streaming.unpersist", "true")

    val sc = new SparkContext(conf)

    // 1-second micro-batch interval.
    val ssc = new StreamingContext(sc, Seconds(1))

    val topic = "gizwits_raw"
    val zkhosts = "s60"
    val zkports = "2181"
    val brokerPath = "/brokers"

    // Number of parallel receivers to launch.
    val numberOfReceivers = 3

    // The number of partitions for the topic is discovered automatically;
    // it can be overridden by adding the kafka.partitions.number property.
    val kafkaProperties: Map[String, String] = Map(
      "zookeeper.hosts" -> zkhosts,
      "zookeeper.port" -> zkports,
      "zookeeper.broker.path" -> brokerPath,
      "kafka.topic" -> topic,
      "zookeeper.kafkaConsumer.connection" -> "s60:2181",
      "zookeeper.kafkaConsumer.path" -> "/consumers",
      "kafka.kafkaConsumer.id" -> "12345",
      // optional properties
      "kafkaConsumer.forcefromstart" -> "true",
      "kafkaConsumer.fetchsizebytes" -> "1048576",
      "kafkaConsumer.fillfreqms" -> "250")

    // ReceiverLauncher expects java.util.Properties, so copy the map over.
    val props = new java.util.Properties()
    kafkaProperties.foreach { case (key, value) => props.put(key, value) }

    val tmp_stream =
      ReceiverLauncher.launch(ssc, props, numberOfReceivers, StorageLevel.MEMORY_ONLY)

    // Decode each message payload with the platform default charset, keeping
    // the consumer id and offset alongside it.
    // NOTE(review): new String(bytes) uses the platform charset — consider an
    // explicit charset (e.g. UTF-8) if payloads are known to be UTF-8 encoded.
    val pt = tmp_stream.map(line =>
      (line.getConsumer, line.getOffset, new String(line.getPayload)))

    // Print a sample of each batch to stdout, then run until terminated.
    pt.print()
    ssc.start()
    ssc.awaitTermination()

  }
}
