from pyspark import SparkContext, SparkConf
from pyspark.streaming import StreamingContext

from pyspark.streaming.kafka import KafkaUtils

if __name__ == '__main__':
    # Minimal Spark Streaming job: consume the Kafka topic "test" through the
    # ZooKeeper-based receiver and print each 10-second micro-batch to stdout.
    conf = SparkConf().setMaster('local[2]').setAppName('pyspark_kafka')
    sc = SparkContext(conf=conf)
    ssc = StreamingContext(sc, 10)  # batch interval: 10 seconds
    # Spark upper-cases the level string internally, so "Error" resolves to ERROR.
    ssc.sparkContext.setLogLevel("Error")

    # Receiver-based stream: (ssc, zkQuorum, consumerGroupId, {topic: numThreads}).
    # NOTE(review): KafkaUtils.createStream is the legacy ZooKeeper receiver API,
    # removed in Spark 3.x — migrate to Structured Streaming when upgrading.
    kafka_streaming_rdd = KafkaUtils.createStream(ssc, "niit01:2181", "2", {"test": 1})

    # Print the first elements of every batch to the console.
    kafka_streaming_rdd.pprint()

    ssc.start()
    # Bug fix: the original `ssc.awaitTermination` (no parentheses) was a no-op
    # attribute access, so the driver exited right after start(). Calling it
    # blocks the driver until the streaming context is stopped or fails.
    ssc.awaitTermination()
