from __future__ import print_function
import argparse
import json
import re
from pyspark import SparkContext, SparkConf
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils


def main():
    """Stream tweets from Kafka via Spark Streaming and print per-batch counts.

    Command-line arguments:
        broker: Kafka broker address, e.g. "localhost:9092".
        topics: one or more Kafka topic names to subscribe to.
        batch:  batch duration in seconds for the StreamingContext.

    Blocks in ``awaitTermination`` until the job is stopped externally.

    NOTE(review): ``pyspark.streaming.kafka`` was removed in Spark 3.x —
    this script presumably targets Spark 2.x; confirm the deployed version.
    """
    parser = argparse.ArgumentParser(description="Gets twitter data from Kafka and work with it.")
    # A plain positional consumes exactly one argument and stores the value
    # directly (no nargs=1 list + [0] unpacking needed).
    parser.add_argument("broker", help="broker name")
    parser.add_argument("topics", nargs="+", help="topics list")
    parser.add_argument("batch", type=int, help="Batch duration for StreamingContext")
    args = parser.parse_args()

    broker = args.broker
    topics = args.topics
    batch_duration = args.batch

    # Log the effective configuration (was a debug print of type(batch_duration)).
    print("broker=%s topics=%s batch_duration=%ds" % (broker, topics, batch_duration))

    # local[2]: one thread for receiving, one for processing.
    conf = SparkConf().setMaster("local[2]").setAppName("Streamer")
    sc = SparkContext(conf=conf)
    sc.setLogLevel("WARN")
    ssc = StreamingContext(sc, batch_duration)

    # Direct (receiver-less) stream: Spark reads offsets itself, one RDD
    # partition per Kafka partition.
    kvs = KafkaUtils.createDirectStream(ssc, topics, {"metadata.broker.list": broker})

    # Messages arrive as (key, value) pairs; keep only the value payload.
    lines = kvs.map(lambda x: x[1])

    lines.count().map(lambda x: 'Tweets in this batch: %s' % x).pprint()

    ssc.start()
    ssc.awaitTermination()  # block until the streaming job is stopped


# Entry point: run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
