# -*- coding:utf-8 -*-
from kafka import KafkaProducer
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
from pyspark import SparkConf, SparkContext
import json
import sys

def show(rdd):
    """Print the full contents of *rdd* to stdout (driver-side debug helper).

    Bug fix: the original used a curly-brace body (``def show(rdd):{ ... }``),
    which is not valid Python syntax and prevented the whole file from parsing.

    :param rdd: any object exposing ``collect()`` (a Spark RDD in practice).
    :returns: None.
    """
    # collect() pulls the distributed data back to the driver — fine for
    # debugging, but do not use on large RDDs.
    print(rdd.collect())

def KafkaWordCount(zkQuorum, group, topics, numThreads):
    """Consume messages from Kafka via a Spark Streaming receiver and print
    each 10-second batch to stdout. Blocks until the streaming context is
    terminated.

    Bug fixes versus the original:
    * The four parameters were ignored — hard-coded zookeeper address, group
      and topic were passed to ``createStream`` instead. They are now used.
    * ``setMaster`` was called AFTER ``SparkContext`` was created, so it had
      no effect. It is now set on the conf before the context is built, and
      uses ``local[2]`` because a receiver-based stream occupies one thread;
      with a single thread no batch would ever be processed.

    :param zkQuorum: zookeeper quorum, e.g. ``"host:2181"``.
    :param group: Kafka consumer group id.
    :param topics: topic name to subscribe to.
    :param numThreads: number of receiver threads for the topic.
    :returns: never returns normally (``awaitTermination`` blocks).
    """
    spark_conf = SparkConf().setAppName("KafkaWordCount").setMaster("local[2]")
    sc = SparkContext(conf=spark_conf)
    sc.setLogLevel("ERROR")
    # 10-second micro-batch interval.
    ssc = StreamingContext(sc, 10)

    # createStream expects a {topic: threadCount} mapping.
    lines = KafkaUtils.createStream(ssc, zkQuorum, group, {topics: numThreads})
    lines.foreachRDD(show)
    ssc.start()
    ssc.awaitTermination()


# Format conversion: turn [["1", 3], ["0", 4], ["2", 3]] into the JSON text
# for [{'1': 3}, {'0': 4}, {'2': 3}], so downstream consumers (tutorial part
# four) need no code changes.
def Get_dic(rdd_list):
    """Serialize *rdd_list* ([[key, count], ...]) to a JSON array of
    single-entry objects ([{key: count}, ...]).

    :param rdd_list: iterable of indexable pairs (key at [0], count at [1]).
    :returns: JSON string, e.g. ``'[{"1": 3}]'``.
    """
    singletons = [{pair[0]: pair[1]} for pair in rdd_list]
    return json.dumps(singletons)


def sendmsg(rdd):
    """Serialize a non-empty RDD's contents and publish them to the Kafka
    topic ``"result"``.

    Bug fix: the original guard was ``if rdd.count != 0`` — comparing the
    bound *method object* itself (always truthy) rather than calling it, so
    empty RDDs were still serialized and sent. The method is now called.

    :param rdd: Spark RDD of [key, count] pairs (see ``Get_dic``).
    :returns: None.
    """
    if rdd.count() != 0:
        msg = Get_dic(rdd.collect())
        # Create a KafkaProducer instance to publish the message to Kafka.
        producer = KafkaProducer(bootstrap_servers='10.0.38.14:9092')
        producer.send("result", msg.encode('utf8'))
        # Important: flush, otherwise buffered messages may never be sent.
        producer.flush()


if __name__ == '__main__':
    # The four expected command-line arguments:
    #   1. zkQuorum   — zookeeper address
    #   2. group      — consumer group the consumer belongs to
    #   3. topics     — topics this consumer reads
    #   4. numThreads — number of threads consuming the topic
    if len(sys.argv) >= 5:
        zkQuorum, group, topics = sys.argv[1], sys.argv[2], sys.argv[3]
        numThreads = int(sys.argv[4])
    else:
        # Too few arguments: show usage, then fall back to built-in defaults.
        print("Usage: KafkaWordCount <zkQuorum> <group> <topics> <numThreads>")
        zkQuorum = "10.0.38.14:2181"
        group = "testGroup"
        topics = "nnsmk"
        numThreads = 1
    print(group, topics)
    KafkaWordCount(zkQuorum, group, topics, numThreads)
