"""结合累加器和广播变量完成需求：
    1.统计特殊符号出现的次数
    2.对单词进行计数"""
# NOTE: a "coding" declaration is only honored on line 1 or 2 of a file,
# so this one has no effect; Python 3 source defaults to UTF-8 anyway.
from pyspark import SparkContext,SparkConf
import re
from operator import add


if __name__ == '__main__':
    conf = SparkConf().setAppName('demo')
    sc = SparkContext(conf=conf)

    # Accumulator: a driver-side counter that executor tasks can only add to.
    count = sc.accumulator(0)
    # Special symbols to tally separately instead of counting as words.
    abnormal_char = [",", ".", "!", "#", "$", "%"]
    # Broadcast the list so each executor receives one read-only copy
    # instead of shipping it inside every task closure.
    bc_01 = sc.broadcast(abnormal_char)

    file_rdd = sc.textFile("../data/input/accumulator_broadcast_data.txt")

    # Strip leading/trailing whitespace, then drop empty lines.
    filter_rdd = file_rdd.map(str.strip).filter(lambda line: line != '')

    # Split each line on runs of whitespace. Raw string r'\s+' avoids the
    # invalid-escape DeprecationWarning (SyntaxWarning since Python 3.12).
    words = filter_rdd.flatMap(lambda line: re.split(r'\s+', line))


    def my_filter(data):
        """Tally special symbols via the accumulator; map real words to (word, 1).

        Returns None for special symbols (filtered out downstream) and a
        (word, 1) pair otherwise.
        """
        global count
        if data in bc_01.value:
            count += 1
            return None
        return data, 1


    # Cache the mapped RDD: if Spark ever recomputed this lineage (e.g. a
    # second action), my_filter would run again and the accumulator would
    # double-count the special symbols.
    mapped_rdd = words.map(my_filter).filter(lambda x: x is not None).cache()
    result_rdd = mapped_rdd.reduceByKey(add)
    print(result_rdd.collect())
    print('特殊字符出现的次数：', count)







