import re

from pyspark import SparkConf, SparkContext

if __name__ == '__main__':
    conf = SparkConf().setAppName("test").setMaster("local[*]")
    sc = SparkContext(conf=conf)

    # 1. Read the input data file.
    file_rdd = sc.textFile("../data/input/accumulator_broadcast_data.txt")

    # Characters treated as "abnormal": they get filtered out and counted.
    abnormal_char = [",", ".", "!", "#", "$", "%"]

    # 2. Wrap the list in a broadcast variable so each executor receives one
    #    read-only copy instead of shipping the list with every task.
    broadcast = sc.broadcast(abnormal_char)

    # 3. Accumulator used to count abnormal characters across executors.
    acmlt = sc.accumulator(0)

    # 4. Drop blank lines first — in Python a non-empty stripped string is
    #    truthy, an empty one is falsy.
    lines_rdd = file_rdd.filter(lambda line: line.strip())

    # 5. Strip leading/trailing whitespace from the remaining lines.
    data_rdd = lines_rdd.map(lambda line: line.strip())

    # 6. Split on runs of whitespace (some words are separated by two spaces,
    #    so a regex split is needed). Raw string r"\s+" avoids the
    #    invalid-escape-sequence warning that "\s+" triggers on Python 3.12+.
    words_rdd = data_rdd.flatMap(lambda line: re.split(r"\s+", line))


    # 7./8. words_rdd still mixes real words with the abnormal symbols;
    # keep only the words, tallying each dropped symbol in the accumulator.
    def filter_func(data):
        """Return True for normal words; count abnormal chars and drop them."""
        global acmlt
        if data not in broadcast.value:
            return True
        acmlt += 1
        return False


    normal_words_rdd = words_rdd.filter(filter_func)

    result_rdd = normal_words_rdd.map(lambda x: (x, 1)).reduceByKey(lambda a, b: a + b)

    # collect() is the action that actually runs the pipeline, so the
    # accumulator is only populated after this call.
    print("正常单词计数结果", result_rdd.collect())
    # Read .value: printing the Accumulator object itself would show its
    # repr ("Accumulator<id=..., value=...>") rather than the plain count.
    print("特殊字符数量", acmlt.value)

    # Release cluster resources explicitly.
    sc.stop()
