from pyspark import SparkConf, SparkContext, StorageLevel
from defs import context_jieba, filter_word, append_words

if __name__ == '__main__':
    # Build the SparkConf object: local mode using all available cores.
    conf = SparkConf().setAppName("test").setMaster("local[*]")
    # Build the SparkContext — the entry point of the execution environment.
    sc = SparkContext(conf=conf)

    # Guard the whole job so the context is always stopped, even on failure
    # (the original never called sc.stop(), leaking driver resources).
    try:
        # Read the query-log file from HDFS.
        rdd = sc.textFile("hdfs://node1:8020/input/SogouQ.txt")

        # Split each line on tab characters into its columns.
        split_rdd = rdd.map(lambda line: line.split("\t"))

        # split_rdd is reused by downstream stages, so cache it.
        # NOTE(review): DISK_ONLY avoids memory pressure; MEMORY_AND_DISK
        # would likely be faster if the dataset fits in RAM — confirm.
        split_rdd.persist(StorageLevel.DISK_ONLY)

        try:
            # Extract the search-content column (index 2).
            content_add = split_rdd.map(lambda x: x[2])

            # Tokenize the search content (jieba-based helper from defs).
            words_rdd = content_add.flatMap(context_jieba)

            # Sample check of the tokenization result:
            # print(words_rdd.takeSample(False, 5))

            # Drop tokens that need special handling.
            filter_rdd = words_rdd.filter(filter_word)

            # Normalize/repair the special-case words that remain.
            final_rdd = filter_rdd.map(append_words)

            # Classic word count, then take the top 5 by descending frequency.
            result = final_rdd.map(lambda word: (word, 1)) \
                .reduceByKey(lambda x, y: x + y) \
                .takeOrdered(5, lambda x: -x[1])

            print(result)
            # Expected output:
            # [('scala', 2310), ('hadoop', 2268), ('博学谷', 2002), ('传智汇', 1918), ('itheima', 1680)]
        finally:
            # Release the cached RDD even if a downstream action failed.
            split_rdd.unpersist()
    finally:
        # Shut down the Spark driver and release cluster resources.
        sc.stop()