from pyspark import SparkConf, SparkContext, StorageLevel
from defs import context_jieba, filter_word, append_words, extract_user_words

if __name__ == '__main__':
    # Build the SparkConf object: local-mode master using all available cores.
    conf = SparkConf().setAppName("test").setMaster("local[*]")
    # Build the SparkContext execution-environment entry point.
    sc = SparkContext(conf=conf)

    try:
        # Read the Sogou query log from HDFS.
        rdd = sc.textFile("hdfs://node1:8020/input/SogouQ.txt")

        # Each line is tab-separated; split into fields.
        split_rdd = rdd.map(lambda line: line.split("\t"))

        # Persist split_rdd (disk-only to bound memory use).
        # NOTE(review): only one action runs below, so this RDD's lineage is
        # evaluated once — the cache mainly helps if more jobs are added later.
        split_rdd.persist(StorageLevel.DISK_ONLY)

        # Pair up (user_id, query_content); fields 1 and 2 of each record.
        user_content_rdd = split_rdd.map(lambda x: (x[1], x[2]))

        # Tokenize: extract_user_words emits one "user_word" key per word
        # (jieba-based segmentation, defined in defs.py).
        user_words_rdd = user_content_rdd.flatMap(extract_user_words)

        # Word count: top 5 (user, word) combinations by frequency.
        result = user_words_rdd.map(lambda x: (x, 1)) \
            .reduceByKey(lambda x, y: x + y) \
            .takeOrdered(5, lambda x: -x[1])

        print(result)
        # Sample output:
        # [('6185822016522959_scala', 2016), ('41641664258866384_博学谷', 1372),
        #  ('44801909258572364_hadoop', 1260), ('7044693659960919_数据', 1120),
        #  ('7044693659960919_仓库', 1120)]

        # Release the cache.
        split_rdd.unpersist()
    finally:
        # Fix: stop the SparkContext so the application cleanly releases its
        # resources even if the job above fails (was missing entirely).
        sc.stop()
