from pyspark import SparkConf, SparkContext, StorageLevel
from defs import context_jieba, filter_word, append_words, extract_user_words

if __name__ == '__main__':
    # Build the SparkConf object: app name "test", run locally on all cores.
    conf = SparkConf().setAppName("test").setMaster("local[*]")
    # Build the SparkContext — the entry point to the Spark execution environment.
    sc = SparkContext(conf=conf)

    try:
        # Read the Sogou query-log file from HDFS.
        rdd = sc.textFile("hdfs://node1:8020/input/SogouQ.txt")

        # Split each line on tab into its fields
        # (field 0 is the timestamp, e.g. "HH:MM:SS").
        split_rdd = rdd.map(lambda line: line.split("\t"))

        # split_rdd would be recomputed on each reuse; cache it to disk.
        # NOTE(review): DISK_ONLY avoids memory pressure but is the slowest
        # storage level — confirm this trade-off is intended.
        split_rdd.persist(StorageLevel.DISK_ONLY)

        # Extract the timestamp field.
        time_rdd = split_rdd.map(lambda x: x[0])

        # Keep only the hour portion of the timestamp, paired with a count of 1.
        hour_rdd = time_rdd.map(lambda x: (x.split(":")[0], 1))

        # Sum the counts per hour and sort by count, descending.
        # numPartitions=1 guarantees a single globally sorted result.
        result = hour_rdd.reduceByKey(lambda x, y: x + y) \
            .sortBy(lambda x: x[1], ascending=False, numPartitions=1) \
            .collect()

        print(result)
        # Sample output:
        # [('20', 3479), ('23', 3087), ('21', 2989), ('22', 2499), ('01', 1365), ('10', 973), ('11', 875), ('05', 798), ('02', 756), ('19', 735), ('12', 644), ('14', 637), ('00', 504), ('16', 497), ('08', 476), ('04', 476), ('03', 385), ('09', 371), ('15', 350), ('06', 294), ('13', 217), ('18', 112), ('17', 77), ('07', 70)]

        # Release the cached RDD.
        split_rdd.unpersist()
    finally:
        # Fix: always shut down the SparkContext so the application releases
        # its resources even if the job fails partway through.
        sc.stop()
