# -*- coding: UTF-8 -*-
from pyspark import SparkContext

def f(iterator):
    """Pair every element of *iterator* with the count 1.

    Intended as a mapPartitions-style helper for word counting:
    each word becomes a ``(word, 1)`` tuple ready for reduceByKey.

    :param iterator: any iterable of elements (typically words).
    :return: list of ``(element, 1)`` tuples, in input order.
    """
    # Comprehension replaces the manual append loop (and the
    # `List` name, which shadows the typing.List convention).
    return [(x, 1) for x in iterator]

if __name__ == "__main__":

    # Task: word-frequency count over the local file /root/wordcount.txt
    # ********** Begin **********#
    sc = SparkContext('local', 'a')

    # Pipeline: read lines -> split on single spaces -> emit (word, 1)
    # pairs -> sum counts per word -> order by count, descending.
    # (flatMap(split) is the fused form of map(split) + flatMap(identity).)
    word_counts = (
        sc.textFile('/root/wordcount.txt')
          .flatMap(lambda line: line.split(' '))
          .map(lambda word: (str(word), 1))
          .reduceByKey(lambda a, b: a + b)
          .sortBy(lambda pair: pair[1], False)
    )

    print(word_counts.collect())
    sc.stop()



    # ********** End **********#
