# coding:utf-8
from pyspark import SparkConf, SparkContext
import os

# Point Spark's launcher at the JDK it should use to start the JVM.
# Must be set before SparkContext is created. Path is deployment-specific
# ('/server/jdk') — NOTE(review): confirm this matches the target host.
os.environ['JAVA_HOME'] = '/server/jdk'

if __name__ == '__main__':
    # Word count over a local text file using the PySpark RDD API.
    #
    # BUG FIX: the original chained .setAppName('test').setAppName('local[*]'),
    # which named the application 'local[*]' and never set a master URL.
    # 'local[*]' is a master URL (run locally using all cores), so the second
    # call must be setMaster.
    conf = SparkConf().setAppName('test').setMaster('local[*]')
    sc = SparkContext(conf=conf)

    try:
        # 1. Read the input file and build the base RDD (one element per line).
        file_rdd = sc.textFile('../data/input/words.txt')

        # 2. flatMap: split each line on spaces, flattening into an RDD of words.
        word_rdd = file_rdd.flatMap(lambda line: line.split(' '))

        # 3. map: pair every word with an initial count of 1 -> (word, 1).
        one_word_rdd = word_rdd.map(lambda word: (word, 1))

        # 4. reduceByKey: sum the counts for each distinct word.
        result_rdd = one_word_rdd.reduceByKey(lambda a, b: a + b)

        # 5. collect: bring the (word, count) pairs back to the Driver and print.
        print(result_rdd.collect())
    finally:
        # Release the JVM and cluster resources even if a job above fails.
        sc.stop()
