from pyspark import SparkConf
from pyspark import SparkContext
import os


# Ensure PySpark workers use the python3 interpreter.
os.environ["PYSPARK_PYTHON"] = "python3"

# Configure the Spark application: run locally with 4 worker threads.
conf = SparkConf().setMaster("local[4]").setAppName("wordCount")

# Create the Spark context from the configuration.
sc = SparkContext(conf=conf)

# Build the input RDD. Alternatives (kept for reference):
#   - from an in-memory iterable:
#       rdd = sc.parallelize((1, 2, 3, 4, 5, 6, 7, 8, 9))
#   - from every *.txt file in a directory:
#       rdd = sc.textFile("file:///Users/liuqi/PycharmProjects/P1905/liuqi/hadoop/*.txt")
# Here we read a single local text file; each element is one line of text.
rdd = sc.textFile("file:///Users/liuqi/PycharmProjects/P1905/liuqi/hadoop/word.txt")

def map_line(data):
    """Split one line of text into (word, 1) pairs.

    Whitespace-delimited tokens are each paired with an initial count
    of 1, ready for key-based aggregation.
    """
    return [(token, 1) for token in data.split()]

# Show the raw input lines for debugging.
print("rdd:")
for r in rdd.collect():
    print(r)

# Expand each line into individual (word, 1) pairs.
# (rdd.map(map_line) would yield one list per line; flatMap flattens
# those lists into a single stream of pairs.)
rdd1 = rdd.flatMap(map_line)

# Sum counts per word. reduceByKey is preferred over
# groupByKey().map(sum): it combines values on each partition before
# the shuffle, so only partial sums cross the network, while producing
# identical (word, total) results.
rdd3 = rdd1.reduceByKey(lambda a, b: a + b)
print(rdd3.collect())
sc.stop()