from pyspark import SparkConf
from pyspark import SparkContext
import os

# Ensure PySpark workers use the python3 interpreter.
os.environ['PYSPARK_PYTHON'] = "python3"

# Configure the Spark application: run locally with 4 worker threads.
conf = SparkConf().setMaster("local[4]").setAppName("WordCount")

# Create the Spark Context object (entry point to the cluster).
sc = SparkContext(conf=conf)

# Create an RDD from an existing iterable object:
# rdd = sc.parallelize((1,2,4,5,6,7,8,9))

# Read a single file:
# rdd = sc.textFile("file:///Users/sonto/Workspace/P1905/spark_example/jobs.txt")

# Read a directory of files (filename, content) pairs:
# rdd = sc.wholeTextFiles("file:///Users/sonto/Workspace/P1905/spark_example/*.txt")

rdd = sc.textFile("file:///Users/sonto/Workspace/P1905/spark_example/words.txt")


def map_line(data):
    """Split a line of text into (word, 1) pairs for word counting.

    Args:
        data: a line of text (str); split on any whitespace.

    Returns:
        A list of (word, 1) tuples, one per whitespace-separated token.
        An empty or whitespace-only line yields an empty list.
    """
    # Removed unused `global i` — no module-level `i` exists, and the
    # function never read or wrote it.
    return [(word, 1) for word in data.split()]

# map vs. flatMap: map would produce one list-of-pairs element per line;
# flatMap flattens each returned list so rdd1 contains individual
# (word, 1) tuples.
rdd1 = rdd.flatMap(map_line)

# print("\nrdd1 dataset:")
# for x in rdd1.collect():
#     print(x)

# Group pairs by key (word):
# rdd2 = rdd1.groupByKey()
# # print(rdd2.collect())
#
# NOTE(review): the commented-out line below is buggy — flatMap would
# scatter the key and the sum as separate elements instead of keeping
# (word, count) pairs; use rdd2.mapValues(sum) or
# rdd2.map(lambda x: (x[0], sum(x[1]))) instead.
# rdd3 = rdd2.flatMap(lambda x: (x[0], sum(x[1])))
# print(rdd3.collect())
