# -*- coding: utf-8 -*-

from pyspark import SparkContext
import os

# Force PySpark workers to use the python3 interpreter (otherwise they use
# whatever `python` resolves to on the worker side, which may mismatch the driver).
os.environ['PYSPARK_PYTHON'] = "python3"
# os.environ['SPARK_LOCAL_IP'] = "192.168.1.100"

# Local mode with 4 worker threads.
sc = SparkContext(appName="Lesson3", master="local[4]")

try:
    # RDD over the integers 1..49.
    rdd = sc.parallelize(range(1, 50))

    # NOTE(review): saveAsTextFile raises if the target directory already
    # exists — delete `result/` between runs or point at a fresh path.
    rdd.saveAsTextFile("file:///Users/sonto/Workspace/P1905/spark_example/result")

    # The five smallest elements, in ascending order.
    print(rdd.takeOrdered(5))
finally:
    # Always release the SparkContext, even when the job above fails;
    # the original script leaked it on any exception.
    sc.stop()
# print(rdd.reduce(lambda a,b: a+b))
# def map_func(values):
#     new_values = []
#     for value in values:
#         new_values.append(value * 3)
#     return new_values
# rdd2 = rdd.mapPartitions(map_func)


# def map_func(index, values):
#     new_values = []
#     for value in values:
#         new_values.append(value * index)
#     return new_values
#
# rdd3 = rdd.mapPartitionsWithIndex(map_func)
#
#
# def print_elements(values):
#     print("Partition:")
#     for value in values:
#         print(value)


# rdd3.foreachPartition(print_elements)


# rdd1 = sc.parallelize((1,2,3,4,10, 10))
# rdd2 = sc.parallelize((5,6,7,8,10))

# rdd3 = rdd1.union(rdd2)

# rdd3 = rdd1.intersection(rdd2)

# rdd3 = rdd1.distinct()

# rdd3.foreach(lambda x:print(x))


# rdd = sc.parallelize((("zs", 34), ("ls", 45), ('john', 23), ("zs", 56)))
# rdd.collect()

# rdd.reduceByKey(lambda v1, v2: v1 + v2).foreach(lambda x:print(x))

# rdd.sortByKey().foreach(lambda x:print(x))
# rdd.sortBy(lambda x:x[1]).foreach(lambda x:print(x))