from pyspark import SparkConf, SparkContext


# coding: utf-8  # NOTE(review): a coding declaration only takes effect on line 1 or 2 (PEP 263); it is inert here

def splitLine(x):
    """Split the string *x* on whitespace and return the list of tokens."""
    tokens = x.split()
    return tokens

# Connect to a local standalone Spark master and quiet the driver logging.
conf = SparkConf()
conf.setMaster("spark://127.0.0.1:7077")
conf.setAppName("test")
sc = SparkContext(conf=conf)
sc.setLogLevel("WARN")

# txtRdd = sc.textFile("file:///Users/sonto/Workspace/Rimi/P1901/lessons/spark/lesson1.py")
# newRdd = txtRdd.map(splitLine)
# for x in newRdd.collect():
#     print(x)

# A plain RDD of words plus two pair RDDs used by the transformation/action
# examples below. Note the duplicate "name" keys in srdd — they matter for
# reduceByKey/groupByKey/join demos.
rdd = sc.parallelize(["how", "are", "you"])
srdd = sc.parallelize([
    ("name", "sonto"),
    ("age", 45),
    ("gender", True),
    ("name", "john"),
    ("name", "sdfsdf"),
])
srdd1 = sc.parallelize([
    ("name", "mike"),
    ("name", "john"),
    ("gender", False),
    ("age", 34),
    ("address", "chengdu"),
])
# print("* map with key")
# for x in rdd.map(lambda x: (x, 1)).collect():
#     print(x)
#
#
# print("* flat map with key")
# for x in rdd.flatMap(lambda x: ((x, 1), (x, 1))).collect():
#     print(x)
#
# print("* do distinct")
# for x in srdd.keys().distinct().collect():
#     print(x)
#
# print("* union two rdds")
# for x in srdd.union(srdd1).collect():
#     print(x)
#
# print("* do intersection")
# for x in srdd.intersection(srdd1).collect():
#     print(x)
#
# print("* do subtract")
# for x in srdd.subtract(srdd1).collect():
#     print(x)

# print("* filter with `name` ")
# for x in srdd.filter(lambda x: None if x[0] == 'name' else True).collect():
#     print(x)
#
#
# print("* print keys")
# for x in srdd.keys().collect():
#     print(x)
#
# print("* reduceByKey")
# for x in srdd.reduceByKey(lambda x, y: x + "+" + y).collect():
#     print(x)
#
# print("* mapValues")
# for x in srdd.mapValues(lambda x: str(x) + "*").collect():
#     print(x)
#
# print("* flatMapValues")
# for x in srdd.flatMapValues(lambda x: (str(x), str(x))).collect():
#     print(x)
#
# print("* groupByKey")
# for x in srdd.groupByKey().collect():
#     print(x[0], [m for m in x[1]])

# Actions (eager operations that trigger a job).
from operator import add


# def add_(x, y):
#     print(x, y)
#     return x + y
#
# print(rdd.fold('*', add_))

# Inner-join the two pair RDDs on key and print each (key, (left, right))
# pair; only keys present in BOTH RDDs appear, and duplicate keys produce
# one output row per left/right combination.
for pair in srdd.join(srdd1).collect():
    print(pair)

sc.stop()
