from pyspark import SparkConf, SparkContext
from pyspark.sql import HiveContext, SparkSession

# Sample input: a whitespace-split word list used to build the demo RDD below.
words = "this is a letter for john, which described a girl loves him this is letter how are you this is a test".split()
# Second word list; only referenced by the commented-out experiments further down.
new_words = "abc def ghi described".split()
#
#
# def filter_func(x):
#     if x == "letter":
#         return None
#     return x
from pyspark.sql.dataframe import DataFrame

# Connect to the standalone Spark master and name the application.
sf = SparkConf().setMaster("spark://10.2.3.41:7077").setAppName("WordCount")
sc = SparkContext.getOrCreate(sf)
sc.setLogLevel("ERROR")  # silence INFO/WARN chatter on the driver
rdd = sc.parallelize(words)
# Key each word by itself; the value is the word with "1" appended,
# e.g. "this" -> ("this", "this1"). Note: rebinds the name `rdd`.
rdd = rdd.map(lambda x: (x, x + "1"))

# Wrap each value in a two-element list: ("this", ["this1", "this1"]).
rdd1 = rdd.mapValues(lambda v: [v, v])

# Print only the value lists.
for x in rdd1.values().collect():
    print(x)

print("There are %d elements" % rdd1.count())

# top() returns the 5 largest (key, value) tuples (descending tuple order);
# first()/take() return elements in partition order; takeOrdered() ascending.
print(rdd1.top(5))
print(rdd1.first())
print(rdd1.take(5), rdd1.takeOrdered(5))

def foreach_cb(x):
    """Print a (key, value-list) pair and append a "***" marker to its list.

    Mutates ``x[1]`` in place; returns None.
    """
    value_list = x[1]
    print("*", x, type(value_list))
    value_list.append("***")
# NOTE(review): foreach runs on the executors against deserialized copies of
# each element, so the "***" appended in foreach_cb is presumably NOT visible
# in the driver-side collect() below — confirm against Spark's
# closure-serialization documentation.
rdd1.foreach(f=foreach_cb)
for w in rdd1.collect():
    print(w)

def combiner(a):
    """createCombiner for combineByKey: start a per-key accumulator list
    from the first value seen for that key."""
    accumulator = [a]
    return accumulator

def merge_func(a, b):
    """mergeValue for combineByKey: fold one more value ``b`` into the
    per-partition accumulator list ``a`` (in place) and return it."""
    a += [b]
    return a

def merge_combiner(a, b):
    """mergeCombiners for combineByKey: concatenate two partition-level
    accumulator lists in place and return the (mutated) left one."""
    a.extend(b)
    return a

def values_cb(x):
    """mapValues callback: render a value as its str() form with a
    trailing '*' appended."""
    return "".join((str(x), "*"))

# combineByKey gathers every value per key into a list using the three
# helpers above: combiner (first value), merge_func (within a partition),
# merge_combiner (across partitions).
for w in rdd.combineByKey(combiner, merge_func, merge_combiner).collect():
    print(w)

print("Do groupByKey")
# groupByKey yields (key, iterable); materialize the iterable to print it.
for w in rdd.groupByKey().collect():
    print(w[0], [x for x in w[1]])


# Append '*' to each value via values_cb, keys untouched.
for x in rdd.mapValues(f=values_cb).collect():
    print(x)

# for x in rdd.combineByKey()
# sc = SparkContext(conf=sf)
# hiveCtx = SparkSession.builder.config(conf=sf).getOrCreate()
# hiveCtx.sparkContext.setLogLevel("WARN")

# sqlContext.read.format("jdbc").option("url", "jdbc:mysql://localhost/uber").option("driver", "com.mysql.jdbc.Driver").option("dbtable", "trips").option("user", "root").option("password", "root").load()

# table = hiveCtx.read.jdbc("jdbc:mysql://localhost/roc", "django_migrations", properties={ 'user' : 'root', 'password' : '123456' })
# data = table.select(["id", "app"])
# data.write.save(format="jdbc", url="jdbc:mysql://localhost/ggchat", dbtable="test2", user="root", password="123456")
# data.write.format("jdbc").options(url="jdbc:mysql://localhost/ggchat", dbtable="test1", user="root", password="123456").save()

# csv_file = hiveCtx.read.csv("file:///media/psf/Home/Workspace/Rimi/P1901/lessons/spark/users.csv", sep=",", header=True)
# csv_file.show()
# json_file = hiveCtx.read.json("file:///media/psf/Home/Workspace/Rimi/P1901/lessons/spark/users.json")
# json_file.createOrReplaceTempView("user")
# df = hiveCtx.sql("select name, age from user")
# # df = hiveCtx.sql("select name, age from user.`file:///media/psf/Home/Workspace/Rimi/P1901/lessons/spark/users.json`")
# # df = json_file.select(["name", "age"])
# df.show()
# df.write.save("file://", format="json")
# sc.setLogLevel("WARN")
# rdd = sc.parallelize(words)
# new_rdd = sc.parallelize(new_words)
#
# def reduce_func(x, y):
#     return str(x) + "-" + str(y)

# pair_rdd = rdd.map(lambda x: (x, len(x)))
# def count_words(e):
#     key, value = e
#     return (key, len([m for m in value]))
#
# rdd1 = pair_rdd.groupByKey()
# rdd2 = rdd1.map(count_words)
#
# for x in rdd2.collect():
#     print(x)

# sc.stop()
