from pyspark.sql import SparkSession
from operator import add
from datetime import datetime
import time

def TestFunc(data):
    """Print the incoming record, sleep ~1 second, and return the elapsed time.

    Used as a Spark ``map`` function over ``df.rdd`` to simulate per-record
    work and measure how long each record takes to process.

    Args:
        data: the record being processed (a pyspark ``Row`` when mapped
            over a DataFrame's RDD — TODO confirm against caller).

    Returns:
        float: elapsed seconds for this call (~1.0, dominated by the sleep).
    """
    # perf_counter() is monotonic, so the measurement cannot be skewed by
    # wall-clock adjustments (NTP, DST) the way datetime.now() arithmetic can.
    start = time.perf_counter()
    print("Test Function")
    print(data)
    time.sleep(1)
    return time.perf_counter() - start

# Build (or reuse) the SparkSession for this script.
#
# NOTE: "spark.default.parallelism" is a SparkContext-level property — it must
# be supplied before the context is created. Setting it with spark.conf.set()
# after getOrCreate() silently has no effect, so it goes into the builder
# config here instead.
spark = SparkSession \
    .builder \
    .appName("Python Spark SQL basic example") \
    .config("spark.some.config.option", "some-value") \
    .config("spark.default.parallelism", 5) \
    .getOrCreate()

# "spark.sql.shuffle.partitions" IS runtime-settable, so conf.set() works here.
spark.conf.set("spark.sql.shuffle.partitions", 5)

sc = spark.sparkContext
# Read the MySQL table as a DataFrame over JDBC, split into parallel reads
# on the numeric "id" column (rows with id in [0, 20) spread over 6 partitions;
# out-of-range rows all land in the first/last partition).
#
# NOTE(security): the DB password was hard-coded in source. It can now be
# supplied via the MYSQL_PASSWORD environment variable; the original value
# remains the default so existing local runs keep working.
import os

df = spark.read.format("jdbc") \
    .option("url", "jdbc:mysql://localhost:3306") \
    .option("dbtable", "instruments.bondmap") \
    .option("user", "root") \
    .option("password", os.environ.get("MYSQL_PASSWORD", "kirk2018")) \
    .option("lowerBound", 0) \
    .option("upperBound", 20) \
    .option("partitionColumn", "id") \
    .option("numPartitions", 6) \
    .load()

# Show how the configured parallelism compares with the actual JDBC
# partitioning of the loaded DataFrame.
print(spark.sparkContext.defaultParallelism)
print(spark.sparkContext.defaultMinPartitions)
print(df.rdd.getNumPartitions())

# Map TestFunc over every row — each record becomes a ~1s task executed on
# the partition that holds it — then sum the per-record elapsed times.
# map() is lazy; nothing runs until the reduce() action below.
lines = df.rdd.map(TestFunc)

# Reduce: total seconds spent across all records (operator.add).
timeConsumed = lines.reduce(add)
print("final", timeConsumed, "Second")  # fixed typo: was "Secont"

# Release cluster / JVM resources explicitly instead of relying on
# interpreter shutdown.
spark.stop()