from pyspark import SparkConf
from pyspark.sql import SparkSession, dataframe

# Spark cluster connection and executor resource configuration.
sf = SparkConf().setMaster("spark://10.2.3.41:7077").setAppName("spark sql")

# BUG FIX: the key was previously misspelled "spark.executor.memeory", so
# Spark silently ignored it and fell back to the default executor memory.
# Memory sizes also need a unit suffix ("512" alone would be read as bytes).
sf.set("spark.executor.memory", "512m")
sf.set("spark.executor.cores", "3")

# SparkSession is the single entry point for the DataFrame / SQL APIs.
session = SparkSession.builder.config(conf=sf).getOrCreate()
# Silence INFO/WARN console noise; only errors are printed.
session.sparkContext.setLogLevel("ERROR")

print("Read JSON file")
# Load the JSON data set into a DataFrame — a distributed, table-like
# collection with named columns.
jsonData = session.read.json("file:///media/psf/Home/Workspace/Rimi/P1901/lessons/spark/users.json")
# Fail fast if the reader did not produce a DataFrame. An `assert` would be
# stripped under `python -O`, so raise explicitly instead.
if not isinstance(jsonData, dataframe.DataFrame):
    raise TypeError("expected a DataFrame, got %r" % type(jsonData))

# Alternative: register a temp view and query it with SQL.
# jsonData.createOrReplaceTempView("users")
# data = session.sql("select name from users where age > 30")

# Alternative: project a single column.
# data = jsonData.select("name")

# Filter rows with the DataFrame API (column expression, evaluated lazily).
data = jsonData.filter(jsonData.age > 30)
data.show()

# Optionally persist the filtered result back to disk.
# data.write.json("file:///media/psf/Home/Workspace/Rimi/P1901/lessons/spark/users2.json")

# Drop down to the underlying RDD for low-level transforms and actions.
dataRDD = data.rdd
print("Read CSV file")
# Read the CSV data set; header=True treats the first row as column names
# rather than as data.
csv_frame = session.read.csv(
    "file:///media/psf/Home/Workspace/Rimi/P1901/lessons/spark/users.csv",
    header=True,
)
csv_frame.show()

print("Connect to MySQL database")
# Pull table "test1" from the ggchat database over JDBC into a DataFrame.
# NOTE(review): credentials are hard-coded here for the lesson — in real
# code they belong in external configuration, not in source.
jdbc_props = {"user": "root", "password": "123456"}
mysql_frame = session.read.jdbc(
    "jdbc:mysql://127.0.0.1/ggchat",
    "test1",
    properties=jdbc_props,
)
mysql_frame.show()