from pyspark.sql.functions import *
from pyspark.sql.session import SparkSession

# 1. Create the SparkSession entry point for Spark SQL.
# shuffle.partitions=1 keeps shuffle output in a single partition for this local demo.
spark = (
    SparkSession.builder
    .master("local")
    .appName("dsl")
    .config("spark.sql.shuffle.partitions", 1)
    .getOrCreate()
)

# 1. CSV
# Read comma-separated data with an explicit schema (the file has no header row).
students_df = (
    spark.read
    .option("sep", ",")
    .schema("id STRING,name STRING,age INT,sex STRING,clazz STRING")
    .csv("../../data/students.txt")
)

# Write the DataFrame back out as tab-separated CSV, replacing any previous output.
(
    students_df.write
    .option("sep", "\t")
    .mode("overwrite")
    .csv("../../data/students_csv")
)

# 2. JSON
# Save the DataFrame in JSON format, replacing any previous output.
students_df.write.mode("overwrite").json("../../data/students_json")

# Read the JSON back; Spark infers the table schema from the JSON documents.
students_json_df = spark.read.json("../../data/students_json")

students_json_df.show()

# 3. ORC
# ORC is a compressed columnar format that carries the table schema with the data.
students_df.write.mode("overwrite").orc("../../data/students_orc")

students_orc_df = spark.read.orc("../../data/students_orc")

students_orc_df.show()

# 4. Parquet
# Parquet is likewise a compressed columnar format that carries the table schema
# with the data. (The original comment here said "orc" — copy/paste slip.)
students_df.write.mode("overwrite").parquet("../../data/students_parquet")

students_parquet_df = spark.read.parquet("../../data/students_parquet")

students_parquet_df.show()

# 5. JDBC
# 1. Read from MySQL.
# The JDBC driver jar must be on pyspark's jars path, e.g.
# <venv>\Lib\site-packages\pyspark\jars (mysql-connector-java-8.0.29.jar).
# NOTE(review): credentials are hard-coded — acceptable for a demo, but move
# them to configuration/environment variables for anything real.
jdbc_url = "jdbc:mysql://master:3306/myemployees"
jdbc_props = {"user": "root", "password": "123456"}

students_jdbc_df = spark.read.jdbc(url=jdbc_url, table="students", properties=jdbc_props)

students_jdbc_df.show()

# Write a filtered subset back to MySQL, replacing the existing table contents.
(
    students_df
    .where(col("clazz") == "文科一班")
    .write
    .jdbc(url=jdbc_url, table="students", mode="overwrite", properties=jdbc_props)
)
