from pyspark.sql import SparkSession
import os

# ---------------------------------------------------------------------------
# PySpark job: copy the MySQL table edu.base_region into edu.base_region42
# using Spark SQL's JDBC data source.
# ---------------------------------------------------------------------------

# 0. Environment variables must be set before the JVM / Python workers start,
#    so they stay at module level (same behavior as before on import).
os.environ['JAVA_HOME'] = '/export/server/jdk1.8.0_241/'
os.environ['SPARK_HOME'] = '/export/server/spark'
os.environ['PYSPARK_PYTHON'] = '/root/anaconda3/envs/pyspark_env/bin/python'
os.environ['PYSPARK_DRIVER_PYTHON'] = '/root/anaconda3/envs/pyspark_env/bin/python'

# Shared JDBC connection settings for both the read and the write.
# NOTE(review): credentials are hard-coded in source — move them to a config
# file or environment variables before deploying.
JDBC_URL = 'jdbc:mysql://up01:3306/edu'
JDBC_PROPERTIES = {'user': 'root', 'password': '123456'}


def main():
    """Read base_region from MySQL and write it back as base_region42."""
    # 1. Build the SparkSession: local mode with 2 cores, 4 shuffle partitions.
    spark = SparkSession \
        .builder \
        .master("local[2]") \
        .appName("SparkSQLAppName") \
        .config("spark.sql.shuffle.partitions", 4) \
        .getOrCreate()

    try:
        # 2. Read the source table from MySQL via JDBC.
        input_df = spark.read.jdbc(url=JDBC_URL,
                                   table='base_region',
                                   properties=JDBC_PROPERTIES)

        # 3. Inspect the data (schema + first rows on stdout).
        input_df.printSchema()
        input_df.show()

        # 4. Save the data back to MySQL. mode='overwrite' drops and
        #    recreates the target table, with its schema inferred from
        #    the DataFrame.
        input_df.write.jdbc(url=JDBC_URL,
                            table='base_region42',
                            mode='overwrite',
                            properties=JDBC_PROPERTIES)
    finally:
        # 5. Always release Spark resources, even if the read/write failed.
        spark.stop()


if __name__ == '__main__':
    main()
