# from pyspark.python.pyspark.shell import spark

from pyspark.sql import SparkSession
import os

from pyspark.storagelevel import StorageLevel
# Pin the remote runtime environment (JDK / Spark / Python interpreter)
# so a single known install is used and multi-version conflicts are avoided.
_PYTHON_BIN = "/root/anaconda3/bin/python"
os.environ["JAVA_HOME"] = "/export/server/jdk"
os.environ["SPARK_HOME"] = "/export/server/spark"
os.environ["PYSPARK_PYTHON"] = _PYTHON_BIN
os.environ["PYSPARK_DRIVER_PYTHON"] = _PYTHON_BIN

# IDE shortcut: type `main` then Enter to generate this entry-point guard
if __name__ == '__main__':
    # Build a Spark session that runs on YARN with Hive support so the
    # insurance_app warehouse tables are resolvable.
    spark = SparkSession.builder \
        .appName("InsuranceActuaryExport") \
        .master("yarn") \
        .config("spark.hadoop.fs.defaultFS", "hdfs://node1:8020") \
        .config("spark.sql.warehouse.dir", "hdfs://node1:8020/user/hive/warehouse") \
        .enableHiveSupport() \
        .getOrCreate()

    # Read the actuarial aggregation result from the Hive APP layer.
    # FIX: spark.sql() rejects a trailing ';' inside the statement text
    # (ParseException: extraneous input ';'), so the semicolon is removed.
    df = spark.sql("""
            select * from insurance_app.app_agg_month_incre_rate
        """)

    # Cache the result (memory first, spilling to disk if it does not fit)
    # and materialize the cache eagerly with count() so the JDBC write
    # below reads from the cache instead of re-running the query.
    df.persist(storageLevel=StorageLevel.MEMORY_AND_DISK).count()

    # Export the result table into the MySQL APP-layer database.
    # NOTE(review): credentials are hard-coded — move them to a config
    # file or secret store before this runs outside a sandbox.
    df.write.jdbc(
        url="jdbc:mysql://node1:3306/insurance_app?createDatabaseIfNotExist=true&serverTimezone=UTC&characterEncoding=utf8&useUnicode=true",
        table='app_agg_month_incre_rate',
        mode='overwrite',
        properties={'user': 'root', 'password': '123456'}
    )

    # Release the cache and shut down the session so the YARN application
    # terminates cleanly instead of lingering until JVM exit.
    df.unpersist()
    spark.stop()