from pyspark import StorageLevel
from pyspark.shell import spark
from pyspark.sql import SparkSession

# -- # 3) Export the actuarial result table to the Hive APP layer and MySQL.
# NOTE(review): this script reads Hive tables (insurance_dw.*) and writes to
# insurance_app, so the session must be built with Hive support enabled —
# without it, `insert overwrite table` against the metastore fails.
spark = SparkSession.builder \
    .master('local[*]') \
    .appName("app") \
    .enableHiveSupport() \
    .getOrCreate()

# Join the cash-value source with the reserve source on the policy key
# (age at purchase, premium-payment period, sex, policy year).
# No trailing ';' inside the statement: spark.sql() parses a single
# statement and a trailing semicolon raises a ParseException on older Spark.
df = spark.sql("""
    select
        t1.age_buy,
        t1.sex,
        t1.ppp,
        t1.bpp,
        t1.policy_year,
        t1.sa,
        t1.cv_1a,
        t1.cv_1b,
        t1.sur_ben,
        t1.np,
        t2.rsv2_re,
        t2.rsv1_re,
        t2.np_
    from insurance_dw.cv_src t1 join  insurance_dw.rsv_src t2
        on t1.age_buy = t2.age_buy and t1.ppp = t2.ppp and t1.sex = t2.sex and t1.policy_year = t2.policy_year
""")
# --     # Cache the joined result: it is consumed twice (Hive insert + MySQL
# --     # write). MEMORY_AND_DISK spills to disk when memory is full; count()
# --     # forces materialization of the cache up front.
df.persist(storageLevel=StorageLevel.MEMORY_AND_DISK).count()

# createOrReplaceTempView is idempotent — plain createTempView raises
# AnalysisException when the view already exists (e.g. on a shell re-run).
df.createOrReplaceTempView('t1')
# --     # 3.1 Load the result into the Hive APP-layer table.
spark.sql("""
    insert overwrite table insurance_app.policy_actuary
    select  * from  t1
""")
# --     # 3.2 Load the result into the MySQL APP-layer table.
df.write.jdbc(
    url="jdbc:mysql://node1:3306/insurance_app?createDatabaseIfNotExist=true&serverTimezone=UTC&characterEncoding=utf8&useUnicode=true",
    table='policy_actuary',
    mode='overwrite',
    properties={'user': 'root', 'password': '123456'}
)

# Release the cache and shut the session down cleanly.
df.unpersist()
spark.stop()