import os

from pyspark.sql import SparkSession
from pyspark import StorageLevel

# Point Spark at the cluster installation and the Python interpreter.
# (Paths are for the Linux cluster; adjust when running elsewhere.)
_spark_env = {
    'SPARK_HOME': '/export/server/spark',
    'PYSPARK_PYTHON': '/export/server/anaconda3/bin/python',
    'PYSPARK_DRIVER_PYTHON': '/export/server/anaconda3/bin/python',
}
os.environ.update(_spark_env)

# Build the SparkSession with Hive support enabled, pointing the SQL
# warehouse at a local directory and pinning the Hive metastore version.
builder = SparkSession.builder
builder = builder.appName("insurance_app")
builder = builder.config("spark.some.config.option", "some-value")
builder = builder.config("spark.sql.warehouse.dir", "file:///E:/spark_warehouse")
builder = builder.config("spark.sql.hive.metastore.version", "2.3.7")
spark = builder.enableHiveSupport().getOrCreate()

# Join the cash-value source (cv_src) with the reserve source (rsv_src)
# on the shared policy key columns (purchase age, sex, premium period,
# policy year) to assemble one actuarial result row per policy cell.
_JOIN_SQL = """
    SELECT
        t1.age_buy,
        t1.sex,
        t1.ppp,
        t1.bpp,
        t1.policy_year,
        t1.sa,
        t1.cv_1a,
        t1.cv_1b,
        t1.sur_ben,
        t1.np,
        t2.rsv2_re,
        t2.rsv1_re,
        t2.np_
    FROM insurance_dw.cv_src t1
    JOIN insurance_dw.rsv_src t2
        ON t1.age_buy = t2.age_buy
        AND t1.ppp = t2.ppp
        AND t1.sex = t2.sex
        AND t1.policy_year = t2.policy_year
"""
df = spark.sql(_JOIN_SQL)

# Optional: dump the joined result as CSV files.
df.write.csv("path/to/output/csv", mode='overwrite')

# Export the joined result to MySQL over JDBC (table is overwritten).
_jdbc_options = {
    'url': 'jdbc:mysql://node1:3306/insurance_app',
    'dbtable': 'policy_actuary',
    'user': 'root',
    'password': '123456',
}
writer = df.write.format('jdbc').options(**_jdbc_options)
writer.mode('overwrite').save()

# Cache the joined result (memory, spilling to disk) and materialize the
# cache with an eager count() action.
# NOTE(review): the CSV and JDBC writes above already ran before this
# persist, so each of them recomputed the join from scratch; persisting
# before the first action would avoid the recomputation — confirm the
# intended ordering.
df.persist(storageLevel=StorageLevel.MEMORY_AND_DISK).count()

# Register the joined result as a temporary view so the SQL insert
# below can reference it as `t1`.
df.createTempView("t1")

# Overwrite the Hive results table from the temporary view.
_insert_sql = """
    INSERT OVERWRITE TABLE insurance_app.policy_actuary
    SELECT * FROM t1
"""
spark.sql(_insert_sql)

# Second MySQL export, via DataFrameWriter.jdbc() this time.
# NOTE(review): this overwrites the same `policy_actuary` table that the
# format('jdbc') export earlier in this script already wrote — the two
# exports look redundant (the original comment even says it should be
# avoided); confirm that only one of them is intended.
df.write.jdbc(
    url="jdbc:mysql://node1:3306/insurance_app?createDatabaseIfNotExist=true&serverTimezone=UTC&characterEncoding=utf8&useUnicode=true",
    table='policy_actuary',
    mode='overwrite',
    properties={'user': 'root', 'password': '123456'}
)