from pyspark.sql import SparkSession
import os
import pyspark.sql.functions as F
"""
-------------------------------------------------
   Description :	TODO：写出数据到ES
-------------------------------------------------
"""

# 0. Configure the process environment so Spark finds the right JDK and
#    uses the same conda interpreter on both the driver and the workers.
_PYSPARK_INTERPRETER = '/root/anaconda3/envs/pyspark_env/bin/python'
os.environ.update({
    'JAVA_HOME': '/export/server/jdk1.8.0_241/',
    'SPARK_HOME': '/export/server/spark',
    'PYSPARK_PYTHON': _PYSPARK_INTERPRETER,
    'PYSPARK_DRIVER_PYTHON': _PYSPARK_INTERPRETER,
})

# Build (or reuse) a SparkSession via the builder pattern:
# SparkSession.builder.<config...>.getOrCreate().
# NOTE(review): the original comments here described Hive settings
# (hive.metastore.uris / hive.metastore.warehouse.dir / enableHiveSupport),
# but this session does NOT enable Hive support — it only runs a local
# Spark SQL job that reads from MySQL over JDBC.
spark = (
    SparkSession.builder
    .master("local[2]")
    .appName("SparkSQLAppName")
    .config("spark.sql.shuffle.partitions", 4)  # small local job: 4 shuffle partitions
    .getOrCreate()
)

# JDBC connection details for the source MySQL database.
url = "jdbc:mysql://up01:3306/tags_dat"
properties = {"user": "root", "password": "123456"}

# Load the whole source table into a DataFrame.
df = spark.read.jdbc(url=url, table="tbl_users", properties=properties)

# Cast every Decimal-typed column to string before writing to ES
# (the ES connector does not map Spark's DecimalType cleanly).
#
# FIX: the original looped `df = df.withColumn(...)` once per decimal column.
# PySpark's documentation warns that each withColumn call adds a projection
# to the logical plan, so calling it in a loop can build very large plans;
# a single select() applies all the casts in one projection while preserving
# column names and order.
df = df.select(
    *[
        F.col(field.name).cast("string")
        if field.dataType.typeName() == 'decimal'
        else F.col(field.name)
        for field in df.schema.fields
    ]
)

df.show()

# Write the DataFrame to Elasticsearch through the elasticsearch-hadoop
# connector ("es" data source; requires the connector jar on the classpath).
es_writer = df.write.format("es")
es_writer = es_writer.option("es.resource", "test_spark_write_es")  # target index
es_writer = es_writer.option("es.nodes", "up01:9200")               # ES node address
es_writer = es_writer.option("es.mapping.id", "id")                 # column used as document _id
es_writer.save()

# Shut down the SparkSession and release its resources.
spark.stop()