from pyspark.sql import SparkSession
import os

# 0. Configure the runtime environment so Spark can locate the JVM,
#    the Spark install, and the Python interpreter for driver and workers.
_env_settings = {
    'JAVA_HOME': '/export/server/jdk1.8.0_241/',
    'SPARK_HOME': '/export/server/spark',
    'PYSPARK_PYTHON': '/root/anaconda3/envs/pyspark_env/bin/python',
    'PYSPARK_DRIVER_PYTHON': '/root/anaconda3/envs/pyspark_env/bin/python',
}
for _name, _value in _env_settings.items():
    os.environ[_name] = _value

# 1. Build (or reuse) the SparkSession that drives this job:
#    local mode with 2 cores, and 4 shuffle partitions to keep
#    small local jobs from spawning the default 200 tasks.
spark = (
    SparkSession.builder
    .master("local[2]")
    .appName("SparkSQLAppName")
    .config("spark.sql.shuffle.partitions", 4)
    .getOrCreate()
)

# 2. Read data from Elasticsearch (requires the es-hadoop connector jar):
#    es.resource: index/type (the type part may be omitted)
#    es.nodes: ES node address(es)
#    es.read.field.include: fields to include on read
#    es.read.field.exclude: fields to exclude on read
_es_options = {
    "es.resource": "hive_test",
    "es.nodes": "up01:9200",
    "es.read.field.include": "id,age",
    "es.read.field.exclude": "name",
}
input_df = spark.read.format("es").options(**_es_options).load()

# 3. Process the data: print the inferred schema and preview rows
#    (both are actions that trigger the Elasticsearch read).
input_df.printSchema()
input_df.show()

# 4. Write out the data (intentionally left empty in this example)


# 5. Stop the SparkSession and release its resources
spark.stop()
