# input_df = spark.read\
#     .format("org.elasticsearch.spark.sql")\
#     .option("es.resource","hive_test")\
#     .option("es.nodes","up01:9200")\
#     .option("es.read.field.exclude","name")\
#     .option("es.read.field.include","id,age")\
#     .load()
#
# # Write data to Elasticsearch
# input_df.write.format("es")\
#     .mode("overwrite")\
#     .option("es.resource","hive_test")\
#     .option("es.nodes","up01:9200")\
#     .option("es.mapping.id","id")\
#     .save()


# Print each weight label with its price divided by 20
# (units of the divisor unknown — TODO confirm what "per 20" means here).
PRICES = {
    "30g": 12.9,
    "38g": 18,
    "40g": 19.8,
    "50g": 25,
    "75g": 34,
}
for label, price in PRICES.items():
    print(label, price / 20)