# Write a DataFrame to Elasticsearch
def write_data_es1(df):
    """Write *df* to Elasticsearch via the es-hadoop Spark SQL connector.

    Targets index/type ``index/doc`` on 127.0.0.1 with basic auth.
    NOTE(review): the port here (17100) differs from the 7100 used in
    write_data_es2 — confirm which endpoint is correct.
    """
    es_options = {
        "es.nodes": "127.0.0.1",
        "es.port": 17100,
        "es.net.http.auth.user": "user",
        "es.net.http.auth.pass": "password",
        "es.resource": "index/doc",
    }
    writer = df.write.format("org.elasticsearch.spark.sql")
    for key, value in es_options.items():
        writer = writer.option(key, value)
    writer.save()

# Write an RDD to Elasticsearch
def write_data_es2(df):
    """Write *df* to Elasticsearch through the Hadoop OutputFormat API.

    Each row is converted to a (None, dict) pair as required by
    EsOutputFormat. NOTE(review): unlike write_data_es1 this sends no
    auth options and targets port 7100, not 17100 — verify intentional.
    """
    connector_conf = {
        "es.nodes": "127.0.0.1",
        "es.port": "7100",
        "es.resource": "index/doc",
    }
    pairs = df.rdd.map(lambda row: (None, row.asDict()))
    pairs.saveAsNewAPIHadoopFile(
        path='-',  # unused by EsOutputFormat but required by the API
        outputFormatClass="org.elasticsearch.hadoop.mr.EsOutputFormat",
        keyClass="org.apache.hadoop.io.NullWritable",
        valueClass="org.elasticsearch.hadoop.mr.LinkedMapWritable",
        conf=connector_conf,
    )

# Write a dict to Elasticsearch
def write_data_es3(result):
    """Index a single document (*result*, a dict-like body) into ES.

    Uses the official elasticsearch-py client against 127.0.0.1:7100.
    NOTE(review): ``doc_type`` is deprecated in ES 7.x and removed in
    8.x clients — this call assumes a pre-8 client/cluster; confirm.
    """
    from elasticsearch import Elasticsearch

    client = Elasticsearch(
        '127.0.0.1',
        http_auth=('eagles', 'datatom.com'),
        port=7100,
    )
    client.index(index="gatest", doc_type="gatest", body=result)

# Write a DataFrame to CSV
def write_data_csv(df):
    """Write *df* as a single CSV file (with header) to the local path.

    Uses ``coalesce(1)`` instead of ``repartition(1)``: both yield one
    output part file, but coalesce merges existing partitions without
    triggering a full shuffle, which is cheaper for this write-once use.
    NOTE(review): the write fails if the target path already exists
    (default ``errorifexists`` mode) — presumably intentional; confirm.
    """
    df.coalesce(1).write.csv("file:///root/shixu/yuansaijie.csv", header=True)