package org.apache.spark.examples.es;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.VoidFunction;
import org.elasticsearch.spark.rdd.api.java.JavaEsSpark;
import org.spark_project.guava.collect.ImmutableList;
import org.spark_project.guava.collect.ImmutableMap;

import java.util.Map;

/**
 * Example that writes raw JSON strings to Elasticsearch through the
 * elasticsearch-hadoop Spark connector ({@code JavaEsSpark.saveJsonToEs}).
 *
 * <p>Runs locally (`local[1]`) against the ES node configured below and
 * indexes two documents into the {@code spark/docs} index/type.
 */
public class JavaESearchJsonExample {

    public static void main(String[] args) {
        // Windows-only workaround: Hadoop needs winutils.exe under this dir.
        System.setProperty("hadoop.home.dir", "C:/hadoop-2.6.0");

        SparkConf conf = new SparkConf()
                .setAppName("JavaESearchJsonExample")
                .setMaster("local[1]");
        conf.set("es.nodes", "172.30.17.156");    // Elasticsearch host
        conf.set("es.port", "9200");              // Elasticsearch HTTP port
        conf.set("es.index.auto.create", "true"); // create the index if it does not exist
        conf.set("es.write.operation", "index");  // add new docs / replace existing ones

        // try-with-resources: JavaSparkContext implements Closeable, so the
        // context is stopped even if the write fails.
        try (JavaSparkContext jsc = new JavaSparkContext(conf)) {
            String json1 = "{\"id\" : \"1\",\"reason\" : \"business\",\"airport\" : \"SFO\"}";
            String json2 = "{\"id\" : \"2\",\"participants\" : 5,\"airport\" : \"OTP\"}";
            JavaRDD<String> stringRDD = jsc.parallelize(ImmutableList.of(json1, json2));

            // Second argument is the target resource in "_index/_type" form.
            JavaEsSpark.saveJsonToEs(stringRDD, "spark/docs");
        }
    }
}
