package day13

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.elasticsearch.spark._

object ESRDD_Demo {

  /**
   * Demo: read every document from the Elasticsearch "blog" index as an RDD,
   * project out the `id` / `title` / `content` fields, and print the result.
   *
   * Requires a reachable ES cluster on the configured nodes; this is an
   * I/O-driven example program, not a reusable library entry point.
   */
  def main(args: Array[String]): Unit = {
    // Spark + elasticsearch-hadoop connector configuration.
    val conf = new SparkConf()
      .setAppName("esrdd")
      .setMaster("local")
      .set("es.nodes", "master,slave1,slave2")  // ES cluster node list
      .set("es.port", "9200")
      .set("es.index.auto.create", "true")      // auto-create the index if missing
    val sc = new SparkContext(conf)

    try {
      // match_all query: fetch every document in the index.
      // NOTE: the original used `.stripMargin` with no '|' margin characters,
      // which made it a no-op; the margins below make the intent real.
      val query =
        """|{"query":{"match_all": {}}}
           |""".stripMargin

      // `esRDD` is not part of core Spark — it comes from the implicit
      // enrichment brought in by `import org.elasticsearch.spark._`.
      // Each element is (documentId, fieldName -> fieldValue map).
      val queryRDD: RDD[(String, collection.Map[String, AnyRef])] = sc.esRDD("blog", query)

      // Drop the document ids; keep only each document's field map.
      val valueRDD: RDD[collection.Map[String, AnyRef]] = queryRDD.map(_._2)

      // Project out the fields of interest, defaulting to "" when absent.
      val dataRDD: RDD[(AnyRef, AnyRef, AnyRef)] = valueRDD.map { doc =>
        val id: AnyRef = doc.getOrElse("id", "")
        val title: AnyRef = doc.getOrElse("title", "")
        val content: AnyRef = doc.getOrElse("content", "")
        (id, title, content)
      }

      println(dataRDD.collect.toBuffer)
    } finally {
      // Always release the SparkContext, even if the ES query or collect fails.
      sc.stop()
    }
  }
}
