package com.leal.elasticsearch

import com.leal.util.{DateUtil, SparkUtil}
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.elasticsearch.spark.sql.sparkDatasetFunctions

/**
 * @Classname bigdata
 * @Description Imports data from a Hive table into Elasticsearch.
 * @Date 2024/3/15 20:41
 * @Created by leal
 */
object HiveToEs {

  /**
   * Entry point: reads `cx_ads_safe.es_student` from Hive and writes it to a
   * date-suffixed Elasticsearch index (`es_student_yyyyMMdd/docs`).
   */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkUtil.initSpark(enableHive = true)

    // elasticsearch-hadoop connector settings.
    // NOTE: the target index is passed explicitly to saveToEs below, which
    // takes precedence over "es.resource" — so that key is intentionally
    // omitted here (the original listed a different, never-used resource,
    // which was misleading).
    val options: Map[String, String] = Map(
      "es.nodes" -> "127.0.0.1",
      "es.port" -> "9200",
      "pushdown" -> "true",
      // Required when Spark and ES are not on the same network segment.
      "es.nodes.wan.only" -> "true",
      // Retry when concurrent updates hit the same document and conflict.
      "es.update.retry.on.conflict" -> "3",
      // Controls how many partitions (and thus tasks) Spark creates on read.
      "es.input.max.docs.per.partition" -> "5000000",
      // Field mapping configuration; defaults to automatic otherwise.
      "es.mapping.date.rich" -> "false",
      "es.mapping.fields" -> "name:text,age:long,sex:keyword,address:text",
      // Use the "id" column as the ES document _id (idempotent re-runs).
      "es.mapping.id" -> "id"
    )

    val frame: DataFrame = spark.sql("SELECT * FROM cx_ads_safe.es_student")
    frame.show(5, truncate = false)

    // Save to a date-suffixed index. The s-interpolator is correct here:
    // the original used f"" with no format specifiers, which is misleading.
    frame.saveToEs(s"es_student_${DateUtil.getCurrentDate(format = "yyyyMMdd")}/docs", options)

    spark.close()
  }
}
