package com.sunzm.spark.elasticsearch

import com.sunzm.common.utils.ParameterTool
import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Dataset, SaveMode, SparkSession}
import org.elasticsearch.spark.sparkContextFunctions

/**
 *
 * ReadFromESBySpark
 *
 * spark-submit \
 * --master yarn \
 * --class com.sunzm.spark.elasticsearch.ReadFromESBySpark \
 * --deploy-mode cluster \
 * --num-executors 3 \
 * --executor-cores 2 \
 * --executor-memory 6G  \
 * --driver-cores 2 \
 * --driver-memory 2G  \
 * /home/hadoop/es/offlineJobProcess-es6-1.0-SNAPSHOT.jar \
 * --es.nodes 10.111.6.110,10.111.6.111,10.111.6.112,10.111.6.113 \
 * --indexName call_log_info_2018-11 \
 * --typeName call_log_info \
 * --coalesceNum 8 \
 * --savePath "/tmp/sunzm/es/call_log_info/2018-11/"
 *
 * @author Administrator
 * @version 1.0
 * @date 2021-08-30 15:05
 */
object ReadFromESBySpark {

  /**
   * Entry point: reads every document of an Elasticsearch index/type as JSON
   * via the elasticsearch-spark connector and writes the result as Parquet.
   *
   * Required args: --es.nodes, --indexName, --typeName, --savePath
   * Optional args: --master, --parallelism (default 8), --shufflePartitions (default 8),
   *                --es.index.auto.create (default false), --es.port (default 9200),
   *                --query (default "" = match all), --coalesceNum (default 1)
   */
  def main(args: Array[String]): Unit = {

    // Silence Spark's verbose INFO logging.
    Logger.getLogger("org").setLevel(Level.WARN)

    val params: ParameterTool = ParameterTool.fromArgs(args)

    val sparkBuilder: SparkSession.Builder = SparkSession
      .builder()
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      // Connect only through the declared nodes (required when the cluster's
      // internal addresses are not reachable from the Spark executors).
      .config("es.nodes.wan.only", "true")
      .config("es.input.use.sliced.partitions", "false")
      .config("es.input.max.docs.per.partitions", "100000000")

    // Only override the master when explicitly requested (e.g. "local[*]" for
    // local debugging); otherwise defer to spark-submit / the cluster manager.
    if (params.has("master")) {
      sparkBuilder.master(params.getString("master"))
    }

    sparkBuilder.config("spark.default.parallelism", params.getInt("parallelism", 8))
    sparkBuilder.config("spark.sql.shuffle.partitions", params.getInt("shufflePartitions", 8))
    // Whether Spark may auto-create the ES index (only relevant for writes).
    sparkBuilder.config("es.index.auto.create", params.getBoolean("es.index.auto.create", false))

    // Elasticsearch connection: comma-separated host list and HTTP port.
    sparkBuilder.config("es.nodes", params.getRequired("es.nodes"))
    sparkBuilder.config("es.port", params.getInt("es.port", 9200))

    val spark = sparkBuilder.getOrCreate()

    val sc = spark.sparkContext

    import spark.implicits._

    val indexName = params.getRequired("indexName")
    val typeName = params.getRequired("typeName")
    // Optional ES query string, e.g. "?q=callRecordId:766b50be-f275-11eb-9f3b-a7e9fe348f68".
    // Empty string means "match all documents".
    val query = params.get("query", "")

    // esJsonRDD yields (documentId, documentJson) pairs; keep only the JSON payload.
    val esRDD: RDD[(String, String)] = sc.esJsonRDD(s"${indexName}/${typeName}", query)

    val esDS: Dataset[String] = spark.createDataset(esRDD).map(_._2)

    // Let Spark infer a schema from the raw JSON strings.
    val jsonDS = spark.read.json(esDS)
    //jsonDS.show(10, false)

    val savePath = params.getRequired("savePath")
    val coalesceNum = params.getInt("coalesceNum", 1)

    // Coalesce to limit the number of output files before writing Parquet.
    jsonDS.coalesce(coalesceNum)
      .write
      .mode(SaveMode.Overwrite)
      .parquet(savePath)

    spark.stop()
  }
}
