package com.swhy

//import java.io.InputStream
import java.text.SimpleDateFormat
import java.util.{Calendar, Date, Properties, UUID}
import org.elasticsearch.spark.sql._
import org.elasticsearch.spark.rdd.EsSpark
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.slf4j.LoggerFactory

import java.util.concurrent.Executors

object spark_es {

    private val log = LoggerFactory.getLogger(spark_es.getClass)

    // Properties holder kept for compatibility; the bootstrap.properties loading
    // below is currently disabled, so `prop` is empty at runtime.
    val prop = new Properties()
//    val is: InputStream = this.getClass().getResourceAsStream("/bootstrap.properties")
//    prop.load(is)
//    val ENVIRONMENT_SETING = "daop.elasticsearch.hosts"
//    val host = prop.getProperty(ENVIRONMENT_SETING)

    /**
      * Current wall-clock time shifted to UTC, formatted as
      * "yyyy-MM-dd'T'HH:mm:ss.SSS+0000" (space replaced by 'T', fixed offset suffix).
      */
    private def currentUtcTime(): String = {
      val cal = Calendar.getInstance()
      cal.setTime(new Date())
      // Subtract zone + daylight-saving offsets to shift local time to UTC.
      cal.add(Calendar.MILLISECOND, -(cal.get(Calendar.ZONE_OFFSET) + cal.get(Calendar.DST_OFFSET)))
      val formatted = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS").format(cal.getTime)
      formatted.replace(" ", "T") + "+0000"
    }

    /**
      * Entry point: reads the asset-account summary table
      * `sum_data.sum_t98_asset_acct_info_z_temp` from Hive and bulk-writes each
      * row — keyed by a random UUID document id — into the Elasticsearch index
      * `test_data_index` via elasticsearch-hadoop.
      *
      * @param args unused command-line arguments
      */
    def main(args: Array[String]): Unit = {
      val sparkConf = new SparkConf().setAppName(this.getClass.getName).setMaster("local")
      // Elasticsearch connection settings.
      // NOTE(review): host and credentials are hard-coded in source — move them to
      // bootstrap.properties (see the disabled loading above) or environment
      // variables before this reaches production.
      sparkConf.set("es.nodes", "192.168.152.47")
      sparkConf.set("es.nodes.wan.only", "true")
      sparkConf.set("es.port", "9200")
      sparkConf.set("es.index.auto.create", "true")
      sparkConf.set("es.net.http.auth.user", "elastic")
      sparkConf.set("es.net.http.auth.pass", "deepq#elastic")
      // Bulk-write tuning: no index refresh after each bulk; batches capped at
      // 20mb / 20000 docs per task.
      sparkConf.set("es.batch.write.refresh", "false")
      sparkConf.set("es.batch.size.bytes", "20mb")
      sparkConf.set("es.batch.size.entries", "20000")

      val session = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()
      try {
        session.sql("use sum_data")
        // UDF exposed to SQL: current time in UTC as "yyyy-MM-dd'T'HH:mm:ss.SSS+0000".
        session.udf.register("get_utc_time", () => currentUtcTime())

        val querySql = "select asset_acct,biz_dt,cust_cd,intg_cust_id,belong_org_id,crdt_acct_flag,cap_bal,in_tran_amt,biz_date from sum_data.sum_t98_asset_acct_info_z_temp"
        val resultDF = session.sql(querySql)
//      if (!ENVIRONMENT_SETING.contains("prd")) {
//        resultDF.show(10)
//      }

        // Map each row to (documentId, fieldMap). getAs already returns the
        // requested type, so the former redundant .toString() calls — and the
        // scala BigDecimal wrapping — are gone: both threw NullPointerException
        // whenever a column was null; null values now pass through to ES as-is.
        val docs = resultDF.rdd.map { row =>
          val fields = Map[String, Object](
            "asset_acct" -> row.getAs[String]("asset_acct"),
            "biz_dt" -> row.getAs[String]("biz_dt"),
            "cust_cd" -> row.getAs[String]("cust_cd"),
            "intg_cust_id" -> row.getAs[String]("intg_cust_id"),
            "belong_org_id" -> row.getAs[String]("belong_org_id"),
            "crdt_acct_flag" -> row.getAs[String]("crdt_acct_flag"),
            "cap_bal" -> row.getAs[java.math.BigDecimal]("cap_bal"),
            "in_tran_amt" -> row.getAs[java.math.BigDecimal]("in_tran_amt"),
            "biz_date" -> row.getAs[Integer]("biz_date")
          )
          // Random UUID as the ES document _id (saveToEsWithMeta uses the key as metadata).
          (UUID.randomUUID().toString, fields)
        }
        // Former `print(tuple)` only printed the RDD's toString, not any data.
        log.info("Writing documents from sum_t98_asset_acct_info_z_temp to ES index test_data_index")

        EsSpark.saveToEsWithMeta(docs, "test_data_index")
      } finally {
        // Was missing: release Spark resources even when the job fails.
        session.stop()
      }
    }

}
