package com.yeming.flink.mudlog.handle.realdata

import java.text.SimpleDateFormat
import java.util.{Date, Properties}

import com.alibaba.fastjson.{JSON, JSONObject}
import com.google.gson.{Gson, GsonBuilder}
import com.yeming.flink.mudlog.RealLogBean
import com.yeming.flink.mudlog.utils.Constants
import org.apache.flink.api.common.functions.RuntimeContext
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.elasticsearch.{ElasticsearchSinkFunction, RequestIndexer}
import org.apache.flink.streaming.connectors.elasticsearch7.ElasticsearchSink
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer
import org.apache.http.HttpHost
import org.codehaus.jackson.map.deser.std.StringDeserializer
import org.elasticsearch.action.index.IndexRequest
import org.elasticsearch.client.Requests

/**
 * Real-time mud-log data pipeline:
 * 1. Cleans raw Kafka records and stores them in Elasticsearch for visualization.
 * 2. (Planned) duplicates the stream into HDFS for backup and later data mining/analysis.
 */
object ReaLogDataHandle {

  // Shared Gson instance used to map the embedded JSON payload onto RealLogBean.
  private val gson: Gson = new GsonBuilder().create()

  // Offset (ms) from UTC to Beijing time (UTC+8), applied to the ingest timestamp.
  private val BeijingOffsetMillis: Long = 8L * 3600 * 1000

  /**
   * Entry point: builds and runs the Flink streaming job
   * Kafka topic "real_logdata" -> ETL -> Elasticsearch index "real_logdata".
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    // Initialize the streaming environment.
    val streamEnv: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    // Parallelism 1 also keeps the per-job SimpleDateFormat below confined to one task
    // (SimpleDateFormat is not thread-safe).
    streamEnv.setParallelism(1)
    streamEnv.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
    // Checkpoint every 60 seconds.
    streamEnv.enableCheckpointing(60 * 1000)

    // Kafka source configuration.
    val props = new Properties()
    props.setProperty("bootstrap.servers", "f2:9092,f3:9092,f4:9092")
    props.setProperty("group.id", "flink_real_logdata")
    // FIX: the original pointed these at org.codehaus.jackson...StringDeserializer,
    // which is a Jackson JSON component, not a Kafka Deserializer. The correct class is
    // org.apache.kafka.common.serialization.StringDeserializer. (FlinkKafkaConsumer
    // deserializes via the DeserializationSchema passed below and overrides these
    // properties internally, but they should at least name a valid Kafka deserializer.)
    props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
    props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
    props.setProperty("auto.offset.reset", "latest")

    val stream: DataStream[String] =
      streamEnv.addSource(new FlinkKafkaConsumer[String]("real_logdata", new SimpleStringSchema, props))

    // FIX: the original pattern used "sss" — in SimpleDateFormat, lowercase 's' is the
    // seconds-in-minute field, so the fractional part was mis-parsed as seconds.
    // "SSS" is the millisecond field. The trailing literal 'Z' marks the input as UTC,
    // so parse explicitly in UTC instead of the JVM default time zone (otherwise the
    // +8h shift below would double-shift on machines already running in UTC+8).
    val format: SimpleDateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'")
    format.setTimeZone(java.util.TimeZone.getTimeZone("UTC"))

    // ETL: keep only records carrying the log-type marker, extract the JSON payload
    // embedded in the "message" field, and map it onto RealLogBean.
    val parseStream: DataStream[RealLogBean] = stream
      .filter(_.contains(Constants.LOG_TYPE_PREFIX))
      .map(msg => {
        val jt: JSONObject = JSON.parseObject(msg)
        val body: String = jt.getString("message")
        val timeStr: String = jt.getString("@timestamp")
        // The JSON payload follows the marker; index 1 assumes the marker occurs once.
        val objectJsonString: String = body.split(Constants.LOG_TYPE_PREFIX)(1)
        val bean: RealLogBean = gson.fromJson(objectJsonString, classOf[RealLogBean])
        // Shift the UTC ingest time to Beijing time (UTC+8) for downstream display.
        bean.logTime = format.parse(timeStr).getTime + BeijingOffsetMillis
        bean
      })
      // Event time comes from happenTime; records are assumed to arrive in order.
      .assignAscendingTimestamps(_.happenTime)

    // 1. Build the Elasticsearch sink over the three cluster nodes.
    val httpHosts = new java.util.ArrayList[HttpHost]
    httpHosts.add(new HttpHost("f2", 9200, "http"))
    httpHosts.add(new HttpHost("f3", 9200, "http"))
    httpHosts.add(new HttpHost("f4", 9200, "http"))
    val esSinkBuilder = new ElasticsearchSink.Builder[RealLogBean](
      httpHosts,
      new ElasticsearchSinkFunction[RealLogBean] {
        override def process(element: RealLogBean, ctx: RuntimeContext, indexer: RequestIndexer): Unit = {
          val json = new java.util.HashMap[String, Any]
          // FIX: "sss" -> "SSS" (milliseconds). The literal "+08:00" suffix labels the
          // value as Beijing time, matching the +8h shift applied to logTime during
          // parsing. NOTE(review): formatting still uses the JVM default zone for the
          // date/time fields — confirm the job runs in UTC+8 or pin the zone explicitly.
          val format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS+08:00")
          json.put("uuid", element.id)
          json.put("log_type", element.logType)
          json.put("well_id", element.wellId)
          json.put("happen_time", format.format(new Date(element.happenTime)))
          json.put("log_time", format.format(new Date(element.logTime)))
          json.put("depth", element.depth)
          json.put("perm", element.perm)
          json.put("gamma", element.gamma)
          json.put("porosity", element.porosity)
          json.put("fluvial_facies", element.fluvialFacies)
          json.put("net_gross", element.netGross)

          val rqst: IndexRequest = Requests.indexRequest
            .index("real_logdata")
            .source(json)
          indexer.add(rqst)
          // Debug trace of each indexed record id.
          println(element.id)
        }
      }
    )

    // Bulk-request configuration: flush after every element so records are visible
    // immediately instead of being buffered.
    esSinkBuilder.setBulkFlushMaxActions(1)
    // A RestClientFactory can be provided here to customize the internally created
    // REST client (headers, auth, etc.):
    //    esSinkBuilder.setRestClientFactory(new RestClientFactory {
    //      override def configureRestClientBuilder(restClientBuilder: RestClientBuilder): Unit = {
    //        restClientBuilder.setDefaultHeaders()
    //          .setHttpClientConfigCallback()
    //      }
    //    })

    // Persist the cleaned stream into Elasticsearch.
    parseStream.addSink(esSinkBuilder.build())
    // Start the streaming computation.
    streamEnv.execute("ViewCompute")
  }
}
