package com.yeming.flink.mudlog.handle.mydata

import java.text.SimpleDateFormat
import java.util.{Date, Properties}

import com.alibaba.fastjson.{JSON, JSONObject}
import com.google.gson.{Gson, GsonBuilder}
import com.yeming.flink.mudlog.LoggingBean
import com.yeming.flink.mudlog.function.LoggingBeanIntervalFunction
import com.yeming.flink.mudlog.utilclass.ExDataSerializationSchema
import com.yeming.flink.mudlog.utils.Constants
import org.apache.flink.api.common.functions.RuntimeContext
import org.apache.flink.api.common.serialization.{SimpleStringEncoder, SimpleStringSchema}
import org.apache.flink.core.fs.Path
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.ProcessFunction
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.elasticsearch.{ElasticsearchSinkFunction, RequestIndexer}
import org.apache.flink.streaming.connectors.elasticsearch7.ElasticsearchSink
import org.apache.flink.streaming.connectors.kafka.{FlinkKafkaConsumer, FlinkKafkaProducer}
import org.apache.flink.util.Collector
import org.apache.http.HttpHost
import org.apache.kafka.clients.producer.ProducerConfig
import org.codehaus.jackson.map.deser.std.StringDeserializer
import org.elasticsearch.action.index.IndexRequest
import org.elasticsearch.client.Requests

/**
 * 1. Cleans (ETL) the raw mud-log records and writes them into Elasticsearch
 *    for visualization.
 * 2. Duplicates the stream via side outputs: one copy is archived to HDFS for
 *    backup / later data mining, one copy feeds anomaly detection whose results
 *    are forwarded to Kafka.
 */
object MudLogHandle {

  // Shared Gson instance used to deserialize the log payload into LoggingBean.
  private val gson: Gson = new GsonBuilder().create()

  /**
   * Job entry point: builds the Flink streaming topology and executes it.
   *
   * @param args command-line arguments (currently unused)
   */
  def main(args: Array[String]): Unit = {
    // Initialize the streaming environment.
    val streamEnv: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    // Environment settings: single parallelism, event-time semantics, 60 s checkpoints.
    streamEnv.setParallelism(1)
    streamEnv.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
    streamEnv.enableCheckpointing(60 * 1000)

    // Source: consume the raw mud-log topic from Kafka.
    val props = new Properties()

    props.setProperty("bootstrap.servers", "f2:9092,f3:9092,f4:9092")
    props.setProperty("group.id", "flink_1")
    // FIX: the original referenced org.codehaus.jackson...StringDeserializer (a Jackson
    // JSON deserializer) via the import at the top of the file — that is not a Kafka
    // Deserializer and Kafka could never instantiate it. Use the Kafka client's own
    // StringDeserializer, fully qualified to avoid clashing with the existing import.
    // (FlinkKafkaConsumer actually deserializes through SimpleStringSchema, but the
    // property should still name a valid Kafka deserializer class.)
    props.setProperty("key.deserializer",
      classOf[org.apache.kafka.common.serialization.StringDeserializer].getName)
    props.setProperty("value.deserializer",
      classOf[org.apache.kafka.common.serialization.StringDeserializer].getName)
    props.setProperty("auto.offset.reset", "latest")

    val stream: DataStream[String] = streamEnv.addSource(new FlinkKafkaConsumer[String]("mudlog", new SimpleStringSchema, props))

    // ETL: keep only records carrying the log-type prefix, extract the JSON payload
    // that follows the prefix inside the "message" field, and parse it into a
    // LoggingBean. Timestamps are assumed to arrive in ascending order.
    val parseStream: DataStream[LoggingBean] = stream.filter(_.contains(Constants.LOG_TYPE_PREFIX))
      .map(msg => {
      val jt: JSONObject = JSON.parseObject(msg)
      val body: String = jt.getString("message")
      val objectJsonString: String = body.split(Constants.LOG_TYPE_PREFIX)(1)
      gson.fromJson(objectJsonString, classOf[LoggingBean])})
      .assignAscendingTimestamps(_.happenTime)

    // Side-output tags: one branch for anomaly detection (Kafka), one for HDFS archival.
    val outTag1 = new OutputTag[LoggingBean]("DetectKafkaSinkStream")
    val outTag2 = new OutputTag[LoggingBean]("HdfsSinkStream")
//    val outTag3 = new OutputTag[LoggingBean]("TimeComputeStream")

    val mainStream: DataStream[LoggingBean] = parseStream.process(new ProcessFunction[LoggingBean, LoggingBean] {
      override def processElement(value: LoggingBean, ctx: ProcessFunction[LoggingBean, LoggingBean]#Context, out: Collector[LoggingBean]): Unit = {
        // Main stream goes to Elasticsearch; identical copies go to both side outputs.
        out.collect(value)
        ctx.output(outTag1, value)
        ctx.output(outTag2, value)
//        ctx.output(outTag3, value)
      }
    })


    // Anomaly-detection branch: keyed per well so the interval function sees one
    // well's records in order.
    val detectSinkStream: DataStream[(String, LoggingBean)] = mainStream.getSideOutput(outTag1)
      .keyBy(_.wellName)
      .flatMap(new LoggingBeanIntervalFunction)

    val hdfsSinkStream: DataStream[LoggingBean] = mainStream.getSideOutput(outTag2)
//    val timeStream  = mainStream.getSideOutput(outTag3)
//      .map(log => (System.currentTimeMillis() - log.happenTime))
//      .countWindowAll(10)
//      .aggregate(AggregationFunction.AggregationType.SUM, 0)

    // 1. Build the Elasticsearch sink.
    val httpHosts = new java.util.ArrayList[HttpHost]
    httpHosts.add(new HttpHost("f2", 9200, "http"))
    httpHosts.add(new HttpHost("f3", 9200, "http"))
    httpHosts.add(new HttpHost("f4", 9200, "http"))
    val esSinkBuilder = new ElasticsearchSink.Builder[LoggingBean](
      httpHosts,
      new ElasticsearchSinkFunction[LoggingBean] {
        // Hoisted out of process(): the original allocated a SimpleDateFormat per
        // element. A single instance is fine here because parallelism is 1
        // (SimpleDateFormat is not thread-safe) and it is Serializable.
        // FIX: the original pattern used "sss" (seconds, repeated) where "SSS"
        // (milliseconds) was intended, so the fractional part of every indexed
        // timestamp duplicated the seconds value.
        private val format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS+08:00")

        def process(element: LoggingBean, ctx: RuntimeContext, indexer: RequestIndexer) {
          val json = new java.util.HashMap[String, Any]
          json.put("id_sk", element.id)
          json.put("log_type", element.logType)
          json.put("well_id", element.wellId)
          json.put("well_name", element.wellName)
          json.put("happen_time", format.format(new Date(element.happenTime)))
          json.put("log_depth", element.logDepth)
          json.put("drill_rate", element.drillRate)
          json.put("well_deflection", element.wellDeflection)

          val rqst: IndexRequest = Requests.indexRequest
            .index("mud_log")
            .source(json)
          indexer.add(rqst)
          println(element.id)
        }
      }
    )

    // Bulk-request configuration: flush after every element instead of buffering.
    esSinkBuilder.setBulkFlushMaxActions(1)
    // Provide a RestClientFactory for custom configuration of the internally
    // created REST client.
    //    esSinkBuilder.setRestClientFactory(new RestClientFactory {
    //      override def configureRestClientBuilder(restClientBuilder: RestClientBuilder): Unit = {
    //        restClientBuilder.setDefaultHeaders()
    //          .setHttpClientConfigCallback()
    //      }
    //    })

    // 2. Build the HDFS sink.
    // Default bucketing is one directory per hour; configure a rolling policy so
    // part files roll over frequently.
    val rolling: DefaultRollingPolicy[LoggingBean, String] = DefaultRollingPolicy.builder()
      .withInactivityInterval(2000) // roll after 2 s of bucket inactivity
      .withRolloverInterval(2000) // roll into a new file every 2 s
      .build()
    val sinkBuilder: StreamingFileSink.RowFormatBuilder[LoggingBean, String, _ <: StreamingFileSink.RowFormatBuilder[LoggingBean, String, _]] = StreamingFileSink.forRowFormat[LoggingBean](
      new Path("hdfs://f1:9000/flink_run_data/mud_log/"),
      new SimpleStringEncoder[LoggingBean]("UTF-8"))
    sinkBuilder.withRollingPolicy(rolling).withBucketCheckInterval(1000)
    val hdfsSink: StreamingFileSink[LoggingBean] = sinkBuilder.build()

    // 3. Build the Kafka sink for detected anomalies.
    val propsSink = new Properties()
    propsSink.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "f2:9092,f3:9092,f4:9092")
    // EXACTLY_ONCE uses Kafka transactions; the transaction timeout (5 min) must not
    // exceed the broker's transaction.max.timeout.ms.
    propsSink.setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 5*60*1000 + "")

    val kafkaSink = new FlinkKafkaProducer[(String,LoggingBean)](
      "mudlog_ex_data",
      new ExDataSerializationSchema,
      propsSink,
      FlinkKafkaProducer.Semantic.EXACTLY_ONCE)

    // 4. Local-file sink. NOTE(review): currently unused — it pairs with the
    // commented-out timeStream above; kept so that branch can be re-enabled.
    val localSink: StreamingFileSink[(Long, Long, Long)] = StreamingFileSink
      .forRowFormat(new Path("data/timeoutput.log"), new SimpleStringEncoder[(Long, Long, Long)]("UTF-8")) // all records share one path
      .build()

    // Wire up the sinks.
    mainStream.addSink(esSinkBuilder.build())
//    hdfsSinkStream.addSink(hdfsSink)
    detectSinkStream.addSink(kafkaSink)
//    timeStream.print()
    // Launch the streaming job.
    streamEnv.execute("ViewCompute")
  }
}
