package com.zt.bigdata.flink.stream

import com.zt.bigdata.template.spark.redis.RedisTemplate
import org.apache.flink.api.common.functions.RuntimeContext
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.functions.sink.{RichSinkFunction, SinkFunction}
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer
import org.apache.flink.streaming.connectors.redis.RedisSink
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisPoolConfig
import org.apache.flink.streaming.connectors.redis.common.mapper.{RedisCommand, RedisCommandDescription, RedisMapper}
import org.elasticsearch.action.index.IndexRequest
import redis.clients.jedis.Jedis


/**
  * @ClassName Test
  * @Description
  * @Author zhangtonghy
  * @Date 2019-07-10 16:51
  * @Copyright: 版权所有 (C) zt zt.
  * @注意 ：本内容仅限于zt内部传阅，禁止外泄以及用于其他的商业目的
  **/


/**
  * Maps (field, value) string pairs into Redis via HSET, writing every
  * element into the hash named "adb".
  */
class MyRedisMapper extends RedisMapper[(String, String)] {

  /** All elements use the HSET command; "adb" is the target hash key. */
  override def getCommandDescription: RedisCommandDescription =
    new RedisCommandDescription(RedisCommand.HSET, "adb")

  /** First tuple component becomes the hash field name. */
  override def getKeyFromData(data: (String, String)): String = {
    val (field, _) = data
    field
  }

  /** Second tuple component becomes the stored value. */
  override def getValueFromData(data: (String, String)): String = {
    val (_, value) = data
    value
  }
}

object RedisSinkDemo {

  /**
    * Demo job wiring three Flink sinks (Redis, Kafka, Elasticsearch) onto a
    * socket text stream. All `addSink` calls are deliberately commented out,
    * as in the original: the job only reads the socket and builds the sinks.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    val strings: DataStream[String] = env.socketTextStream("localhost", 999)

    // Split each line on whitespace into a (key, value) pair.
    // Bug fix: the original indexed p(1) unconditionally, which throws
    // ArrayIndexOutOfBoundsException for lines with fewer than two tokens;
    // such lines are now dropped.
    val pair: DataStream[(String, String)] = strings
      .map(_.split("\\s+"))
      .filter(_.length >= 2)
      .map(p => (p(0), p(1)))

    // Jedis pool configuration for the Redis sink.
    val config: FlinkJedisPoolConfig = new FlinkJedisPoolConfig.Builder()
      .setHost("localhost")
      .setPort(6379)
      .build()

    val redisSink = new RedisSink[(String, String)](config, new MyRedisMapper)

    //    pair.addSink(redisSink)

    val myProducer = new FlinkKafkaProducer[String](
      "172.30.12.247:9092", // broker list
      "flink-1", // target topic
      new SimpleStringSchema) // serialization schema

    myProducer.setWriteTimestampToKafka(true)

    // Re-join the pair into one line for the Kafka sink. Bug fix: the original
    // discarded this mapped stream, so the commented-out sink below could never
    // have been re-enabled as written.
    val joined: DataStream[String] = pair.map { case (k, v) => s"$k $v" }
    //      joined.addSink(myProducer)

    import org.apache.flink.streaming.connectors.elasticsearch.{ElasticsearchSinkFunction, RequestIndexer}
    import org.apache.flink.streaming.connectors.elasticsearch6.ElasticsearchSink
    import org.apache.http.HttpHost
    import org.elasticsearch.client.Requests

    val httpHosts = new java.util.ArrayList[HttpHost]
    httpHosts.add(new HttpHost("172.30.12.247", 9200, "http"))

    val esSinkBuilder = new ElasticsearchSink.Builder[String](
      httpHosts,
      new ElasticsearchSinkFunction[String] {
        // Wraps one stream element into an index request against my-index/my-type.
        def createIndexRequest(element: String): IndexRequest = {
          val json = new java.util.HashMap[String, String]
          json.put("data", element)

          // Idiom fix: dropped the redundant `return` — the last expression
          // is the method's value in Scala.
          Requests.indexRequest()
            .index("my-index")
            .`type`("my-type")
            .source(json)
        }

        override def process(t: String, runtimeContext: RuntimeContext, requestIndexer: RequestIndexer): Unit = {
          requestIndexer.add(createIndexRequest(t))
        }
      }
    )

    // Flush a bulk request after every 100 buffered actions.
    esSinkBuilder.setBulkFlushMaxActions(100)

    //      strings.addSink(esSinkBuilder.build)

    env.execute()
  }
}
