package com.gitee.sink

import java.net.{InetAddress, InetSocketAddress}
import java.util

import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment, _}
import org.apache.flink.streaming.connectors.redis.RedisSink
import org.apache.flink.streaming.connectors.redis.common.config.{FlinkJedisClusterConfig, FlinkJedisPoolConfig}
import org.apache.flink.streaming.connectors.redis.common.mapper.{RedisCommand, RedisCommandDescription, RedisMapper}

/*
  1. Determine the data type to be sunk
  2. Implement the RedisMapper interface
    getCommandDescription - get the Redis command description; create one if absent
    getKeyFromData - the key to sink
    getValueFromData - the value to sink
  3. FlinkJedisPoolConfig - configure the Redis node information
  4. Pass a RedisSink object to addSink
  TODO RidesSink hits connection timeouts when data volume is high; not yet resolved
 */
object RidesSink {

  /** Maps each (word, count) pair into the Redis hash "WordCount". */
  class MyReidsSink extends RedisMapper[(String, Int)] {

    /** HSET: every record becomes a field of the single hash named "WordCount". */
    override def getCommandDescription: RedisCommandDescription =
      new RedisCommandDescription(RedisCommand.HSET, "WordCount")

    /** The word itself is used as the hash field. */
    override def getKeyFromData(data: (String, Int)): String = data._1

    /** Redis stores strings, so the running count is serialized via toString. */
    override def getValueFromData(data: (String, Int)): String = data._2.toString
  }

  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Ingest raw text lines from a socket source.
    val lines: DataStream[String] = env.socketTextStream("node01", 9999)

    // Classic word count: split on spaces, pair each word with 1,
    // key by the word (tuple position 0), keep a running sum (position 1).
    val words: DataStream[String] = lines.flatMap(_.split(" "))
    val pairs: DataStream[(String, Int)] = words.map((_, 1))
    val counts: DataStream[(String, Int)] = pairs
      .keyBy(0)
      .sum(1)

    // Single-node Redis configuration.
    // NOTE(review): "192.186.100.203" may be a typo for "192.168.100.203" — confirm.
    val conf: FlinkJedisPoolConfig =
      new FlinkJedisPoolConfig.Builder()
        .setHost("192.186.100.203")
        .setPort(6379)
        .build()

    // Cluster configuration (kept for reference):
    /*val set = new util.HashSet[InetSocketAddress]()
    set.add(new InetSocketAddress(InetAddress.getByName("node03"), 6379))
    val conf: FlinkJedisClusterConfig = new FlinkJedisClusterConfig.Builder().setNodes(set).setMaxTotal(5).build()*/

    // Sink the running counts to Redis with a single writer task.
    counts.addSink(new RedisSink[(String, Int)](conf, new MyReidsSink)).setParallelism(1)
    env.execute()
  }
}
