package com.lagou.bak

import java.util.Properties

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.runtime.state.filesystem.FsStateBackend
import org.apache.flink.streaming.api.datastream.{DataStream, DataStreamSource, SingleOutputStreamOperator}
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer

import scala.util.Try

object KafkaTest {
  // Per-process record counter used to inject a simulated failure after a
  // fixed number of records (exercises Flink's checkpoint/restart recovery).
  private var count = 0

  /** Increments and returns the record counter.
    * Not thread-safe; relies on the job running with parallelism 1 (set in main).
    */
  def getUniqNum() = {
    count += 1
    count
  }

  /** Reads strings from the Kafka topic "animalone", prints and forwards every
    * value that parses as an integer divisible by 100 (others map to ""), and
    * deliberately throws after every 20000 records to exercise checkpointed
    * offset recovery.
    */
  def main(args: Array[String]): Unit = {
    // 读kafka数据 (read data from Kafka)
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    // NOTE(review): 10 ms is an extremely aggressive checkpoint interval —
    // presumably intentional for this recovery test; confirm before reuse.
    env.enableCheckpointing(10)
    // Checkpoint state goes to the local filesystem (canonical file:/// URI).
    env.setStateBackend(new FsStateBackend("file:///tmp/backend"))

    val prop = new Properties()
    prop.setProperty("bootstrap.servers", "hadoop2:9092")
    prop.setProperty("group.id", "mygp")
    val flinkConsumer = new FlinkKafkaConsumer[String]("animalone", new SimpleStringSchema(), prop)
    flinkConsumer.setStartFromEarliest()
    // Commit consumed offsets back to Kafka whenever a checkpoint completes.
    flinkConsumer.setCommitOffsetsOnCheckpoints(true)

    val dataSource: DataStreamSource[String] = env.addSource(flinkConsumer)
    // 处理（根据具体指标编码计算逻辑）(processing: encode the metric logic here)
    val result: SingleOutputStreamOperator[String] = dataSource.rebalance().map(x => {
      // Simulated failure: after 20000 records, reset the counter and throw so
      // the job restarts and recovers from the latest checkpoint.
      if (getUniqNum() == 20000) {
        count = 0
        throw new Exception("Failed.")
      }
      // Guard the parse: a non-numeric Kafka message must not kill the job
      // with a NumberFormatException — only the throw above is intentional.
      Try(x.toInt).toOption match {
        case Some(n) if n % 100 == 0 =>
          println(x)
          x
        case _ => ""
      }
    })
    env.execute()
    // 输出计算结果（输出到控制台）(results are printed to the console above)
  }
}
