package com.shujia.compute

import java.util.Properties

import com.google.gson.Gson
import com.shujia.bean.CarWide
import com.shujia.sink.{HbaseMapper, HbaseSink}
import com.shujia.tf.KcCarWindowReduce
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.{CheckpointingMode, TimeCharacteristic}
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer
import org.apache.flink.streaming.api.windowing.time.Time
import com.shujia.bean.KcCarReduce
import org.apache.hadoop.hbase.client.Put

object KVCar {
  def main(args: Array[String]): Unit = {

    /**
      * Checkpoint (卡口) traffic-flow statistics.
      *
      * Counts the vehicle flow for every traffic checkpoint per one-minute
      * event-time window, reading wide car records from Kafka and writing the
      * aggregated results to HBase.
      */


    val env = StreamExecutionEnvironment.getExecutionEnvironment

    //    // start a checkpoint every 1000 ms
    //    env.enableCheckpointing(1000)
    //    // advanced options:
    //
    //    // set the mode to exactly-once (this is the default)
    //    env.getCheckpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE)
    //
    //    // guarantee at least 500 ms between consecutive checkpoints
    //    env.getCheckpointConfig.setMinPauseBetweenCheckpoints(500)
    //
    //    // a checkpoint must complete within one minute or it is discarded
    //    env.getCheckpointConfig.setCheckpointTimeout(60000)
    //
    //    // allow only one checkpoint in flight at a time
    //    env.getCheckpointConfig.setMaxConcurrentCheckpoints(1)
    //
    //    // keep externalized checkpoints after the job is cancelled
    //    env.getCheckpointConfig.enableExternalizedCheckpoints(ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)
    //
    //    // store checkpoint state in an external persistent system (RocksDB on HDFS)
    //    val rocksDBStateBackend: StateBackend = new RocksDBStateBackend("hdfs://node1:9000/flink/checkpoint", true)
    //    env.setStateBackend(rocksDBStateBackend)
    //
    //    env.setParallelism(1)

    // use event time for windowing (timestamps extracted from the records below)
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    val kafkaProps = new Properties()
    kafkaProps.setProperty("zookeeper.connect", "node1:2181")
    kafkaProps.setProperty("bootstrap.servers", "node1:9092")
    kafkaProps.setProperty("group.id", "qweqwew")

    val kafkaConsumer = new FlinkKafkaConsumer[String]("dwd_car_wide", new SimpleStringSchema, kafkaProps)
    // read the topic from the earliest offset (ignores committed group offsets)
    kafkaConsumer.setStartFromEarliest()
    val lineDS = env.addSource(kafkaConsumer)

    // Parse each JSON line into a CarWide bean.
    // NOTE(review): Gson is instantiated per record because Gson is not
    // Serializable, so it cannot be captured in the Flink closure; a
    // RichMapFunction with a transient instance would avoid the per-record cost.
    val carWideDS = lineDS.map(line => {
      val gson = new Gson()
      gson.fromJson(line, classOf[CarWide])
    })


    // Assign event-time timestamps and watermarks, tolerating
    // up to 5 seconds of out-of-order events.
    val waterMarkDS = carWideDS.assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor[CarWide](Time.seconds(5)) {
      override def extractTimestamp(t: CarWide): Long = {
        // assumes getTs is epoch seconds — converted to milliseconds for Flink
        t.getTs * 1000
        // designates the timestamp field of the record
      }
    })

    // Key by checkpoint id and aggregate each 1-minute tumbling event-time window.
    val countDS = waterMarkDS.keyBy(_.getKcId)
      .timeWindow(Time.minutes(1))
      .process(new KcCarWindowReduce)

//    countDS.print()


    // Result per window: checkpoint id, time, vehicle count (+ average speed).
    // Persist the results to HBase.

    // Maps an aggregated KcCarReduce record to an HBase Put:
    // row key = checkpoint id; columns info:flow and info:avgSpeed, both
    // versioned by the window timestamp (getTs) so each window is a new cell version.
    // NOTE(review): Put.add(family, qualifier, ts, value) is deprecated since
    // HBase 1.0 and removed in 2.0 — migrate to addColumn if the HBase client
    // version allows; verify against the project's HBase dependency.
    val hbaseMapper = new HbaseMapper[KcCarReduce] {
      override def mapper(value: KcCarReduce): Put = {
        val put = new Put(value.getKcId.getBytes())
        put.add("info".getBytes(), "flow".getBytes(), value.getTs, value.getCount.toString.getBytes())
        put.add("info".getBytes(), "avgSpeed".getBytes(), value.getTs, value.getAvgSpeed.toString.getBytes())

        put
      }
    }

   countDS.addSink(new HbaseSink[KcCarReduce]("kc_car_count", hbaseMapper, "node1:2181"))


    env.execute("kc_car_count")
  }

}
