package flink_p1

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.api.scala.createTypeInformation
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer
import org.apache.kafka.clients.consumer.ConsumerConfig

import java.util.Properties

object FlinkTest_08_CarFlowAnaly {

  /**
   * Car-flow (traffic volume) statistics over a Kafka topic.
   *
   * Each Kafka record is a tab-separated line; field 1 is taken as the
   * grouping id and field 2 as an epoch-millisecond timestamp
   * (assumed from the parsing below — TODO confirm the record layout
   * against the producer).
   */
  def main(args: Array[String]): Unit = {

    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Kafka consumer configuration (local single-broker setup).
    val prop = new Properties()
    prop.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092")
    prop.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "group1")

    // Explicit [String] type parameter matches SimpleStringSchema's output.
    val source = new FlinkKafkaConsumer[String]("flink-test", new SimpleStringSchema(), prop)

    val kafkaSource: DataStream[String] = env.addSource(source)

    // Case 1: real-time count, one record at a time (sink currently disabled).
    val res1: DataStream[(String, Int)] = kafkaSource.map { data =>
      val fields: Array[String] = data.split("\t")
      (fields(1), 1)
    }

    //    res1.keyBy(_._1).sum(1).print()

    // Case 2: count per minute. The timestamp is truncated to minute
    // granularity: ms / 1000 / 60 yields whole minutes (integer division),
    // then * 60000 restores the minute-aligned epoch-millisecond value.
    val res2: DataStream[((String, Long), Int)] = kafkaSource.map { data =>
      val fields: Array[String] = data.split("\t")
      ((fields(1), fields(2).toLong / 1000 / 60 * 60000), 1)
    }

    // Key-extractor lambda replaces the deprecated index-based keyBy(0);
    // the grouping (by the (id, minute) tuple) and positional sum are unchanged.
    res2.keyBy(_._1).sum(1).print()

    env.execute("kafka1")

  }

}
