package com.ww.flink

import org.apache.flink.api.common.functions.ReduceFunction
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.kafka.common.serialization.StringSerializer
import org.apache.flink.streaming.api.scala._

import java.util.Properties
import scala.io.Source
//Count vehicle flow per traffic checkpoint (卡口)
object Flink_try05_car_flow_analy {
  def main(args: Array[String]): Unit = {
    // Kafka consumer connection properties.
    // FIX: the original set producer-side key/value *serializers*, which a
    // consumer ignores — FlinkKafkaConsumer deserializes records via the
    // SimpleStringSchema passed to it below. What a consumer does need is a
    // group.id; without one the Kafka consumer cannot manage offsets.
    val properties = new Properties()
    properties.setProperty("bootstrap.servers", "192.168.102.19:9092")
    properties.setProperty("group.id", "car-flow-analy")

    // Streaming execution environment.
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Read raw records from the "flink-kafka" topic as plain strings.
    val kafkaStream: DataStream[String] = env.addSource(
      new FlinkKafkaConsumer[String]("flink-kafka", new SimpleStringSchema(), properties))

    //kafkaStream.print
    // Count vehicle flow per checkpoint: the checkpoint id is the first
    // tab-separated field of each record; emit (checkpointId, 1) and keep a
    // running sum per key.
    kafkaStream
      .map { line =>
        val fields: Array[String] = line.split("\t")
        (fields(0), 1)
      }
      .keyBy(_._1)
      // v1: accumulated result so far, v2: incoming element — running count per key.
      .reduce((v1, v2) => (v1._1, v1._2 + v2._2))
      .print()

    // Name the job so it is identifiable in the Flink dashboard.
    env.execute("car-flow-analy")
  }
}
