package com.chb.flink.window


import com.chb.flink.source.{MyCustomerSource, StationLog}
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.api.scala.function.ProcessWindowFunction
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.windows.TimeWindow
import org.apache.flink.util.Collector

object TestProcessWindowFunctionByWindow {

    // Counts the number of logs per base station every 5 seconds using a
    // tumbling processing-time window keyed by station id (sid).
    def main(args: Array[String]): Unit = {
        val streamEnv: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
        import org.apache.flink.streaming.api.scala._
        streamEnv.setParallelism(1)

        // Read the source: one CSV line per StationLog from a socket.
        // (Alternative local test source: streamEnv.addSource(new MyCustomerSource))
        val stream: DataStream[StationLog] = streamEnv.socketTextStream("10.0.0.201", 8888)
            .map(line => {
                // Expected line format: 6 comma-separated fields; the last two
                // must parse as Long (toLong throws on malformed input).
                val arr = line.split(",") // val: the array is never reassigned
                new StationLog(arr(0).trim, arr(1).trim, arr(2).trim, arr(3).trim, arr(4).trim.toLong, arr(5).trim.toLong)
            })

        stream.print()

        // Map each log to (stationId, 1), key by station id, then count the
        // elements that fall into each 5-second tumbling processing-time window.
        stream.map(log => (log.sid, 1))
            .keyBy(_._1)
            .window(TumblingProcessingTimeWindows.of(Time.seconds(5)))
            .process(new ProcessWindowFunction[(String, Int), (String, Long), String, TimeWindow] {
                // Invoked once per key when the window fires; `elements` holds all
                // records of this key in the window, so its size is the log count.
                override def process(key: String, context: Context, elements: Iterable[(String, Int)], out: Collector[(String, Long)]): Unit = {
                    println("start---------------")
                    // Pass an explicit tuple instead of relying on deprecated
                    // auto-tupling, and widen Int -> Long explicitly to match
                    // the declared (String, Long) output type.
                    out.collect((key, elements.size.toLong))
                    println("-----------------end")
                }
            })
            .print()

        streamEnv.execute()
    }
}
