package com.burges.net.dataStream.codeRuler.transform.SingleDataStream

import org.apache.flink.api.java.tuple.Tuple
import org.apache.flink.streaming.api.scala.{DataStream, KeyedStream, StreamExecutionEnvironment, _}

/**
  * Author:  BurgessLee
  * Date:    2020/1/26
  * Purpose: Demonstrates Flink's rolling aggregation operators
  *          (sum / min / max / minBy / maxBy) on a keyed stream.
  */
object AggregationsDemo {

	/**
	  * Entry point. Builds a small in-memory stream of (key, value) pairs,
	  * keys it by the first field, and applies each of Flink's rolling
	  * aggregation operators, printing the incremental results.
	  *
	  * Note: each print() sink emits one record per input element because
	  * these are *rolling* aggregations — e.g. for key 1 the sum stream
	  * emits (1,5) then (1,8), and for key 2 it emits (2,2) then (2,6).
	  */
	def main(args: Array[String]): Unit = {
		val environment = StreamExecutionEnvironment.getExecutionEnvironment

		val dataStream = environment.fromElements((1, 5), (2, 2), (2, 4), (1, 3))
		// Partition by the first tuple field. A typed key selector is used
		// instead of the deprecated position-based keyBy(0): it keeps the key
		// type as Int rather than the untyped org.apache.flink.api.java.tuple.Tuple.
		val keyedStream: KeyedStream[(Int, Int), Int] = dataStream.keyBy(_._1)
		// Rolling sum over the second field.
		// Aggregation fields must be numeric, otherwise an exception is thrown.
		val sumStream: DataStream[(Int, Int)] = keyedStream.sum(1)
		sumStream.print()
		// Rolling minimum of the second field per key. min() only guarantees
		// the aggregated field is correct; other fields may come from any record.
		val minStream: DataStream[(Int, Int)] = keyedStream.min(1)
		minStream.print()
		// Rolling maximum of the second field per key (same caveat as min).
		val maxStream: DataStream[(Int, Int)] = keyedStream.max(1)
		maxStream.print()
		// Rolling minimum per key, returning the *entire element* that holds
		// the minimum value (unlike min, all fields belong to that element).
		val minByStream: DataStream[(Int, Int)] = keyedStream.minBy(1)
		minByStream.print()
		// Rolling maximum per key, returning the entire element holding the maximum.
		val maxByStream: DataStream[(Int, Int)] = keyedStream.maxBy(1)
		maxByStream.print()

		// Lazily-built dataflow graph executes only when execute() is called.
		environment.execute("streaming aggregationOperator")
	}

}
