package com.atguigu.api

import org.apache.flink.api.common.functions.ReduceFunction
import org.apache.flink.api.java.functions.KeySelector
import org.apache.flink.streaming.api.scala._

/**
 * @description: Flink DataStream transformation demo: map-parses sensor CSV lines,
 *               keys the stream by sensor id, and applies a rolling reduce.
 * @time: 2020/6/19 18:14
 * @author: baojinlong
 **/
object TransformTest {
  /**
   * Entry point: reads sensor records from a text file, parses them into
   * [[SensorReading]], keys the stream by sensor id, and performs a rolling
   * reduce that tracks the latest timestamp and minimum temperature per key.
   *
   * @param args optional; args(0) overrides the input file path
   *             (falls back to the original hard-coded sample file).
   */
  def main(args: Array[String]): Unit = {
    val environment: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    // Parallelism 1 keeps the printed output in a single, ordered stream for the demo.
    environment.setParallelism(1)

    // Generalized: allow the input path to come from the command line instead of
    // only the hard-coded absolute path (kept as the default for compatibility).
    val inputPath: String =
      if (args.nonEmpty) args(0)
      else "E:/qj_codes/big-data/FlinkTutorial/src/main/resources/sensor.data"
    val inputStreamFromFile: DataStream[String] = environment.readTextFile(inputPath)

    // Basic transformation: each line is CSV "id,timestamp,temperature".
    // trim guards against stray whitespace that would make toLong/toDouble throw.
    val dataStream: DataStream[SensorReading] = inputStreamFromFile
      .map(line => {
        val fields: Array[String] = line.split(",")
        SensorReading(fields(0).trim, fields(1).trim.toLong, fields(2).trim.toDouble)
      })

    // After keyBy, all aggregation state is maintained per key (sensor id).
    // Alternatives shown in the tutorial: keyBy("id"), keyBy(_.id),
    // sum("temperature"), min("temperature") (keeps first values of other fields)
    // vs minBy("temperature") (keeps the whole record of the minimum), or an
    // inline reduce lambda equivalent to MyReduce below.
    val resultData: DataStream[SensorReading] = dataStream
      .keyBy(new MyIdSelector())
      .reduce(new MyReduce)

    // Sink: print results, then launch the job (nothing runs until execute()).
    resultData.print
    environment.execute("transform test job")
  }

}

/**
 * Key extractor for `keyBy`: groups the stream by the sensor's id field.
 */
class MyIdSelector extends KeySelector[SensorReading, String] {
  override def getKey(in: SensorReading): String = in.id
}

/**
 * Rolling aggregation within a single key: keeps the most recent timestamp
 * and the lowest temperature observed so far for that sensor.
 * (The id is taken from the accumulator; within one key both ids are equal.)
 */
class MyReduce() extends ReduceFunction[SensorReading] {
  override def reduce(curRes: SensorReading, newData: SensorReading): SensorReading = {
    val latestTimestamp = math.max(curRes.timestamp, newData.timestamp)
    val minTemperature = math.min(curRes.temperature, newData.temperature)
    SensorReading(curRes.id, latestTimestamp, minTemperature)
  }
}
