package com.burges.net.dataSet.api.transform

import org.apache.flink.api.java.aggregation.Aggregations
import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.api.scala.{DataSet, ExecutionEnvironment, _}

/**
  * DataSet转换操作代码示例
  */
/**
  * Examples of Flink DataSet transformation operators.
  *
  * NOTE: every transformation below is lazy and no sink / execute() call is
  * made, so running main only builds dataflow graphs without executing them —
  * this file exists to illustrate the operator API, not to produce output.
  */
object TransformOperation {

  def main(args: Array[String]): Unit = {
    // Parse command-line arguments (kept for demonstration; unused below).
    val params = ParameterTool.fromArgs(args)
    // Obtain the batch execution environment.
    val env = ExecutionEnvironment.getExecutionEnvironment

    val words: DataSet[String] = env.fromElements("flink", "hadoop", "spark")

    /**
      * Element-wise transformations
      */
    // map operator: one input element -> exactly one output element
    // (shown once with a named parameter, once with placeholder syntax)
    words.map(w => w.toUpperCase)
    words.map(_.toUpperCase)

    val csvLine: DataSet[String] = env.fromElements("flink,hadoop,spark")
    // flatMap operator: one input element -> zero or more output elements
    csvLine.flatMap(line => line.split(","))

    // mapPartition operator: transform an entire partition via its iterator
    words.mapPartition { elements =>
      elements.map(word => (word, 1))
    }

    // filter operator: keep only elements satisfying the predicate
    val numbers: DataSet[Int] = env.fromElements(2222, 121, 34, 333)
    numbers.filter(_ > 1000)

    /**
      * Aggregations
      */
    val longs: DataSet[Long] = env.fromElements(123, 212, 123, 243)
    // reduce operator: pairwise combination of all elements
    longs.reduce(_ + _)
    // reduceGroup operator: receives all elements of a group as one iterator
    longs.reduceGroup(group => group.sum)

    val tuples: DataSet[(Int, String, Int)] =
      env.fromElements((12, "Alice", 34), (12, "Alice", 34), (12, "Alice", 34))
    // aggregate operator:
    // sum over field 0, then take the minimum of field 2
    val aggregated: AggregateDataSet[(Int, String, Int)] =
      tuples.aggregate(Aggregations.SUM, 0).aggregate(Aggregations.MIN, 2)
    // the same aggregation written with the shorthand methods
    val aggregatedShort: AggregateDataSet[(Int, String, Int)] =
      tuples.sum(0).min(2)

    // distinct operator: drop duplicate elements
    val deduplicated: DataSet[Long] = longs.distinct()


    /**
      * Multi-dataset joins
      *   Join operator (examples not written yet)
      */


  }
}
