package com.example

import org.apache.flink.api.scala.ExecutionEnvironment
import org.apache.flink.api.scala._

/** Walk-through of basic Flink DataSet transformations:
  * flatMap (one-to-many), filter, reduce, groupBy + reduce,
  * positional sum, reduceGroup, and distinct.
  */
object FlatMapDemo1 {

  def main(args: Array[String]): Unit = {
    val environment: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment

    val cityLines: DataSet[String] =
      environment.fromCollection(List("张三,中国,江西省,南昌市", "李四,中国,河北省,石家庄市"))

    // flatMap: each input line fans out into several tuples (one-to-many).
    // Tuple arities differ, so the element type widens to Product on purpose.
    val expanded: DataSet[Product] = cityLines.flatMap { line =>
      val parts = line.split(",")
      List(
        (parts(0), parts(1)),
        (parts(0), parts(1), parts(2)),
        (parts(0), parts(1), parts(2), parts(3))
      )
    }
    expanded.print()

    val words = environment.fromCollection(List("hadoop", "hive"))
    // Keep only words longer than four characters.
    words.filter(word => word.length > 4).print()

    val wordCounts = environment.fromCollection(List(("java", 1), ("java", 2), ("flink", 1), ("flink", 1)))

    // Shared combiner: add the counts, keep the left element's key.
    val mergeCounts: ((String, Int), (String, Int)) => (String, Int) =
      (left, right) => (left._1, left._2 + right._2)

    // Global reduce over the whole DataSet — note there is no grouping,
    // so the surviving key is simply that of the first element.
    val summed: DataSet[(String, Int)] = wordCounts.reduce(mergeCounts)
    summed.print()

    // Per-key reduce after grouping on the word.
    val grouped = wordCounts.groupBy(_._1).reduce(mergeCounts)
    grouped.print()

    // Same per-key aggregation via the positional sum() shortcut.
    wordCounts.groupBy(0).sum(1).print()

    // reduceGroup: the whole group is handed over as one iterator.
    val groupReduced: DataSet[(String, Int)] =
      wordCounts.groupBy(0).reduceGroup(group => group.reduce(mergeCounts))
    groupReduced.print()

    // Tuple de-duplication.
    wordCounts.distinct().print()
    // Expected output:
    // (flink,1)
    // (java,1)
    // (java,2)

    // TODO: demonstrate join and union.
  }

}
