package com.chenjj.bigdata.flink.book


import org.apache.flink.api.common.operators.Order
import org.apache.flink.api.scala.{DataSet, ExecutionEnvironment, _}
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.junit.{Assert, Test}
import org.apache.flink.api.java.functions.KeySelector
import org.apache.flink.util.Collector


/**
  * Chapter 3: the Flink programming model.
  *
  * Each test demonstrates one grouping / repartitioning operation of the
  * DataSet (batch) or DataStream (streaming) API.
  *
  * NOTE(review): the class-level `@Test` annotation was removed —
  * `org.junit.Test` is declared with `@Target(METHOD)` and has no effect
  * (and is arguably invalid) on a class.
  */
class FlinkTesterChapter3 extends Assert {
  // Batch execution environment (DataSet API).
  val benv = ExecutionEnvironment.getExecutionEnvironment
  // Streaming execution environment (DataStream API).
  val senv = StreamExecutionEnvironment.getExecutionEnvironment

  /**
    * Word count over a text file located next to the compiled classes.
    */
  @Test
  def test1: Unit = {
    val classPath = this.getClass.getProtectionDomain.getCodeSource.getLocation.getPath
    println(classPath)
    val filePath = classPath + "word.txt"
    val text = senv.readTextFile(filePath)
    // Tokenize on non-word characters, drop empty tokens, then keep a
    // running count per word.
    val counts: DataStream[(String, Int)] =
      text.flatMap(_.toLowerCase.split("\\W+")).filter(_.nonEmpty).map((_, 1)).keyBy(0).sum(1)
    counts.print()

    senv.execute("word count")
  }

  /**
    * DataSet repartitioning: maximum of the second field per key.
    */
  @Test
  def test2: Unit = {
    val dataSet = benv.fromElements(("a", 1), ("a", 2), ("a", 3), ("b", 1), ("b", 2))

    // Maximum of the second field within each key group.
    val max = dataSet.groupBy(0).max(1)
    max.print()
    // Output:
    // (a,3)
    // (b,2)
  }

  /**
    * DataSet repartitioning plus per-group sorting.
    */
  @Test
  def test2_1: Unit = {
    val dataSet = benv.fromElements(("c", 3), ("c", 1), ("c", 2), ("a", 1), ("a", 2), ("a", 3), ("b", 1), ("b", 2))

    // Descending sort of the second field inside each key group.
    val max = dataSet.groupBy(0).sortGroup(1, Order.DESCENDING).first(100)
    max.print()
    // Output:
    //    (a,3)
    //    (a,2)
    //    (a,1)
    //    (b,2)
    //    (b,1)
    //    (c,3)
    //    (c,2)
    //    (c,1)
  }

  /**
    * DataSet repartitioning, per-group sorting, and a group-wise sum.
    */
  @Test
  def test2_2: Unit = {
    val dataSet = benv.fromElements(("c", 3), ("c", 1), ("c", 2), ("a", 1), ("a", 2), ("a", 3), ("b", 1), ("b", 2))

    // Sort each key group descending, then fold the group into a single
    // (key + "-sum", total) record. Every element of a group carries the
    // same key, so keeping the last-seen key is safe.
    val max = dataSet.groupBy(0).sortGroup(1, Order.DESCENDING)
      .reduceGroup((in: Iterator[(String, Int)], out: Collector[(String, Int)]) => {
        val (key, sum) = in.foldLeft(("", 0)) { case ((_, acc), (k, v)) => (k, acc + v) }
        out.collect((key + "-sum", sum))
      }).first(100)
    max.print()
    // Output:
    // (b-sum,3)
    // (a-sum,6)
    // (c-sum,6)
  }


  /**
    * DataStream repartitioning: running maximum of the second field per key.
    */
  @Test
  def test3: Unit = {
    val dataStream = senv.fromElements(("a", 3), ("a", 2), ("a", 4), ("a", 5), ("b", 2), ("b", 1))

    // Running maximum of the second field within each key.
    val max = dataStream.keyBy(0).max(1)
    max.print()

    senv.execute("test")
    // Output — Flink processes elements one at a time, so each printed
    // record is the running maximum for that key so far:
    //8> (a,3)
    //3> (b,2)
    //8> (a,3)
    //8> (a,4)
    //3> (b,2)
    //8> (a,5)

  }

  /**
    * Specifies the partitioning key through a KeySelector, then keeps a
    * running sum of the age field (index 1) per name.
    */
  @Test
  def test4: Unit = {
    val persons = senv.fromElements(Person("chenjj", 18), Person("chenjj", 20), Person("chenjj", 17), Person("chenjj1", 19))
    val keyed = persons.keyBy(new KeySelector[Person, String] {
      override def getKey(value: Person): String = value.name
    }).sum(1)

    keyed.print()
    senv.execute("test")

    // Output
    //12> Person(chenjj,18)
    //12> Person(chenjj,38)
    //  5> Person(chenjj1,19)
    //12> Person(chenjj,55)
  }
}

/**
  * Immutable record keyed on `name` by the streaming tests above.
  *
  * @param name partition key used by the KeySelector
  * @param age  numeric field aggregated with `sum(1)`
  */
final case class Person(name: String, age: Int)
