package com.chenjj.bigdata.flink.book

import java.util.Properties

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.api.scala.{DataSet, ExecutionEnvironment, _}
import org.apache.flink.core.fs.FileSystem.WriteMode
import org.apache.flink.streaming.api.functions.co.CoMapFunction
import org.apache.flink.streaming.api.scala.{ConnectedStreams, DataStream, StreamExecutionEnvironment}
import org.apache.flink.streaming.connectors.kafka.{FlinkKafkaConsumer, FlinkKafkaProducer}
import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema
import org.junit.{Assert, Test}


/**
  * Chapter 4.1: the DataStream programming model.
  *
  * Each test exercises one DataStream API concept (sources, basic
  * transformations, multi-stream operators, partitioning and sinks)
  * against the local execution environment. The numbered "Output:"
  * comment blocks record the console output observed when the test
  * was run; the `N>` prefix is the index of the printing subtask.
  */
@Test
class FlinkTesterChapter4_1 extends Assert {
  // Shared streaming environment used by every test in this class.
  val env = StreamExecutionEnvironment.getExecutionEnvironment

  /**
    * Flink DataSource test -- consumes a Kafka topic and handles every
    * incoming record as a plain String.
    */
  @Test
  def test1: Unit = {
    val properties = new Properties()
    properties.setProperty("bootstrap.servers", "192.168.152.134:9092")
    //properties.setProperty("zookeeper.connect","192.168.152.134:2181")
    properties.setProperty("group.id", "testGRP")

    val consumer = new FlinkKafkaConsumer[String]("test", new SimpleStringSchema(), properties)
    consumer.setStartFromGroupOffsets() // consumption strategy: resume from committed group offsets

    val input = env.addSource(consumer)

    input.print()

    env.execute("testData")
  }

  /**
    * Map operator test: increments the second tuple field of each element.
    */
  @Test
  def test2: Unit = {
    val dataStream = env.fromElements(("a", 1), ("b", 2), ("c", 3), ("d", 4))
    dataStream.map(t => { (t._1, t._2 + 1) }).print()
    env.execute()

    // Output:
    //10> (c,4)
    //8> (a,2)
    //9> (b,3)
    //11> (d,5)
  }

  /**
    * Reduce operator test: running sum of values sharing the same key.
    */
  @Test
  def test3: Unit = {
    val dataStream = env.fromElements(("a", 1), ("a", 2), ("a", 3), ("a", 4), ("a", 5), ("b", 3), ("b", 4))
    val keyStream = dataStream.keyBy(0)
    val reduceStream = keyStream.reduce((t1, t2) => {
      println("t1=" + t1 + ",t2=" + t2)
      (t1._1, t1._2 + t2._2)
    }) // sum the values of every element with the same key

    reduceStream.print()
    env.execute()
    // Output:
    //t1=(a,1),t2=(a,2)
    //t1=(b,3),t2=(b,4)
    //t1=(a,3),t2=(a,3)
    //t1=(a,6),t2=(a,4)
    //t1=(a,10),t2=(a,5)

    //8> (a,1)
    //3> (b,3)
    //8> (a,3)
    //3> (b,7)
    //8> (a,6)
    //8> (a,10)
    //8> (a,15)
  }

  /**
    * Aggregations test: `sum` is a built-in shorthand for the manual
    * reduce performed in test3.
    */
  @Test
  def test4: Unit = {
    val dataStream = env.fromElements(("a", 1), ("a", 2), ("b", 3), ("b", 4))
    val keyStream = dataStream.keyBy(0)
    val aggregationStream = keyStream.sum(1) // per-key running sum; simplified form of test3's reduce

    aggregationStream.print()
    env.execute()
    // Output:
    //3> (b,7)
    //8> (a,3)
  }


  /**
    * Union test: merges several streams of the SAME element type into one.
    */
  @Test
  def test5: Unit = {
    val dataStream1 = env.fromElements(("a", 1), ("a", 2), ("b", 3), ("b", 4))
    val dataStream2 = env.fromElements(("a1", 1), ("a1", 2), ("b1", 3), ("b1", 4))
    val dataStream3 = env.fromElements(("a2", 1), ("a2", 2), ("b2", 3), ("b2", 4))

    val unionStream = dataStream1.union(dataStream2)
    val allUnionStream = unionStream.union(dataStream3)

    allUnionStream.print()
    env.execute()

    // Output:
    //11> (a2,2)
    //3> (b,4)
    //4> (b1,4)
    //2> (a1,2)
    //1> (a1,1)
    //10> (a2,1)
    //12> (a,1)
    //2> (b,3)
    //12> (b2,3)
    //1> (a,2)
    //1> (b2,4)
    //3> (b1,3)
  }

  /**
    * Connect test: unlike union, connect merges streams of two DIFFERENT
    * element types; each side keeps its own map function.
    */
  @Test
  def test6: Unit = {
    val dataStream1 = env.fromElements(("a", 1), ("a", 2), ("b", 3), ("b", 4))
    val dataStream2 = env.fromElements(1, 2, 3, 4)
    val connectedStreams = dataStream1.connect(dataStream2)

    val resultString = connectedStreams.map(new CoMapFunction[(String, Int), Int, (Int, String)] {
      // Applied to elements arriving from dataStream1.
      override def map1(value: (String, Int)): (Int, String) = {
        (value._2, value._1)
      }

      // Applied to elements arriving from dataStream2.
      override def map2(value: Int): (Int, String) = {
        (value, "default")
      }
    })

    resultString.print()

    env.execute()

    // Output:
    //9> (2,default)
    //4> (3,b)
    //10> (3,default)
    //3> (2,a)
    //5> (4,b)
    //11> (4,default)
    //8> (1,default)
    //2> (1,a)
  }

  /**
    * Connect + keyBy test: produces strongly correlated result data --
    * this is also the basic building block of a distributed join.
    */
  @Test
  def test7: Unit = {
    val dataStream1 = env.fromElements(("a", 1), ("a", 2), ("b", 3), ("b", 4))
    val dataStream2 = env.fromElements(1, 2, 3, 4)
    // keyBy routes both streams by the given keys so that matching keys
    // land on the same parallel subtask.
    val connectedStreams = dataStream1.connect(dataStream2).keyBy(x => x._2, x => x)

    val resultString = connectedStreams.map(new CoMapFunction[(String, Int), Int, (Int, String)] {
      override def map1(value: (String, Int)): (Int, String) = {
        (value._2, value._1)
      }

      override def map2(value: Int): (Int, String) = {
        (value, "default")
      }
    })

    resultString.print()

    env.execute()

    // Output -- note that identical keys always appear on the same subtask:
    //12> (2,a)
    //9> (1,a)
    //11> (3,b)
    //1> (4,b)
    //9> (1,default)
    //12> (2,default)
    //1> (4,default)
    //11> (3,default)
  }


  /**
    * Connect + broadcast test: broadcast replicates dataStream2 to EVERY
    * parallel subtask before the co-map logic runs, another basic building
    * block of a distributed join.
    */
  @Test
  def test8: Unit = {
    val dataStream1 = env.fromElements(("a", 1), ("a", 2), ("b", 3), ("b", 4))
    val dataStream2 = env.fromElements(1, 2, 3, 4)
    // broadcast sends every element of dataStream2 to all parallel
    // instances of the downstream operator.
    val connectedStreams = dataStream1.connect(dataStream2.broadcast)

    val resultString = connectedStreams.map(new CoMapFunction[(String, Int), Int, (Int, String)] {
      override def map1(value: (String, Int)): (Int, String) = {
        (value._2, value._1)
      }

      override def map2(value: Int): (Int, String) = {
        (value, "default")
      }
    })

    resultString.print()

    env.execute()

    // Output -- note that every subtask receives all of dataStream2:
    //8> (1,default)
    //8> (2,default)
    //11> (1,default)
    //11> (2,default)
    //2> (2,a)
    //10> (1,default)
    //10> (2,default)
    //1> (1,default)
    //1> (2,default)
    //1> (3,default)
    //3> (1,default)
    //3> (2,default)
    //3> (3,default)
    //3> (4,default)
    //4> (4,b)
    //4> (1,default)
    //6> (1,default)
    //7> (1,default)
    //7> (2,default)
    //7> (3,default)
    //7> (4,default)
    //5> (1,default)
    //6> (2,default)
    //4> (2,default)
    //4> (3,default)
    //4> (4,default)
    //1> (4,default)
    //10> (3,default)
    //2> (1,default)
    //2> (2,default)
    //2> (3,default)
    //11> (3,default)
    //8> (3,default)
    //12> (1,default)
    //12> (2,default)
    //12> (3,default)
    //12> (4,default)
    //8> (4,default)
    //11> (4,default)
    //2> (4,default)
    //10> (4,default)
    //1> (1,a)
    //3> (3,b)
    //6> (3,default)
    //5> (2,default)
    //5> (3,default)
    //6> (4,default)
    //5> (4,default)
    //9> (1,default)
    //9> (2,default)
    //9> (3,default)
    //9> (4,default)
  }


  /**
    * Split test.
    *
    * NOTE(review): the split/select API is deprecated and removed in newer
    * Flink releases (use side outputs instead), which is presumably why
    * the body below was commented out -- confirm against the project's
    * Flink version before re-enabling.
    */
  @Test
  def test9: Unit = {
    val dataStream = env.fromElements(("a", 1), ("a", 2), ("b", 3), ("b", 4))
//    // route elements into "even"/"odd" partitions
//    val splitStream = dataStream.split(x=>{if(x._2%2 == 0) Seq("even") else Seq("odd")})
//
//    // print only the even-valued elements
//    splitStream.select("even").print()

    env.execute()

    // Output:
    //12> (b,4)
    //11> (a,2)
  }

  /**
    * Iterate test: feeds part of the stream back into the loop until the
    * feedback condition no longer holds (5000 ms idle timeout).
    */
  @Test
  def test10: Unit = {
    val dataStream = env.fromElements(1, 2, 3, 1, 5).map(x => x)
    val iterated = dataStream.iterate((input: ConnectedStreams[Int, String]) => {

      // The first function receives elements from dataStream; the second
      // receives elements fed back from a previous iteration.
      val head = input.map(i => { println("i=" + i); (i + 1).toString }, s => { println("s=" + s); (Integer.parseInt(s) + 1).toString })
      // First stream of the tuple keeps iterating; the second is emitted downstream.
      (head.filter(_ == "2"), head.filter(_ != "2"))

    }, 5000)
    iterated.print()

    env.execute()

    // Output:
    //i=5
    //i=1
    //i=2
    //i=3
    //i=1
    //s=2
    //s=2

    //12> 6
    //10> 4
    //9> 3
    //11> 3
    //8> 3
  }

  /**
    * Physical-partitioning test.
    *
    * Fix: `shuffle` is not an in-place operation -- it returns a NEW,
    * randomly partitioned DataStream. The original code discarded that
    * return value, so both print() calls observed the same (un-shuffled)
    * stream. The shuffled stream is now captured and printed.
    */
  @Test
  def test11: Unit = {
    val dataStream = env.fromElements(1, 2, 3, 1, 5)
    dataStream.print()
    val shuffledStream = dataStream.shuffle // random redistribution across subtasks
    shuffledStream.print()

    env.execute()
  }

  /**
    * Basic data-sink test: writes the stream to local CSV and text files,
    * overwriting any previous run's output.
    */
  @Test
  def test12: Unit = {
    val dataStream = env.fromElements(("a", 1), ("a", 2), ("b", 3), ("b", 4))
    dataStream.writeAsCsv("D://tmp//test.csv", WriteMode.OVERWRITE)
    dataStream.writeAsText("D://tmp//test.txt", WriteMode.OVERWRITE)
    env.execute()
  }

  /**
    * Third-party data-sink test: publishes the stream to a Kafka topic.
    */
  @Test
  def test13: Unit = {

    val dataStream = env.fromElements("a1", "b2", "c3", "d4")

    val properties = new Properties()
    properties.setProperty("bootstrap.servers", "192.168.152.134:9092")
    properties.setProperty("group.id", "testGRP")

    val producer = new FlinkKafkaProducer[String]("test", new SimpleStringSchema(), properties)

    dataStream.addSink(producer)

    env.execute("testData")
  }
}
