package flink_p2_sql

import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment, _}
import org.apache.flink.table.api.Table
import org.apache.flink.table.api.scala.StreamTableEnvironment
import org.apache.flink.types.Row


object tableApi03_tableApi_operator {

  /** One parsed input record; source lines are "id name age", space-separated. */
  case class Person(id: Int, name: String, age: Int)

  /**
   * Demonstrates basic Table API operators (groupBy/select/where) over a
   * socket-backed stream. Feed it lines of `id name age` with e.g.
   * `nc -l 8889` on localhost before starting the job.
   *
   * @param args unused
   */
  def main(args: Array[String]): Unit = {

    val streamEnv: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    val tableEnv: StreamTableEnvironment = StreamTableEnvironment.create(streamEnv)

    val socketStream: DataStream[String] = streamEnv.socketTextStream("127.0.0.1", 8889)

    // Parse each space-separated line into a Person.
    // NOTE(review): malformed lines (wrong field count, non-numeric id/age)
    // will throw and fail the job — confirm fail-fast is the intended behavior.
    val dStream: DataStream[Person] = socketStream.map(data => {
      val arr: Array[String] = data.split(" ")
      Person(arr(0).toInt, arr(1), arr(2).toInt) // case class: companion apply, no `new` needed
    })

    import org.apache.flink.table.api.scala._ // implicit conversions for 'field expressions and toRetractStream
    val t_user: Table = tableEnv.fromDataStream(dStream, 'id, 'name, 'age)

    /**
     * test1: aggregate a count per id; each emitted record carries a Boolean flag.
     */
    // Retract-stream Boolean semantics:
    //   true  - the record is an insertion (a brand-new or updated aggregate value)
    //   false - the record retracts a previously emitted value (the old value is
    //           re-emitted flagged false before the updated one arrives as true)
//    val resStream: DataStream[(Boolean, Row)] = t_user.groupBy('id)
//      .select('id, 'id.count)
//      .toRetractStream[Row]
//      .filter(_._1 == true)               // keep only insertions
//    resStream.print()

    /**
     * test2: filter rows with `where`.
     */
    t_user.select('id, 'name, 'age)
//      .where('name.equals("aa"))
      .where('name === "aa")      // Table API string comparison uses ===, not equals
      .toRetractStream[Row]
      .print()

    tableEnv.execute("test")
  }
}
