package com.shujia.flink.sql

import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.types.Row

object Demo1DSL {

  /**
   * Demo: define a dynamic table over a socket text stream and run a
   * continuous aggregation query with the Flink Table DSL, then print the
   * result as a changelog (retract/upsert) stream.
   *
   * Usage: optional `args(0)` = source host (default "master"),
   * optional `args(1)` = source port (default 8888).
   * Input format: one student per line, `id,name,age,sex,clazz`.
   */
  def main(args: Array[String]): Unit = {

    // Source host/port can be overridden from the command line so the job is
    // not hard-wired to one environment; the defaults preserve the original
    // behavior when no arguments are given.
    val host: String = if (args.length > 0) args(0) else "master"
    val port: Int = if (args.length > 1) args(1).toInt else 8888

    /**
     * Create the Flink Table/SQL execution environment on top of the
     * streaming execution environment.
     */
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    val tEnv: StreamTableEnvironment = StreamTableEnvironment.create(env)

    /**
     * 1. Read a socket to build a stream (one CSV line per student).
     */
    val linesDS: DataStream[String] = env.socketTextStream(host, port)

    /**
     * 2. Define a table on top of the stream.
     *
     * Table: analogous to a Spark DataFrame; supports the DSL API.
     */
    val studentDS: DataStream[Student] = linesDS
      .map(line => line.split(","))
      // Drop malformed/blank lines (fewer than 5 fields) instead of letting a
      // single bad record fail the whole job with an index-out-of-bounds error.
      .filter(fields => fields.length >= 5)
      .map(fields => Student(fields(0), fields(1), fields(2).toInt, fields(3), fields(4)))

    val studentTable: Table = tEnv.fromDataStream(studentDS)

    // Print the table schema (field names and types) for inspection.
    studentTable.printSchema()

    /**
     * 3. Run a continuous query on the dynamic table — returns a new dynamic
     * table.
     *
     * This aggregation produces an updating (changelog) result table: each
     * new student record may retract and re-emit its class's count.
     */
    val clazzNumTable: Table = studentTable
      .groupBy($"clazz")
      .select($"clazz", $"clazz".count().as("num"))

    /**
     * 4. Convert the result table back into a stream and print it.
     *
     * A result table can be converted to a stream in two ways:
     * 1. an insert-only stream (only valid for append-only results);
     * 2. an updating/changelog stream (required for aggregations like this).
     */
    // 1. Insert-only stream (would fail here because the result is updating):
    //val stream: DataStream[Row] = clazzNumTable.toDataStream

    // 2. Updating/changelog stream:
    val changelogStream: DataStream[Row] = clazzNumTable.toChangelogStream

    changelogStream.print()

    env.execute()
  }

  // Immutable record describing one student row parsed from the socket input.
  case class Student(id: String, name: String, age: Int, sex: String, clazz: String)

}
