package com.shujia.flink.sql

import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api.{DataTypes, EnvironmentSettings, Schema, Table}
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment
import org.apache.flink.types.Row

object Demo1DynamicTable {
  def main(args: Array[String]): Unit = {

    // Create the Flink streaming execution environment
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Create the Flink SQL (Table API) environment on top of the streaming env
    val tEnv: StreamTableEnvironment = StreamTableEnvironment.create(env)

    /**
     * 1. Create a source stream: one comma-separated line per click event,
     *    in the form "userId,cTime,url".
     */
    val linesDS: DataStream[String] = env.socketTextStream("master", 8888)

    // Parse each raw line into a Clicks record
    val clicksDS: DataStream[Clicks] = linesDS.map(line => {
      val split: Array[String] = line.split(",")
      val userId: String = split(0)
      val cTime: String = split(1)
      val url: String = split(2)
      Clicks(userId, cTime, url)
    })


    /**
     * 2. Register the stream as a dynamic table named "clicks".
     *    Column names come from the Clicks case class fields (user, cTime, url).
     */
    tEnv.createTemporaryView("clicks", clicksDS)


    /**
     * 3. Run a continuous query on the dynamic table.
     *
     * Table: Flink's DSL API object, similar to Spark's DataFrame.
     *
     * NOTE: `user` is a reserved keyword in Flink SQL (Calcite), so it must be
     * escaped with backticks — the unescaped form fails to parse at runtime.
     */
    val resultTable: Table = tEnv.sqlQuery(
      """
        |select
        |`user`, count(url) as cnt
        |from clicks
        |group by `user`
        |
        |""".stripMargin)


    /**
     * 4. Convert the resulting dynamic table back into a stream.
     * A dynamic table can be converted in two ways:
     * 1. an append-only stream
     * 2. a changelog (update) stream
     * The grouped aggregation updates previously emitted counts, so a
     * changelog stream is required here (toDataStream would fail).
     */
    val resultDS: DataStream[Row] = tEnv.toChangelogStream(resultTable)

    resultDS.print()

    env.execute()


  }

  // One click event: which user clicked which URL at what time
  case class Clicks(user: String, cTime: String, url: String)

}
