package com.kingjw.flinkAPI

import java.sql.Timestamp

import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api.{DataTypes, EnvironmentSettings, Table}
import org.apache.flink.table.api.scala._
import org.apache.flink.table.descriptors.{Csv, FileSystem, Kafka, OldCsv, Schema}

/**
 *
 * @Package: com.kingjw.flinkAPI
 * @ClassName: TableSource
 * @Author: 王坚伟
 * @CreateTime: 2022/1/22 14:50
 * @Description: Registers a filesystem CSV table source via the legacy
 *               connect() descriptor API (with a processing-time attribute)
 *               and prints the table as a retract stream. Commented-out
 *               sections show the equivalent DDL and Kafka-source variants.
 */
object TableSource {

  /**
   * Entry point: builds a streaming table environment, registers a CSV file
   * as table "inputTable" (with processing-time attribute "pt"), and prints
   * the table contents as a retract stream.
   */
  def main(args: Array[String]): Unit = {
    // Single-parallelism streaming environment so printed output is ordered.
    val streamEnv = StreamExecutionEnvironment.getExecutionEnvironment
    streamEnv.setParallelism(1)

    // Blink planner, streaming mode.
    val envSettings = EnvironmentSettings
      .newInstance()
      .useBlinkPlanner()
      .inStreamingMode()
      .build()
    val tableEnv: StreamTableEnvironment = StreamTableEnvironment.create(streamEnv, envSettings)

    // Schema of the CSV source; "pt" is declared as a processing-time attribute.
    val sourceSchema = new Schema()
      .field("id", DataTypes.STRING())
      .field("timestamp", DataTypes.BIGINT())
      .field("temperature", DataTypes.DOUBLE())
      .field("pt", DataTypes.TIMESTAMP(3)).proctime()

    // Register the file-based source under the name "inputTable".
    tableEnv
      .connect(new FileSystem().path("input/sensor.txt"))
      .withFormat(new Csv())
      .withSchema(sourceSchema)
      .createTemporaryTable("inputTable")

    // Alternative: declare the same source with SQL DDL instead of descriptors.
//    val sinkDDL:String =
//      """
//        |create table inputTable(
//        |id varchar(20) not null,
//        |ts bigint,
//        |temperature double,
//        |pt as proctime()
//        |) with (
//        |'connector.type' = 'filesystem',
//        |'connector.path' = 'file:///E:\\bigdata\\bigdata\\FlinkTutorial\\input\\sensor.txt',
//        |'format.type' = 'csv'
//        |)
//        |""".stripMargin
//
//    tableEnv.sqlUpdate(sinkDDL)

    // Alternative: read the same records from a Kafka 0.11 topic.
//    tableEnv.connect(new Kafka()
//      .version("0.11")
//      .topic("sensor")
//      .property("zookeeper.connect", "hadoop117:2181,hadoop118:2181,hadoop119:2181")
//      .property("bootstrap.servers", "hadoop117:9092")
//    ).withFormat(new Csv())
//      .withSchema(new Schema().field("id",DataTypes.STRING())
//            .field("timestamp",DataTypes.BIGINT())
//            .field("temperature",DataTypes.DOUBLE())).createTemporaryTable("inputTable")

    // Look up the registered table and print it as a retract stream
    // (tuple layout mirrors the four schema fields above).
    val sensorTable: Table = tableEnv.from("inputTable")
//    val resultTable: Table = sensorTable.groupBy('id).select('id,'id.count as 'count)
//    val resultSqlTable: Table = tableEnv.sqlQuery("select id, count(id) as cnt from inputTable group by id")

    sensorTable
      .toRetractStream[(String, Long, Double, Timestamp)]
      .print("source test")
//    resultSqlTable.toRetractStream[(String,Long)].print("sql test")

    streamEnv.execute("table api test")
  }

}
