package APITest

import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.table.api.DataTypes
import org.apache.flink.table.api.scala.{StreamTableEnvironment, UnresolvedFieldExpression}
import org.apache.flink.table.descriptors.{Csv, Elasticsearch, FileSystem, Json, Kafka, OldCsv, Rowtime, Schema}
import org.apache.flink.types.Row

object APITest01 {

  /**
   * Demo of the (legacy) Flink Table API / SQL:
   *  - registers source tables via connector descriptors (FileSystem, Kafka),
   *  - runs an identical query via the Table API and via SQL,
   *  - converts between DataStream and Table,
   *  - registers several sinks (FileSystem, Kafka, Elasticsearch, JDBC) and writes the result,
   *  - converts the result Table back to DataStreams and prints them.
   */
  def main(args: Array[String]): Unit = {

    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val stream: DataStream[String] = env.readTextFile("D:\\codes\\IDEA_prjects\\FlinkDemo\\data\\sensor.txt")
    // Parse each CSV line "id,timestamp,temperature" into a typed record.
    // NOTE(review): assumes well-formed input — a malformed line throws here.
    val tempDS = stream.map(
      data => {
        val dataArr = data.split(",")
        TemperatureData(dataArr(0), dataArr(1).toLong, dataArr(2).toDouble)
      }
    )

    // Create the table environment on top of the streaming environment.
    val tableEnvironment = StreamTableEnvironment.create(env)

    // Register tables in the catalog.
    // 1. Connect to the file system (CSV format).
    //    NOTE(review): path is relative here but absolute for readTextFile above —
    //    confirm the working directory contains sensor.txt.
    tableEnvironment
      .connect(new FileSystem().path("sensor.txt"))
      .withFormat(new Csv())
      .withSchema(new Schema()
        .field("id", DataTypes.STRING())
        .field("timestamp", DataTypes.BIGINT())
        .field("temperature", DataTypes.DOUBLE())
        .field("pt", DataTypes.TIMESTAMP(3))
        .proctime() // mark the "pt" field as a processing-time attribute
        // Alternatively, declare an event-time attribute instead:
        // .rowtime(
        //   new Rowtime()
        //     .timestampsFromField("timestamp") // extract the timestamp from this field
        //     .watermarksPeriodicBounded(1000)  // watermark lags 1 second behind
        // )
      )
      .createTemporaryTable("inputTableFromFileSystem")

    // 2. Connect to Kafka.
    tableEnvironment
      .connect(
        new Kafka()
          .version("0.11")
          .topic("sensor")
          .property("zookeeper.connect", "localhost:2181")
          .property("bootstrap.servers", "localhost:9092")
      )
      .withFormat(new Csv)
      .withSchema(new Schema()
        .field("id", DataTypes.STRING())
        .field("timestamp", DataTypes.BIGINT())
        .field("temperature", DataTypes.DOUBLE())
      )
      .createTemporaryTable("inputTableFromKafka")

    // Table API query: project and filter.
    val table = tableEnvironment.from("inputTableFromFileSystem")
    val resultTable = table
      .select("id, temperature")
      .filter("id = 'sensor_1'") // fixed typo: was 'seneor_1', which matched no rows
    //.groupBy('id)
    //.select('id, 'id.count as 'c)

    // Equivalent SQL query.
    val sqlResultTable = tableEnvironment.sqlQuery(
      """
        |select id, temperature
        |from inputTableFromFileSystem
        |where id = 'sensor_1'
        |""".stripMargin)

    // DataStream => Table.
    // Keep the original fields and APPEND a new processing-time attribute 'pt.
    // (Appending it under an existing field name such as 'temp would clash with
    // the case-class field of the same name.)
    val tableFromDS = tableEnvironment.fromDataStream(tempDS, 'id, 'time, 'temp, 'pt.proctime)
    //    tableEnvironment.fromDataStream(tempDS, 'id, 'timestamp as 'ts)
    // Convert a DataStream to a Table while declaring an event-time attribute:
    //    val sensorTable = tableEnvironment.fromDataStream(dataStream, 'id, 'timestamp.rowtime, 'temperature)
    // Or append an extra rowtime field directly:
    //    val sensorTable2 = tableEnvironment.fromDataStream(dataStream, 'id, 'temperature, 'timestamp, 'rt.rowtime)
    // Temporary view backed by a DataStream-derived table.
    tableEnvironment.createTemporaryView("tempView", tableFromDS)
    // Temporary view backed by a Table.
    tableEnvironment.createTemporaryView("tempView1", resultTable)

    // Output sinks.
    // 1. FileSystem (CSV, append-only).
    tableEnvironment
      .connect(
        new FileSystem().path("D:\\codes\\IDEA_prjects\\FlinkDemo\\data\\sensorout.txt")
      )
      .withFormat(new Csv)
      .withSchema(new Schema()
        .field("id", DataTypes.STRING())
        .field("temp", DataTypes.DOUBLE())
      )
      .createTemporaryTable("outTable")
    resultTable.insertInto("outTable")

    // 2. Kafka.
    tableEnvironment.connect(
      new Kafka()
        .version("0.11")
        .topic("sinkTest")
        .property("zookeeper.connect", "localhost:2181")
        .property("bootstrap.servers", "localhost:9092")
    )
      .withFormat(new Csv())
      .withSchema(new Schema()
        .field("id", DataTypes.STRING())
        .field("temp", DataTypes.DOUBLE())
      )
      .createTemporaryTable("kafkaOutputTable")
    resultTable.insertInto("kafkaOutputTable")

    // 3. Elasticsearch (upsert mode).
    // Schema aligned with resultTable (id STRING, temperature DOUBLE); the
    // previous ("count", BIGINT) field would have failed insertInto validation.
    tableEnvironment.connect(
      new Elasticsearch()
        .version("6")
        .host("localhost", 9200, "http")
        .index("sensor")
        .documentType("temp")
    )
      .inUpsertMode() // write with upsert semantics
      .withFormat(new Json())
      .withSchema(new Schema()
        .field("id", DataTypes.STRING())
        .field("temperature", DataTypes.DOUBLE())
      )
      .createTemporaryTable("esOutputTable")
    resultTable.insertInto("esOutputTable")

    // 4. MySQL via JDBC DDL.
    // Column aligned with resultTable (temperature double); the previous
    // "cnt bigint" column did not match the inserted (STRING, DOUBLE) row type.
    val sinkDDL: String =
      """
        |create table jdbcOutputTable (
        | id varchar(20) not null,
        | temperature double not null
        |) with (
        | 'connector.type' = 'jdbc',
        | 'connector.url' = 'jdbc:mysql://localhost:3306/test',
        | 'connector.table' = 'sensor_count',
        | 'connector.driver' = 'com.mysql.jdbc.Driver',
        | 'connector.username' = 'root',
        | 'connector.password' = '123456'
        |)
        """.stripMargin
    tableEnvironment.sqlUpdate(sinkDDL)
    resultTable.insertInto("jdbcOutputTable")

    // 5. Table => DataStream.
    // Without aggregations the table is append-only, so toAppendStream works;
    // after a groupBy/aggregation (updating results) toRetractStream is required.
    val resultStream: DataStream[Row] = tableEnvironment.toAppendStream[Row](resultTable)
    // Tuple type fixed to (String, Double): resultTable's columns are
    // (id STRING, temperature DOUBLE); the previous (String, Long) would fail conversion.
    val resultStream1: DataStream[(Boolean, (String, Double))] =
      tableEnvironment.toRetractStream[(String, Double)](resultTable)
    resultStream.print()
    resultStream1.print()

    // A streaming job only runs once execute() is called; this was missing before.
    env.execute("table api test")
  }

  /** One sensor reading: id, event timestamp (epoch millis assumed — confirm), temperature. */
  case class TemperatureData(id: String, time: Long, temp: Double)

}
