package com.ydl.learning.flink.demo.table

import com.alibaba.fastjson.JSON
import com.ydl.learning.flink.demo.TableUtils
import idata.common.model.middleground.FeedMessage
import java.time.Duration
import org.apache.flink.api.common.state.MapStateDescriptor
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.scala.typeutils.Types
import org.apache.flink.streaming.api.CheckpointingMode
import org.apache.flink.streaming.api.environment.ExecutionCheckpointingOptions
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction
import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.types.Row
import org.apache.flink.util.Collector
import scala.collection.JavaConversions._


/**
 * Table API 或者 SQL 查询在下列情况下会被翻译：
 *
 * 当 TableEnvironment.executeSql() 被调用时。该方法是用来执行一个 SQL 语句，一旦该方法被调用， SQL 语句立即被翻译。
 * 当 Table.executeInsert() 被调用时。该方法是用来将一个表的内容插入到目标表中，一旦该方法被调用， TABLE API 程序立即被翻译。
 * 当 Table.execute() 被调用时。该方法是用来将一个表的内容收集到本地，一旦该方法被调用， TABLE API 程序立即被翻译。
 *
 * 当 StatementSet.execute() 被调用时。Table （通过 StatementSet.addInsert() 输出给某个 Sink）和
 * INSERT 语句 （通过调用 StatementSet.addInsertSql()）会先被缓存到 StatementSet 中，StatementSet.execute() 方法被调用时，
 * 所有的 sink 会被优化成一张有向无环图。
 *
 * 当 Table 被转换成 DataStream 时（参阅与 DataStream 和 DataSet API 结合）。
 * 转换完成后，它就成为一个普通的 DataStream 程序，并会在调用 StreamExecutionEnvironment.execute() 时被执行。
 *
 * @author ydl
 * @since 2020/12/8
 */
object TableDemo extends TableUtils {

  /** Simple (word, count) pair used by [[firstTableDemo]]. */
  case class WordWithCount(word: String, count: Long)

  // Shared socket text source supplied by TableUtils:
  // (stream execution environment, raw text-line stream).
  private val info: (StreamExecutionEnvironment, DataStream[String]) = getSocketTextStream

  /**
   * First Table API example: split socket lines into words, register the
   * resulting stream as view "myTable" and query it with SQL.
   *
   * For streaming mode, `TableResult.collect()` / `TableResult.print()`
   * guarantee exactly-once end-to-end delivery, which requires checkpointing
   * (disabled by default) to be enabled via `TableConfig`. A result row only
   * becomes visible to the client after its checkpoint completes, and only
   * append-only queries are supported in this mode.
   */
  def firstTableDemo(): Unit = {
    implicit val typeInformation: TypeInformation[String] = TypeInformation.of(classOf[String])
    implicit val typeInformation2: TypeInformation[WordWithCount] = TypeInformation.of(classOf[WordWithCount])
    val stream: DataStream[WordWithCount] = info._2.flatMap(_.split(" ")).map(WordWithCount(_, 1))
    tableEnv.createTemporaryView("myTable", stream)
    // Enable exactly-once checkpointing so query results can be delivered
    // to the client (see the scaladoc above).
    tableEnv.getConfig.getConfiguration.set(ExecutionCheckpointingOptions.CHECKPOINTING_MODE, CheckpointingMode.EXACTLY_ONCE)
    tableEnv.getConfig.getConfiguration.set(
      ExecutionCheckpointingOptions.CHECKPOINTING_INTERVAL, Duration.ofSeconds(10))
    val table: Table = tableEnv.sqlQuery("select * from myTable ")
    table.printSchema()
    table.execute().print()

    // Same query via executeSql — translated and executed immediately.
    tableEnv.executeSql("select * from myTable").print()
  }

  /**
   * Registers a projection of table "X" as the temporary view "projectedTable".
   *
   * NOTE(review): the method name contains a typo ("crarte" for "create");
   * it is kept unchanged so existing callers keep compiling.
   */
  def crarteView(): Unit = {
    // projTable is the result of a simple projection query on table "X".
    val projTable: Table = tableEnv.from("X").select("a")
    tableEnv.createTemporaryView("projectedTable", projTable)
  }

  /**
   * Demonstrates an updating (changelog) result: an aggregation over an
   * insert-only DataStream produces updates, which are emitted as a
   * changelog DataStream and printed.
   */
  def updateTable(): Unit = {
    // Create an insert-only DataStream of (name, score) rows.
    val dataStream = streamEnv.fromElements(
      Row.of("Alice", Int.box(12)),
      Row.of("Bob", Int.box(10)),
      Row.of("Bob", Int.box(20)),
      Row.of("Alice", Int.box(100))
    )(Types.ROW(Types.STRING, Types.INT))

    // Interpret the insert-only DataStream as a Table.
    val inputTable = tableEnv.fromDataStream(dataStream).as("name", "score")
    // Register the Table object as a view and query it; the aggregation
    // produces updates, not plain inserts.
    tableEnv.createTemporaryView("InputTable", inputTable)
    val resultTable = tableEnv.sqlQuery("SELECT name, SUM(score) FROM InputTable GROUP BY name")

    // Interpret the updating Table as a changelog DataStream.
    val resultStream = tableEnv.toChangelogStream(resultTable)

    // Add a printing sink and execute in the DataStream API.
    resultStream.print()
    streamEnv.execute()
  }

  /**
   * Round-trips between DataStream and Table: DataStream -> view -> SQL ->
   * Table -> DataStream, decorating each result before printing.
   */
  def streamAndTableConvert(): Unit = {
    implicit val typeInfo: TypeInformation[String] = TypeInformation.of(classOf[String])
    val dataStream = streamEnv.fromElements("Alice", "Bob", "John")
    // Interpret the insert-only DataStream as a Table and register it.
    val inputTable = tableEnv.fromDataStream(dataStream)
    tableEnv.createTemporaryView("InputTable", inputTable)
    val resultTable = tableEnv.sqlQuery("SELECT UPPER(f0) FROM InputTable")
    resultTable.execute().print()
    // Interpret the insert-only Table as a DataStream again.
    val resultStream = tableEnv.toDataStream(resultTable).map("!!" + _.getField(0).toString + "!!")

    // Add a printing sink and execute in the DataStream API.
    resultStream.print()
    streamEnv.execute()
  }

  /**
   * Shows the different points at which Table API / SQL programs are
   * translated and executed: explicit sinks (executeInsert / INSERT INTO /
   * StatementSet) and implicit local sinks (execute().print()).
   */
  def tableApiDemo(): Unit = {
    // Execute with an explicit sink — translated immediately.
    tableEnv.from("InputTable").executeInsert("OutputTable")

    tableEnv.executeSql("INSERT INTO OutputTable SELECT * FROM InputTable")

    // Inserts buffered in a StatementSet are optimized into a single DAG
    // when execute() is called.
    tableEnv.createStatementSet()
      .addInsert("OutputTable", tableEnv.from("InputTable"))
      .addInsert("OutputTable2", tableEnv.from("InputTable"))
      .execute()

    tableEnv.createStatementSet()
      .addInsertSql("INSERT INTO OutputTable SELECT * FROM InputTable")
      .addInsertSql("INSERT INTO OutputTable2 SELECT * FROM InputTable")
      .execute()

    // Execute with an implicit local sink (collect to the client).
    tableEnv.from("InputTable").execute().print()

    tableEnv.executeSql("SELECT * FROM InputTable").print()
  }

  /**
   * Declares a Kafka-backed table directly with DDL and runs one filtered
   * query per id value.
   *
   * Known issue (by design of this demo): each query creates its own Kafka
   * consumer instead of fanning out one consumer's data to several operators.
   */
  def kafkaDirectLinkDemo(): Unit = {
    tableEnv.executeSql(
      """
        |CREATE TABLE KafkaTable (
        |  `traceId` STRING,
        |  `title` STRING,
        |  `createTime` BIGINT,
        |  `extra` ROW(
        |  mediaDownloaded INT,
        |  downloadFail INT
        |  ),
        |  sourceId BIGINT,
        |  `ts` TIMESTAMP(3) METADATA FROM 'timestamp'
        |) WITH (
        |  'connector' = 'kafka',
        |  'topic' = 'test',
        |  'properties.bootstrap.servers' = '10.50.168.216:9092,10.50.168.148:9092,10.50.168.242:9092',
        |  'properties.group.id' = 'testGroup',
        |  'scan.startup.mode' = 'latest-offset',
        |  'format' = 'json'
        |)
      """.stripMargin)
    // Build one filtered stream per id; immutable mapping instead of
    // appending to a var list.
    val streams: Seq[DataStream[String]] = (0 to 0).map { i =>
      println(i)
      tableEnv.toDataStream(
        tableEnv
          .sqlQuery("select extra.mediaDownloaded,extra.downloadFail,title,sourceId from KafkaTable where sourceId = " + i)
      )
        .map(_.toString)
        .name(i + "toMap")
    }
    streams.foreach(_.print())
    streamEnv.execute("json")
  }

  /**
   * Consumes Kafka through the DataStream API first, then converts the
   * stream to a table and runs one filtered query per id value.
   *
   * Drawback: the filter SQL is fixed once the job is started.
   */
  def kafkaStream2TableDemo(): Unit = {
    val dataStream: DataStream[FeedMessage] = streamEnv.addSource(consumer).setParallelism(2)
      .map(str => JSON.toJavaObject(JSON.parseObject(str), classOf[FeedMessage]))

    val table = tableEnv.fromDataStream(dataStream)
    table.printSchema()
    tableEnv.createTemporaryView("KafkaTable", table)
    // NOTE(review): sourceId is compared as a quoted string here ('%d') while
    // kafkaDirectLinkDemo compares it numerically — verify FeedMessage.sourceId's
    // type and make the two queries consistent.
    val streams: Seq[DataStream[String]] = (0 to 1).map { i =>
      println(i)
      tableEnv.toDataStream(
        tableEnv
          .sqlQuery("select title,sourceId from KafkaTable where sourceId = '%d'".format(i))
      )
        .map(_.toString)
        .name(i + "toMap")
    }
    streams.foreach(_.print())
    streamEnv.execute("json")

  }

  /**
   * Broadcast-state demo: a small channel-name stream is broadcast as a
   * rule set; every FeedMessage is joined against every broadcast rule.
   */
  def tmp(): Unit = {
    val dataStream: DataStream[FeedMessage] = streamEnv.addSource(consumer).setParallelism(2)
      .map(str => JSON.toJavaObject(JSON.parseObject(str), classOf[FeedMessage]))
      .name("convert")

    val channelStream: DataStream[String] = streamEnv.fromElements("华为", "vivo", "小米")
    val channelDesc: MapStateDescriptor[String, String] = new MapStateDescriptor[String, String](
      "jobBroadCast",
      Types.STRING,
      Types.STRING
    )
    val channelBroadcast = channelStream.broadcast(channelDesc)
    dataStream.connect(channelBroadcast).process(new BroadcastProcessFunction[FeedMessage, String, (FeedMessage, String)] {
      val stateDescriptor_ : MapStateDescriptor[String, String] = channelDesc

      override def processElement(value: FeedMessage,
                                  ctx: BroadcastProcessFunction[FeedMessage, String, (FeedMessage, String)]#ReadOnlyContext,
                                  out: Collector[(FeedMessage, String)]): Unit = {
        // Emit one output per broadcast rule. Iterate with an explicit Java
        // iterator instead of relying on the deprecated
        // scala.collection.JavaConversions implicit wrapping.
        val entries = ctx.getBroadcastState(stateDescriptor_).immutableEntries().iterator()
        while (entries.hasNext) {
          val entry = entries.next()
          println(entry.getKey, entry.getValue)
          out.collect((value, entry.getValue))
        }
      }

      override def processBroadcastElement(value: String,
                                           ctx: BroadcastProcessFunction[FeedMessage, String, (FeedMessage, String)]#Context,
                                           out: Collector[(FeedMessage, String)]): Unit = {
        // Log receipt of a broadcast rule and store it in broadcast state.
        println("接收广播规则%s".format(value))
        ctx.getBroadcastState(stateDescriptor_).put(value, "%s过滤规则".format(value))
      }
    })
      .name("broadcast")
      //.setParallelism(3)
      .map(data => {
        // Deliberate slow-down so operator parallelism / backpressure can be
        // observed while the demo runs.
        println(data._2)
        Thread.sleep(10000)
        data._1.getTitle + data._2
      })
      .name("match")
      .setParallelism(3)
      .print()
    streamEnv.execute("test")
  }

  /** Entry point — uncomment exactly one demo to run it. */
  def main(args: Array[String]): Unit = {
    //firstTableDemo()
    //updateTable()
    //streamAndTableConvert()
    //kafkaDirectLinkDemo()
    //kafkaStream2TableDemo()
    tmp()
  }
}
