package data.connector

import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.descriptors.{Csv, Elasticsearch, FileSystem, Json, Kafka, Schema}
import org.apache.flink.types.Row

// First thing to do when developing: bring the Scala implicit conversions into scope.
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._

/**
 * Reads a CSV access log, aggregates total traffic per domain, prints the
 * retract stream for inspection, and upserts the aggregates into an
 * Elasticsearch 7 index via the (legacy) descriptor-based connector API.
 */
object ESConnector {
  def main(args: Array[String]): Unit = {
    // Single parallel task keeps the printed output ordering deterministic.
    val streamEnv = StreamExecutionEnvironment.getExecutionEnvironment
    streamEnv.setParallelism(1)
    val tableEnv = StreamTableEnvironment.create(streamEnv)

    val sourcePath = "data/access.log"

    // Source schema: (timestamp, domain, traffic) columns of the CSV log.
    val sourceSchema = new Schema()
      .field("timestamp", DataTypes.BIGINT())
      .field("domain", DataTypes.STRING())
      .field("traffic", DataTypes.FLOAT())

    // Register the CSV file as the temporary table "access_ods".
    tableEnv
      .connect(new FileSystem().path(sourcePath))
      .withFormat(new Csv())
      .withSchema(sourceSchema)
      .createTemporaryTable("access_ods")

    // Total traffic per domain; grouped aggregation on an unbounded stream
    // produces an updating (retract) result.
    val aggregated = tableEnv
      .from("access_ods")
      .groupBy('domain)
      .aggregate('traffic.sum().as("traffics"))
      .select('domain, 'traffics)

    // Debug output: retract stream shows (add/retract, row) update pairs.
    aggregated.toRetractStream[Row].print("...")

    // Elasticsearch 7 sink descriptor. NOTE(review): ES7 ignores document
    // types, but the descriptor API still requires one — "_doc" is the
    // conventional value.
    val esDescriptor = new Elasticsearch()
      .version("7")
      .host("hadoop000", 9200, "http")
      .index("bsdata_access_elasticsearch")
      .documentType("_doc")

    val sinkSchema = new Schema()
      .field("domain", DataTypes.STRING())
      .field("traffics", DataTypes.FLOAT())

    // Upsert mode so updated aggregates overwrite earlier values per key.
    tableEnv
      .connect(esDescriptor)
      .inUpsertMode()
      .withFormat(new Json())
      .withSchema(sinkSchema)
      .createTemporaryTable("esoutput")

    // Submits the Table API insert job writing aggregates to Elasticsearch.
    aggregated.executeInsert("esoutput")

    // Executes the DataStream job containing the print() operator above.
    streamEnv.execute("xx")
  }
}
