package data.connector

import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.Table
import org.apache.flink.table.descriptors.{Csv, FileSystem, OldCsv, Schema}
import org.apache.flink.types.Row

// First thing to do during development: bring the Scala implicits into scope.
import org.apache.flink.api.scala._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.table.api._

/**
 * Flink Table API job: reads an access log from a CSV file, keeps only the
 * rows for a single domain, and writes (domain, traffic) back out as CSV.
 *
 * Input schema : timestamp BIGINT, domain STRING, traffic FLOAT
 * Output       : CSV files under directory "out2"
 */
object FileSystemConnector {
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    // Parallelism 1 so the filesystem sink produces a single output file
    // instead of one file per subtask.
    env.setParallelism(1)
    val tableEnvironment = StreamTableEnvironment.create(env)

    val path = "data/access.log"

    // Source: register the CSV file as temporary table "access_ods".
    // NOTE(review): connect()/descriptor API is deprecated since Flink 1.11 —
    // consider migrating to executeSql("CREATE TABLE ... WITH ('connector'='filesystem', ...)").
    tableEnvironment.connect(new FileSystem().path(path))
        .withFormat(new Csv())
        .withSchema(new Schema()
            .field("timestamp", DataTypes.BIGINT())
            .field("domain", DataTypes.STRING())
            .field("traffic", DataTypes.FLOAT())
        ).createTemporaryTable("access_ods")

    // Keep only rows for the target domain; the projection drops the
    // timestamp column, leaving (domain STRING, traffic FLOAT).
    val resultTable = tableEnvironment.from("access_ods")
      .select('domain, 'traffic)
      .filter('domain === "bsdata.com")

    // Sink: CSV filesystem table. Field renamed "traffics" -> "traffic" to
    // match the projected schema above (the old name was a leftover from a
    // since-removed aggregation variant and no longer described the data).
    tableEnvironment.connect(new FileSystem().path("out2"))
      .withFormat(new Csv())
      .withSchema(new Schema()
        .field("domain", DataTypes.STRING())
        .field("traffic", DataTypes.FLOAT())
      ).createTemporaryTable("fileoutput")

    // executeInsert submits the job on its own; a separate env.execute()
    // call is not needed (and would fail with "no operators defined").
    resultTable.executeInsert("fileoutput")
  }
}
