package com.feiwei

import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.DataTypes
import org.apache.flink.table.descriptors.{Csv, FileSystem, Schema}
import org.apache.flink.streaming.api.scala.{StreamExecutionEnvironment, _}
import org.apache.flink.table.api.scala.{StreamTableEnvironment, _}
import org.apache.flink.table.api.{DataTypes, Table}
import org.apache.flink.table.descriptors.{Csv, FileSystem, Kafka, Schema}
import org.apache.flink.table.expressions._
import org.apache.flink.table.api.scala._

import org.apache.flink.streaming.api.scala.{StreamExecutionEnvironment, _}
import org.apache.flink.table.api.scala.{StreamTableEnvironment, _}
import org.apache.flink.table.api.{DataTypes, Table}
import org.apache.flink.table.descriptors.{Csv, FileSystem, Kafka, Schema}

/**
 * Demo: register a file-backed source table, run a projection and a grouped
 * aggregation on it, print the aggregation as a retract stream, and write the
 * projection out to a file-backed sink table (legacy Flink descriptor API).
 */
object day5_FileOutPutFile {

  def main(args: Array[String]): Unit = {

    // Streaming environment plus its table environment.
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tableEnv = StreamTableEnvironment.create(env)

    // Connect to the external system and register the source table.
    // 1. Read from a CSV file.
    val inputPath = "E:\\repository\\company\\myself\\flink-learning\\flink-learning-demo\\src\\main\\resources\\sensor.txt"

    tableEnv.connect(new FileSystem().path(inputPath))
      // .withFormat(new OldCsv())
      .withFormat(new Csv())
      .withSchema(
        new Schema()
          .field("id", DataTypes.STRING())
          .field("timestamp", DataTypes.BIGINT())
          .field("temp", DataTypes.DOUBLE())
      )
      .createTemporaryTable("inputTable")

    // 2. Transformations on the registered table.
    val sensorTable = tableEnv.from("inputTable")

    // Projection: keep only the id and temperature columns.
    val projected = sensorTable.select('id, 'temp)

    // Grouped aggregation: count rows per sensor id.
    val aggregated = sensorTable
      .groupBy('id)
      // .aggregate("")
      .select('id, 'id.count as 'count)

    // Example output: (true,(sensor_1,3)). Because this is a grouped
    // aggregation, each emission is the latest result for its key; the
    // boolean flag marks the record as an addition (true) or retraction.
    aggregated.toRetractStream[(String, Long)].print()

    // 1. Register the file-backed sink table.
    val outputPath = "E:\\repository\\company\\myself\\flink-learning\\flink-learning-demo\\src\\main\\resources\\out.txt"

    tableEnv.connect(new FileSystem().path(outputPath))
      .withFormat(new Csv())
      .withSchema(
        new Schema()
          .field("id", DataTypes.STRING())
          .field("temp", DataTypes.DOUBLE())
          // .field("cnt", DataTypes.BIGINT())
      )
      .createTemporaryTable("outTable")

    projected.insertInto("outTable")
    // `aggregated` produces retractions, so it cannot be written to a file
    // sink — only append-only streams can be.
    // t2.insertInto("outTable")
    env.execute()
  }
}
