import bean.ScalaClass.Burks
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.scala.StreamTableEnvironment
import org.apache.flink.table.sinks.CsvTableSink
import org.apache.flink.api.scala._

/**
 * Batch-style Flink streaming job: reads comma-separated records from
 * `data/burks.txt`, maps each line to a [[Burks]] record (2 string columns
 * followed by 12 numeric columns), registers the stream as table `burksTab`,
 * and writes `select *` to `data/output1` through the legacy [[CsvTableSink]].
 *
 * NOTE(review): this uses the deprecated pre-Flink-1.11 Table API
 * (`registerTable` / `registerFunction` / `registerTableSink` / `insertInto`);
 * kept as-is to match the rest of the codebase.
 */
object readCSV {

  /** Number of comma-separated fields each input line must carry. */
  private val ExpectedFieldCount = 14

  /**
   * Job entry point.
   *
   * Replaces `extends App`: the `App` trait's delayed initialization makes
   * object fields unreliable when captured by Flink closures or accessed
   * from other threads, so an explicit `main` is safer for a job driver.
   */
  def main(args: Array[String]): Unit = {
    val env    = StreamExecutionEnvironment.getExecutionEnvironment
    val tabEnv = StreamTableEnvironment.create(env)

    // Parse each text line into a Burks record. A malformed (too-short) line
    // used to surface as a bare ArrayIndexOutOfBoundsException deep inside the
    // task; fail fast instead with a message that names the offending line.
    val burksSource = env.readTextFile("data/burks.txt")
      .map { line =>
        val splits = line.split(",")
        require(
          splits.length >= ExpectedFieldCount,
          s"Expected $ExpectedFieldCount comma-separated fields, got ${splits.length} in line: $line"
        )
        Burks(
          splits(0),
          splits(1),
          splits(2).toDouble,
          splits(3).toDouble,
          splits(4).toDouble,
          splits(5).toDouble,
          splits(6).toDouble,
          splits(7).toDouble,
          splits(8).toDouble,
          splits(9).toDouble,
          splits(10).toDouble,
          splits(11).toDouble,
          splits(12).toDouble,
          splits(13).toDouble
        )
      }

    val tab1 = tabEnv.fromDataStream(burksSource)
    tabEnv.registerTable("burksTab", tab1)

    // UDTF registered for ad-hoc SQL use; not referenced by the query below.
    // NOTE(review): `Split` is declared elsewhere in the project — confirm it
    // is still needed here, otherwise this registration can be dropped.
    tabEnv.registerFunction("myUDTF", new Split("#"))

    // Mirror the source table's schema onto the sink so every column of
    // `select *` flows through without manual name/type arrays.
    val fieldNames = tab1.getSchema.getFieldNames
    val fieldTypes = tab1.getSchema.getFieldTypes

    val result1 = tabEnv.sqlQuery("select * from burksTab")

    // Sink is declared next to its registration (it was previously created
    // ~20 lines before its only use).
    val csvSink = new CsvTableSink("data/output1")
    tabEnv.registerTableSink("CsvSinkTable", fieldNames, fieldTypes, csvSink)

    result1.insertInto("CsvSinkTable")

    // Everything above only builds the job graph; this call actually runs it.
    env.execute()
  }
}
