package com.shujia.table

import org.apache.flink.api.common.typeinfo.{TypeInformation, Types}
import org.apache.flink.api.scala.ExecutionEnvironment
import org.apache.flink.table.api.scala.BatchTableEnvironment
import org.apache.flink.api.scala._
import org.apache.flink.core.fs.FileSystem.WriteMode
import org.apache.flink.table.api.scala._
import org.apache.flink.table.sinks.CsvTableSink
import org.apache.flink.types.Row

object Demo1batchTable {

  /**
   * Flink batch Table API demo.
   *
   * Reads a CSV-style student file into a `DataSet`, converts it to a `Table`,
   * runs a few Table-API and SQL aggregations, and finally writes one SQL
   * result into a CSV table sink.
   */
  def main(args: Array[String]): Unit = {

    val env = ExecutionEnvironment.getExecutionEnvironment
    val tableEnv = BatchTableEnvironment.create(env)

    // Read the raw text file and parse each comma-separated record into a Student.
    val lines = env.readTextFile("spark/data/students.txt", charsetName = "utf-8")

    val studentDs = lines.map { line =>
      val fields = line.split(",")
      Student(fields(0), fields(1), fields(2).toInt, fields(3), fields(4))
    }

    // Create a Table directly from the DataSet.
    val stuTable = tableEnv.fromDataSet(studentDs)

    stuTable.printSchema()

    // Convert the Table back into a DataSet of Row (printing left disabled).
    stuTable.toDataSet[Row] //.print()

    // NOTE: `===` is the Table API equality expression, not Scala's `==`.
    stuTable.where('age === 23).toDataSet[Row] //.print()

    // Per-class aggregation: row count, max age and average age.
    val perClazzStats = stuTable
      .groupBy('clazz)
      .select('clazz, 'clazz.count.as('c), 'age.max, 'age.avg)
    perClazzStats.toDataSet[Row].print()

    // Register the Table under a name so it can be referenced from SQL.
    tableEnv.registerTable("student", stuTable)

    val countByClazz = tableEnv.sqlQuery(
      """select clazz ,count(1) as c from student group by clazz
        |
      """.stripMargin)
    countByClazz.toDataSet[Row].print()

    // Define a CSV sink table: single output file, overwrite on re-run.
    val csvSink = new CsvTableSink("flink/data/num", ",", 1, WriteMode.OVERWRITE)
    val sinkFieldNames: Array[String] = Array("clazz", "c")
    val sinkFieldTypes: Array[TypeInformation[_]] = Array(Types.STRING, Types.LONG)
    tableEnv.registerTableSink("c_sum", sinkFieldNames, sinkFieldTypes, csvSink)

    val sinkQuery = tableEnv.sqlQuery(
      """
        |select clazz ,count(1) as c from student group by clazz
        |
      """.stripMargin)
    // Emit the query result into the registered sink table.
    sinkQuery.insertInto("c_sum")

    // Triggers the pending sink write (the print() calls above executed eagerly).
    env.execute()

  }

  /** One record parsed from students.txt. */
  case class Student(id: String, name: String, age: Int, gender: String, clazz: String)

}
