package com.desheng.bigdata.flink.table

import com.alibaba.fastjson.JSON
import com.desheng.bigdata.flink.domain.Person
import org.apache.flink.api.scala._
import org.apache.flink.api.scala.ExecutionEnvironment
import org.apache.flink.table.api.Table
import org.apache.flink.table.api.scala.BatchTableEnvironment
import org.apache.flink.types.Row

/**
  * Besides the DSL-style Table API, Flink also supports plain SQL.
  *
  * Batch example: reads a JSON-lines file, turns it into a Table, and runs
  * the same per-province count twice, demonstrating the two ways of
  * referencing a Table from a SQL string.
  */
object _03FlinkSQLOPs {
    def main(args: Array[String]): Unit = {
        val batchEnv = ExecutionEnvironment.getExecutionEnvironment

        val tblEnv = BatchTableEnvironment.create(batchEnv)
        // Parse each JSON line into a ScalaPerson record; the case class
        // fields become the Table's columns.
        val dataSet = batchEnv.readTextFile("file:/E:/data/spark/sql/people.json")
                    .map(line => {
                        val jsonObj = JSON.parseObject(line)
                        val name = jsonObj.getString("name")
                        val age = jsonObj.getInteger("age")
                        val height = jsonObj.getDouble("height")
                        val province = jsonObj.getString("province")
                        ScalaPerson(name, age, height, province)
                    })
        val table: Table = tblEnv.fromDataSet(dataSet)

        /*
            SQL aggregation: how many people live in each province.
            First way of referencing a table: interpolate the Table object
            directly into the SQL string (Flink registers it under an
            auto-generated name via the Table's toString).
         */
        val result = tblEnv.sqlQuery(
            s"""
              |select
              |  province,
              |  count(1) counts
              |from ${table}
              |group by province
            """.stripMargin)

        // FIX: the converted DataSet was previously discarded (no sink),
        // so the first query never produced output. Print it like the
        // second query below.
        tblEnv.toDataSet[Row](result).print()

        // Second way of referencing a table: register it under an explicit name.
        println("=========引用表的第二种方式============")
        // Register the Table as a temporary table named "person".
        tblEnv.registerTable("person", table)
        val result1 = tblEnv.sqlQuery(
            s"""
               |select
               |  province,
               |  count(1) counts
               |from person
               |group by province
            """.stripMargin)
        tblEnv.toDataSet[Row](result1).print()
    }
}
/** Immutable record for one entry of people.json; its fields define the schema of the Table built in `_03FlinkSQLOPs`. */
case class ScalaPerson(name: String, age: Int, height: Double, province: String)
