package com.wudl.flink.stream.tablesql


import org.apache.flink.types.Row
import org.apache.calcite.schema.TableFunction
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.scala.StreamTableEnvironment
import org.apache.flink.table.api.{EnvironmentSettings, Table, Types}
import org.apache.flink.table.functions.TableFunction



/**
 * Example streaming job: read lines of text from a socket, split each line
 * into words with a user-defined table function (UDTF), and run a word count
 * through the Table API using the legacy (old) planner.
 *
 * NOTE(review): the file-level import of `org.apache.calcite.schema.TableFunction`
 * (shadowed by the later Flink `TableFunction` import) is unused and should be
 * removed from the import block.
 */
object CreateTableUdfWorkCount {

  def main(args: Array[String]): Unit = {
    // Set up the streaming execution environment and a Table environment
    // backed by the old planner (as selected via useOldPlanner()).
    val streamEnv: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    val settings: EnvironmentSettings = EnvironmentSettings.newInstance().useOldPlanner().build()
    val tableEnvironment: StreamTableEnvironment = StreamTableEnvironment.create(streamEnv, settings)

    // Implicit conversions: DataStream type information + Table expression DSL ('symbol syntax).
    import org.apache.flink.streaming.api.scala._
    import org.apache.flink.table.api.scala._

    // Source: one line of text per socket message.
    val stream: DataStream[String] = streamEnv.socketTextStream("10.204.125.109", 8888)

    // Expose the stream as a table with a single column named `line`.
    val table: Table = tableEnvironment.fromDataStream(stream, 'line)

    // Register the UDTF so it can be referenced from Table API / SQL by name.
    tableEnvironment.registerFunction("split", new MyFlatMapFunction)

    // Split every line into (word, 1) rows via the UDTF, then count per word.
    val result: Table = table
      .joinLateral("split(line) as (word, cnt)")
      .groupBy('word)
      .select('word, 'cnt.sum as 'total)

    // A retract stream is required because the grouped aggregation updates
    // previously emitted results.
    tableEnvironment.toRetractStream[Row](result).print()

    streamEnv.execute("CreateTableUdfWorkCount")
  }

  /**
   * UDTF that splits a whitespace-separated line into `(word, 1)` rows.
   */
  class MyFlatMapFunction extends TableFunction[Row] {

    // Declare the row type produced by eval: (STRING word, INT count).
    override def getResultType: TypeInformation[Row] = Types.ROW(Types.STRING(), Types.INT())

    // Emit one (word, 1) row per token of the input line.
    def eval(str: String): Unit = {
      str.trim.split(" ").foreach { word =>
        val row = new Row(2)
        row.setField(0, word)
        row.setField(1, 1)
        collect(row)
      }
    }
  }
}
