package flink_table

import java.util.Properties
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.{DataTypes, Table}
import org.apache.flink.table.descriptors.{Csv, Kafka, Schema}

// Implicit conversions are required for the Scala Table/DataStream APIs.
import org.apache.flink.table.api.scala._
import org.apache.flink.api.scala._

/**
 * Flink streaming job: registers a Kafka topic ("flink", broker hadoop:9092)
 * as a temporary CSV-backed table, reads it through the Table API, and prints
 * each incoming row as an append-stream tuple.
 */
object flink_table_kafak {
    def main(args: Array[String]): Unit = {
        val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
        env.setParallelism(1)

        // Table execution environment layered on top of the streaming environment.
        val tableEnv = StreamTableEnvironment.create(env)

        // Connection properties for the Kafka 0.11 cluster.
        // NOTE(review): zookeeper.connect is only consumed by very old Kafka
        // consumers; harmless here but likely unnecessary for 0.11.
        val kafkaProps = new Properties()
        kafkaProps.setProperty("zookeeper.connect", "hadoop:2181")
        kafkaProps.setProperty("bootstrap.servers", "hadoop:9092")

        // Descriptor for the Kafka source: version, topic, and connection props.
        val kafkaSource = new Kafka()
            .version("0.11")
            .topic("flink")
            .properties(kafkaProps)

        // Five-column student schema; CSV fields are mapped positionally.
        val studentSchema = new Schema()
            .field("id", DataTypes.INT())
            .field("name", DataTypes.STRING())
            .field("sex", DataTypes.STRING())
            .field("age", DataTypes.DOUBLE())
            .field("subject", DataTypes.STRING())

        // Register the Kafka-backed table under a temporary name.
        tableEnv.connect(kafkaSource)
            .withFormat(new Csv())
            .withSchema(studentSchema)
            .createTemporaryTable("kafka_student_tb")

        // Read the registered table and print every row as an appended tuple.
        val studentTable: Table = tableEnv.from("kafka_student_tb")
        studentTable.toAppendStream[(Int, String, String, Double, String)].print()
        env.execute("kafak_flink")
    }
}
