package com.demo.flink.sql

import org.apache.flink.streaming.api.scala.{StreamExecutionEnvironment, createTypeInformation}
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment

/**
 * Demo Flink job: reads a CSV file of HY records, registers the stream as a
 * table, runs a pass-through SQL query, and prints the result stream.
 *
 * Usage: DemoSQL <input-path>
 *   e.g. hdfs://hadoop-master:8020/user/hy.csv  or  file:///c:/data/hy.csv
 */
object DemoSQL {

  /** Expected number of comma-separated fields per CSV line. */
  private val FieldCount = 5

  def main(args: Array[String]): Unit = {
    // Fail fast with a usage message instead of a bare ArrayIndexOutOfBoundsException.
    require(args.nonEmpty, "Usage: DemoSQL <input-path> (e.g. file:///c:/data/hy.csv)")
    val filepath = args(0)

    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val lines = env.readTextFile(filepath)

    val data = lines.map(line => {
      // limit = -1 keeps trailing empty fields; the default split(",") would
      // drop them and a row like "1,a,2,b," would crash with only 4 tokens.
      val row = line.split(",", -1)
      // Surface the offending line in the error instead of an opaque index error.
      require(
        row.length == FieldCount,
        s"Malformed CSV line (expected $FieldCount fields, got ${row.length}): $line"
      )
      HY(row(0).toInt, row(1), row(2).toInt, row(3), row(4))
    })

    val tEnv = StreamTableEnvironment.create(env)
    val t_hy = tEnv.fromDataStream(data)
    t_hy.printSchema()

    // Register the table and run a trivial pass-through query to demo Table/SQL.
    tEnv.createTemporaryView("t_hy", t_hy)
    val result = tEnv.sqlQuery("select * from t_hy")

    // Convert back to a DataStream and print each row to stdout.
    val output = tEnv.toDataStream(result)
    output.print()

    env.execute("DemoSQL")
  }

  /**
   * One CSV record: industry code/name, category code/name, and a validity flag.
   * (Field semantics inferred from names — hy=industry, dl=category, yxbz=valid
   * flag; confirm against the data dictionary.)
   */
  final case class HY(hy_dm: Int, hy_mc: String, dl_dm: Int, dl_mc: String, yxbz: String)
}
