package com.shujia.flink.table

import org.apache.flink.api.common.typeinfo.{TypeInformation, Types}
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.api.scala._
import org.apache.flink.core.fs.FileSystem.WriteMode
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.scala.StreamTableEnvironment
import org.apache.flink.table.sinks.CsvTableSink
import org.apache.flink.types.Row
import org.apache.flink.table.api.Slide
import org.apache.flink.table.functions.ScalarFunction
import org.apache.flink.table.functions.TableFunction

/**
  * Demo of the Flink Table API / SQL on an unbounded stream.
  *
  * Pipeline overview (order matters — each step registers state on the
  * shared [[StreamTableEnvironment]] before later steps use it):
  *   1. read `id,timestamp` lines from a socket source,
  *   2. assign ascending event-time timestamps,
  *   3. run a sliding-window aggregation via the Table API,
  *   4. run a tumbling-window aggregation via SQL on the registered table,
  *   5. demonstrate append vs. retract stream conversion,
  *   6. write selected columns to a CSV sink,
  *   7. apply a registered scalar UDF (`hashCode`) in SQL.
  *
  * Most `.print()` calls are commented out on purpose; only the CSV sink
  * produces output when this job runs.
  */
object Demo2StreamTable {
  def main(args: Array[String]): Unit = {

    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // Single parallelism keeps the demo output ordered and the CSV sink a single file.
    env.setParallelism(1)
    // Use event time so the window operators below fire on the timestamps in the data,
    // not on the wall clock.
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    val tableEnv = StreamTableEnvironment.create(env)

    /**
      * Sample input lines for the socket source (format: `id,timestamp`).
      * NOTE(review): values look like epoch times; assignAscendingTimestamps
      * treats them as milliseconds — confirm the producer sends millis.
      *
      * 1,1574666000
      * 1,1574671000
      * 1,1574667000
      * 1,1574668000
      * 1,1574669000
      * 1,1574670000
      * 1,1574671000
      * 1,1574672000
      * 1,1574673000
      * 1,1574674000
      * 1,1574675000
      * 1,1574680000
      * 1,1574760000
      *
      */
    // Unbounded text source: one line per record from node1:8888 (e.g. started with `nc -lk 8888`).
    val ds = env.socketTextStream("node1", 8888)

    val eventDS = ds.map(line => {
      val split = line.split(",")
      // (id, eventTimestamp); split(1).toLong will throw on malformed lines — acceptable for a demo.
      (split(0), split(1).toLong)
    })
      // Assign the event-time column: the second tuple field is used as the record timestamp.
      // assignAscendingTimestamps assumes timestamps arrive in ascending order (no watermark lag).
      .assignAscendingTimestamps(_._2)

    // Build a Table from the stream; 'ts.rowtime declares the event-time attribute
    // required by the window operators and the SQL TUMBLE function below.
    val table = tableEnv.fromDataStream(eventDS, 'id, 'ts.rowtime)


    // Table API: sliding window of 10s, sliding every 5s, keyed by id.
    // Windowed group-by emits append-only results, so toAppendStream is valid here.
    table
      .window(Slide over 10.seconds every 5.seconds on 'ts as 'w)
      .groupBy('w, 'id)
      .select('w.end, 'id, 'id.count)
      .toAppendStream[Row]
    //.print()


    // Register the table under name "t" so it can be referenced from SQL.
    tableEnv.registerTable("t", table)
    // SQL equivalent: tumbling 5-second event-time window, count per id.
    tableEnv.sqlQuery(
      """
        |
        |SELECT  TUMBLE_START(ts, INTERVAL '5' SECOND),id ,count(1) from t
        |GROUP BY TUMBLE(ts, INTERVAL '5' SECOND),id
        |
        |
      """.stripMargin)
      .toAppendStream[Row]
    // .print()


    /**
      * toAppendStream   only works for append-only result streams
      *                  (e.g. windowed aggregations).
      * toRetractStream  works everywhere: continuously-updated results are
      *                  emitted as (add/retract flag, row) pairs.
      */

    // Non-windowed aggregation: results are updated per key, so this table is
    // NOT append-only and must be converted with toRetractStream.
    table
      .groupBy('id)
      .select('id, 'id.count)
      // Convert the table back to a DataStream for printing.
      //      .toAppendStream[Row].print()
      .toRetractStream[Row] //.print()


    // Build and register an output (sink) table backed by a CSV file.
    // Relative path "flink/data/num1" resolves against the process working directory.
    val sink = new CsvTableSink("flink/data/num1", ",", 1, WriteMode.OVERWRITE)
    val fieldNames: Array[String] = Array("id", "ts")
    // 'ts is a rowtime attribute, hence SQL_TIMESTAMP rather than LONG.
    val fieldTypes: Array[TypeInformation[_]] = Array(Types.STRING, Types.SQL_TIMESTAMP)
    tableEnv.registerTableSink("id_count", fieldNames, fieldTypes, sink)


    // Emit the raw (id, ts) pairs into the registered CSV sink.
    table.select('id, 'ts)
      .insertInto("id_count")


    // Register the scalar UDF so SQL can call it as hashCode(...).
    tableEnv.registerFunction("hashCode", new MyFuncation)


    // Apply the UDF in SQL: maps each id string to its JVM hash code.
    tableEnv.sqlQuery(
      """
        |select hashCode(id) from t
        |
        |
        """.stripMargin).toAppendStream[Row]//.print()


    // Lazily-built pipeline only runs once execute() is called.
    env.execute()


  }
}

/**
  * Scalar UDF that maps a string to its JVM hash code.
  *
  * Registered on the table environment under the SQL name `hashCode`,
  * so queries can invoke it as `hashCode(id)`. Flink locates the `eval`
  * method by reflection; its signature must not change.
  */
class MyFuncation extends ScalarFunction {
  /** Returns the JVM `String.hashCode` of the input value. */
  def eval(s: String): Int = s.hashCode
}
