package cn.getech.data.development.task

import cn.getech.data.development.bean.FlinkStreamingObj
import cn.getech.data.development.enums.CustomJDBCType
import cn.getech.data.development.sink.kudu.FlinkStreamKuduSink
import cn.getech.data.development.utils.{FlinkJDBCAnalysisUtils, FlinkUtils, HDFSSinkUtils}
import org.apache.commons.lang3.SystemUtils
import org.apache.flink.api.common.serialization.SimpleStringEncoder
import org.apache.flink.core.fs.{FileSystem, Path}
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy
import org.apache.flink.streaming.api.functions.sink.filesystem.{OutputFileConfig, StreamingFileSink}
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment
import org.slf4j.{Logger, LoggerFactory}
import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api.Table
import org.apache.flink.types.Row

import scala.collection.mutable.ArrayBuffer

/**
  * 1. 多个source
  * 2. 多个sql启动任务
  * 3. 多个sink
  */
/**
  * Entry point for a Flink streaming SQL job driven by a JSON job description:
  *   1. registers multiple sources
  *   2. runs multiple SQL statements
  *   3. registers multiple sinks
  */
object FlinkStreamSQLMain {
  private val logger: Logger = LoggerFactory.getLogger(this.getClass)

  import scala.util.control.NonFatal

  def main(args: Array[String]): Unit = {
    // On Windows (local development) read the job definition from the bundled
    // resource file; otherwise take the path from the first CLI argument.
    val obj: FlinkStreamingObj =
      if (SystemUtils.IS_OS_WINDOWS)
        new FlinkStreamingObj("data-development-streaming-flink-job\\src\\main\\resources\\flink-sql-new.json", false)
      else
        new FlinkStreamingObj(args(0), false)

    val env = FlinkUtils.env(obj.jobName)
    val tEnv = FlinkUtils.createStreamTableEnv(env)

    // Register source/sink DDL before running the job SQL.
    getResource(tEnv, obj.sources)
    getResource(tEnv, obj.sinks)
    // TODO deprecated, no longer usable
    //    if (obj.isJdbcSink) {
    //      obj.jdbcSinks.foreach(f = x => {
    //            val sink = FlinkJDBCAnalysisUtils.operatorUpsertSink(x)
    //      })
    //    }
    getSQL(tEnv, obj)

    env.execute(obj.jobName)
  }

  /**
    * Executes the semicolon-separated DDL statements declaring sources/sinks.
    *
    * @param tEnv table environment
    * @param sql  semicolon-separated DDL; null or empty input is a no-op
    */
  private def getResource(tEnv: StreamTableEnvironment, sql: String): Unit = {
    if (sql == null || sql.isEmpty) return
    // Skip blank fragments produced by trailing/duplicate semicolons —
    // executeSql on an empty string would fail with a parse error.
    sql.split(";").map(_.trim).filter(_.nonEmpty).foreach { stmt =>
      println(stmt)
      tEnv.executeSql(stmt)
    }
  }

  /**
    * Runs the job's semicolon-separated SQL statements.
    *
    * BUGFIX: the original used `return` inside the foreach lambda — a
    * non-local return — so the first fragment shorter than 7 characters
    * (e.g. an empty fragment between consecutive semicolons) aborted ALL
    * remaining statements. Now only that fragment is skipped.
    *
    * @param tEnv table environment
    * @param obj  job description holding the SQL script and sink metadata
    */
  private def getSQL(tEnv: StreamTableEnvironment, obj: FlinkStreamingObj): Unit = {
    if (obj.sqls == null || obj.sqls.isEmpty) return
    obj.sqls.split(";").foreach { str =>
      val trimmed = str.trim
      // A real statement starts with at least a 6-letter keyword; skip noise.
      if (trimmed.length >= 7) {
        // BUGFIX: was logging the whole obj.sqls script for every statement.
        logger.info("sql : {}", str)
        val keyword = trimmed.substring(0, 6)
        if (keyword.equalsIgnoreCase("select")) {
          // Plain query: print the append stream for inspection.
          val table: Table = tEnv.sqlQuery(str)
          tEnv.toAppendStream[Row](table).print()
        } else if (keyword.equalsIgnoreCase("insert")) {
          if ("1".equals(obj.isDebug)) {
            debugInsert(tEnv, obj, str)
          } else {
            try tEnv.executeSql(str)
            catch {
              // BUGFIX: was a bare `case _ =>`, which also swallowed fatal
              // errors (OutOfMemoryError, InterruptedException, ...) and
              // discarded the exception without logging it.
              case NonFatal(e) =>
                logger.warn("executeSql failed, falling back to manual sink wiring", e)
                fallbackInsert(tEnv, obj, str)
            }
          }
        } else {
          // DDL / other statements (CREATE, USE, ...).
          try tEnv.executeSql(str)
          catch {
            case NonFatal(e) =>
              // BUGFIX: log the full stack trace, not just the message.
              logger.error("executeSql failed for statement: " + str, e)
          }
        }
      }
    }
  }

  /**
    * Debug mode: extracts the SELECT part of an INSERT statement, prints the
    * rows and dumps them to HDFS as text for inspection.
    */
  private def debugInsert(tEnv: StreamTableEnvironment, obj: FlinkStreamingObj, str: String): Unit = {
    val i = str.toLowerCase().indexOf("select")
    if (i < 0) {
      // Robustness: substring(-1) would throw on an INSERT without SELECT.
      logger.warn("no SELECT found in insert statement, skipping debug run: {}", str)
      return
    }
    // Idiom: was `var result = " "` followed by an unconditional reassignment.
    val result = str.substring(i, str.length)
    logger.debug("select sql : {}", result)

    val table = tEnv.sqlQuery(result)
    val ds = tEnv.toAppendStream[Row](table)
    logger.debug(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>ds :{}", ds)
    val value: DataStream[String] = ds.map(_.toString)
    value.print()
    //System.setProperty(ConfigurationManager.getProperty("hdfs.account"), ConfigurationManager.getProperty("hdfs.username"))
    val mode: FileSystem.WriteMode = FileSystem.WriteMode.valueOf("OVERWRITE")
    ds.writeAsText("hdfs:///usr/realtime/debug/" + obj.jobName + ".txt", mode).setParallelism(1)
  }

  /**
    * TODO temporary workaround, revisit later.
    *
    * Fallback when `executeSql` fails on an INSERT: splits
    * "INSERT INTO &lt;table&gt; SELECT ..." manually, registers the SELECT as a
    * temporary view and wires the matching kudu/hdfs sink by hand.
    */
  private def fallbackInsert(tEnv: StreamTableEnvironment, obj: FlinkStreamingObj, str: String): Unit = {
    // Tokenise on whitespace: tokens(2) is the target table, the rest (from
    // token 3 onwards) is the SELECT query.
    // Idiom: ArrayBuffer built directly, no asInstanceOf cast needed.
    val tokens = ArrayBuffer(str.trim.split("\\s+"): _*)
    if (tokens.length < 4) {
      // Robustness: a malformed statement would previously throw
      // IndexOutOfBoundsException here.
      logger.error("malformed insert statement, cannot build fallback sink: {}", str)
      return
    }
    val sinkTableName = tokens(2)
    tokens.remove(0, 3)
    val sql = tokens.mkString(" ")

    // Register the SELECT under a unique temporary view name.
    val tmpTable = tEnv.sqlQuery(sql)
    val tmpTableName = "tmp_" + System.currentTimeMillis()
    tEnv.createTemporaryView(tmpTableName, tmpTable)

    // kudu & hdfs style sinks
    if (obj.isJdbcSink) {
      obj.jdbcSinks.foreach { sink => // iterate over configured sinks
        if (sink.sinkTableName == sinkTableName) {
          val sinkType = CustomJDBCType.withName(sink.typename)
          val value = tEnv.toAppendStream[Row](tmpTable)
          sinkType match {
            case CustomJDBCType.kudu =>
              logger.info("初始化kudu sink...")
              val sink1 = new FlinkStreamKuduSink(FlinkJDBCAnalysisUtils.kuduConf(sink, 1024 * 1024 * 128))
              value.addSink(sink1)
            case CustomJDBCType.hive | CustomJDBCType.hdfs =>
              logger.info("初始化hdfs sink...")
              val conf = FlinkJDBCAnalysisUtils.hdfsConf(sink, 1024 * 1024 * 128)
              HDFSSinkUtils(conf).sink(value)
            case other =>
              // Robustness: an unmatched type previously threw MatchError.
              logger.warn("unsupported sink type: {}", other)
          }
        }
      }
    }
  }
}



