package cn.getech.data.development.task

import cn.getech.data.development.bean.FlinkStreamingObj
import cn.getech.data.development.enums.CustomJDBCType
import cn.getech.data.development.sink.kudu.FlinkStreamKuduSink
import cn.getech.data.development.utils.{FlinkJDBCAnalysisUtils, FlinkSQLAnalysisUtils, FlinkUtils, HDFSSinkUtils}
import org.apache.commons.lang3.SystemUtils
import org.apache.flink.core.fs.FileSystem
import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment
import org.apache.flink.types.Row
import org.slf4j.{Logger, LoggerFactory}

import scala.util.control.NonFatal

/**
 * Main entry point of the DAG module: builds and submits a Flink streaming
 * job from a DAG-style job definition (JSON) by translating each DAG node
 * into Flink SQL and wiring the results to the configured sinks.
 */
object FlinkStreamDAGSQLMain {

  private val logger: Logger = LoggerFactory.getLogger(this.getClass)

  // Parsed job definition; assigned exactly once in main() before any other use.
  private var model: FlinkStreamingObj = _

  /**
   * Entry point. Parses the DAG job definition, registers the source/sink
   * DDL with the table environment, translates the DAG nodes to SQL and
   * submits the streaming job.
   *
   * @param args args(0) is the path of the DAG JSON definition; on Windows a
   *             bundled test resource is used instead (local development)
   */
  def main(args: Array[String]): Unit = {
    val definitionPath =
      if (SystemUtils.IS_OS_WINDOWS)
        "data-development-streaming-flink-job\\src\\main\\resources\\flink-streaming-dag-kafka-kafka.json"
      else
        args(0)
    model = new FlinkStreamingObj(definitionPath, true)
    model.jsonParse

    println("source>>>>>>>>>>>>>>>>>>\n" + model.sources)
    println("sink>>>>>>>>>>>>>>>>>>>>\n" + model.sinks)
    println("sql>>>>>>>>>>>>>>>>>>>>>\n")

    val env = FlinkUtils.env(model.jobName)
    val tEnv: StreamTableEnvironment = FlinkUtils.createStreamTableEnv(env)
    // Register the source table DDL.
    tEnv.executeSql(model.sources)
    // Register the sink DDL only when the job definition declares one.
    if (model.isSink) tEnv.executeSql(model.sinks)
    analysisSql(tEnv)
    env.execute(model.jobName)
  }

  /**
   * Translates every DAG node into a SQL query and wires the result either to
   * a debug dump (stdout + HDFS text file) or to its configured sink table.
   *
   * In debug mode traversal stops after the first translated node has been
   * dumped, mirroring the original early-return behavior.
   *
   * @param tEnv table environment holding the registered source/sink tables
   */
  private def analysisSql(tEnv: StreamTableEnvironment): Unit = {
    // `exists` short-circuits on the first node that returns true (debug
    // mode). This replaces a non-local `return` inside the original lambda,
    // which was implemented by throwing NonLocalReturnControl.
    model.typeObjs.exists { obj =>
      val resSQL = obj.oper_type match {
        // Source nodes carry no transformation SQL.
        case cn.getech.data.development.enums.DAGType.source => ""
        case cn.getech.data.development.enums.DAGType.select =>
          FlinkSQLAnalysisUtils.selectSQL(obj)
        case cn.getech.data.development.enums.DAGType.filter =>
          FlinkSQLAnalysisUtils.filterSQL(obj)
        case cn.getech.data.development.enums.DAGType.groupBy =>
          FlinkSQLAnalysisUtils.groupBySQL(obj)
        case cn.getech.data.development.enums.DAGType.distinct =>
          FlinkSQLAnalysisUtils.distinctSQL(obj)
      }
      if (resSQL.isEmpty) {
        // Skip nodes that yield no SQL (source nodes): previously an empty
        // string reached tEnv.sqlQuery("") and would have failed there.
        false
      } else {
        println(resSQL)
        logger.info("sql : {}", resSQL)
        val tmpTable = tEnv.sqlQuery(resSQL)
        if ("1".equals(model.isDebug)) {
          // Debug mode: print the rows and dump them to an HDFS text file,
          // then stop processing further nodes.
          val ds = tEnv.toAppendStream[Row](tmpTable)
          ds.map(_.toString).print()
          val mode: FileSystem.WriteMode = FileSystem.WriteMode.valueOf("OVERWRITE")
          ds.writeAsText("hdfs:///usr/realtime/debug/" + model.jobName + ".txt", mode).setParallelism(1)
          true
        } else {
          // Expose this node's result to downstream DAG nodes.
          tEnv.createTemporaryView(obj.outputTableName, tmpTable)
          val sinkTableName = obj.sinkTableName

          // TODO temporary workaround, optimize later; kafka sinks do not go through here
          if (!sinkTableName.isEmpty) {
            try {
              // Probe the catalog: `from` throws when no table was registered
              // under this name, in which case we fall back to the JDBC sinks.
              val _ = tEnv.from(sinkTableName)
              tmpTable.executeInsert(sinkTableName)
            } catch {
              // NonFatal (instead of the original bare `case _ =>`) so that
              // fatal errors such as OutOfMemoryError still propagate.
              case NonFatal(_) =>
                // kudu / hdfs style sinks declared in the job definition
                if (model.isJdbcSink) {
                  model.jdbcSinks.foreach { sink =>
                    if (sink.sinkTableName == sinkTableName) {
                      val rows = tEnv.toAppendStream[Row](tmpTable)
                      CustomJDBCType.withName(sink.typename) match {
                        case CustomJDBCType.kudu =>
                          rows.addSink(new FlinkStreamKuduSink(FlinkJDBCAnalysisUtils.kuduConf(sink, 1024 * 1024 * 128)))
                        case CustomJDBCType.hive | CustomJDBCType.hdfs =>
                          val conf = FlinkJDBCAnalysisUtils.hdfsConf(sink, 1024 * 1024 * 128)
                          HDFSSinkUtils(conf).sink(rows)
                      }
                    }
                  }
                }
            }
          }
          false
        }
      }
    }
    ()
  }
}