package cn.cnic.security.nssap.stearm

import org.apache.flink.api.common.serialization.SimpleStringEncoder
import org.apache.flink.api.java.functions.KeySelector
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink
import org.apache.flink.streaming.api.scala.function.{RichWindowFunction, WindowFunction}
import org.apache.flink.streaming.api.scala.{DataStream, KeyedStream, StreamExecutionEnvironment, WindowedStream, createTypeInformation}
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.windows.TimeWindow
import org.apache.flink.util.Collector
import org.apache.flink.core.fs.Path
import org.slf4j.{Logger, LoggerFactory}
/**
  * QiAnXin (奇安信) alarm-log cleaning pipeline; intended to eventually write
  * to MySQL (the current implementation writes to a local file sink instead).
  */
object WarnLogMain {

//  val log:Logger = LoggerFactory.getLogger(WarnLogMain.getClass)

  def main(args: Array[String]): Unit = {
    // Flink streaming job: consume QiAnXin alarm records from Kafka, split and
    // filter them, window by record type, then write the payloads out as files.
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

//    env.setParallelism(1)

    // Kafka consumer over the "alarm" topic (connection details live in CommonTools).
    val TOPIC = "alarm"
    val rawStream: DataStream[String] = CommonTools.kafkaConsumer(TOPIC, env)

    // Split each record on the "|!" delimiter (\u007C is '|'); keep only rows
    // with exactly 5 fields and a non-empty payload, emitting (type, payload).
    val recordStream: DataStream[(String, String)] = rawStream
      .map(line => line.split("\\u007C!"))
      .filter(fields => fields.length == 5)
      .map(fields => (fields(3), fields(4)))
      .filter(pair => pair._2 != null && pair._2.nonEmpty)
    // Event times are assumed to arrive in order, so no watermark strategy is
    // assigned here.

    //-------------- processing pipeline -------------------
    // Key by the hash of the record-type field.
    val keyedStream: KeyedStream[(String, String), Int] = recordStream.keyBy(new MyKeySelector)

    // Tumble on processing time, one 30-second window per key.
    val windowedStream: WindowedStream[(String, String), Int, TimeWindow] =
      keyedStream.window(TumblingProcessingTimeWindows.of(Time.seconds(30)))

    // The window function logs each element and forwards its payload.
    val cleanedStream: DataStream[String] = windowedStream.apply(new MyRichWindow)

    // Temporary destination: row-format files under the local "warnlog" path
    // (all data goes to the same path).
    val outputPath: String = "warnlog"
    val fileSink: StreamingFileSink[String] = StreamingFileSink
      .forRowFormat(new Path(outputPath), new SimpleStringEncoder[String]("UTF-8"))
      .build()
    cleanedStream.addSink(fileSink).setParallelism(1)
//    cleanedStream.print("alarmLog:")

    env.execute("奇安信告警信息etl")
  }

  // IN, OUT, KEY, W
  class MyRichWindow extends RichWindowFunction[(String, String), String, Int, TimeWindow] {

    val log:Logger = LoggerFactory.getLogger("MyRichWindow")

    override def apply(key: Int, window: TimeWindow, input: Iterable[(String, String)], out: Collector[String]): Unit = {
      for (elem <- input) {
          log.info("{}",elem.toString())
//        val nObject1: JSONObject = elem._1 match {
//          case "alarm" => JSON.parseObject(elem._2)
//          case "sandbox" => JSON.parseObject(elem._2)
//          case "sandbox_mail" => JSON.parseObject(elem._2)
//          case "yunsuo" => JSON.parseObject(elem._2)
//        }
//        val nObject: JSONObject = JSON.parseObject(elem._2)
        out.collect(elem._2)
      }
    }
  }


  /**
    * Keys a (type, payload) tuple by the hash code of its type field.
    * NOTE(review): distinct type strings with equal hash codes collapse into
    * the same key.
    */
  class MyKeySelector extends KeySelector[(String, String), Int] {
    override def getKey(value: (String, String)): Int = value._1.hashCode
  }

}
