package org.project.top

import java.sql.Timestamp
import java.util
import java.util.Properties

import org.FlinkStreamApp
import org.apache.flink.api.common.functions.{AggregateFunction, RuntimeContext}
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.api.common.state.ListStateDescriptor
import org.apache.flink.api.scala.typeutils.Types
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.KeyedProcessFunction
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.scala.function.ProcessWindowFunction
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.windows.TimeWindow
import org.apache.flink.streaming.connectors.elasticsearch.{ElasticsearchSinkFunction, RequestIndexer}
import org.apache.flink.streaming.connectors.elasticsearch6.ElasticsearchSink
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer
import org.apache.flink.util.Collector
import org.apache.http.HttpHost
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.elasticsearch.action.index.IndexRequest
import org.elasticsearch.client.Requests
import org.project.bean.{ItemViewCount, UserBebavior}

import scala.collection.mutable.ListBuffer

/**
 * description ：统计热门商品 topn
 * author      ：剧情再美终是戏
 * mail        : 13286520398@163.com
 * date        ：Created in 2020/2/25 10:57
 * modified By ：
 * version:    : 1.0
 */
object HotItemsFromKafkaToEs extends FlinkStreamApp {
  /**
   * Builds the hot-items pipeline: reads user-behavior CSV lines from Kafka,
   * computes the top-N most viewed items per sliding event-time window, and
   * writes the formatted result to Elasticsearch (also printing it to stdout).
   *
   * @param environment the streaming environment supplied by FlinkStreamApp
   */
  override def doSomeThing(environment: StreamExecutionEnvironment) = {
    // Drive windows by the timestamps carried in the data, not the wall clock.
    environment.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    // Kafka consumer configuration.
    val properties = new Properties()
    properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "my-group")
    properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop101:9092")
    properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer")
    properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer")
    properties.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest")

    // Source: raw CSV strings from the "t01" topic.
    val source = environment.addSource(new FlinkKafkaConsumer[String]("t01", new SimpleStringSchema, properties))

    // Elasticsearch sink builder targeting a 3-node cluster.
    val httpHosts = new util.ArrayList[HttpHost]()
    httpHosts.add(new HttpHost("hadoop101", 9200, "http"))
    httpHosts.add(new HttpHost("hadoop102", 9200, "http"))
    httpHosts.add(new HttpHost("hadoop103", 9200, "http"))
    val esBuild = new ElasticsearchSink.Builder[String](
      httpHosts,
      new ElasticsearchSinkFunction[String] {
        // Wraps one result string into an index request against index "t011".
        def createEsIndexRequest(data: String): IndexRequest = {

          val json = new util.HashMap[String, String]()
          json.put("data", data)

          Requests
            .indexRequest()
            .index("t011")
            .`type`("_doc")
            .source(json)
        }

        override def process(t: String, runtimeContext: RuntimeContext, requestIndexer: RequestIndexer) = {
          requestIndexer.add(createEsIndexRequest(t))
        }
      }
    )
    // Flush after every single action so results become visible immediately
    // (a demo setting; raise for production throughput).
    esBuild.setBulkFlushMaxActions(1)

    // Every 5 minutes, emit the top-N most clicked items of the past hour.
    val result = source
      .map(line => {
        val splits = line.split(",")
        // CSV layout: userId,itemId,categoryId,behavior,timestampSeconds
        // (seconds are converted to milliseconds for Flink's timestamps).
        UserBebavior(splits(0).toLong, splits(1).toLong, splits(2).toInt, splits(3), splits(4).toLong * 1000)
      })
      .filter(_.behavior == "pv") // keep page-view events only
      .assignAscendingTimestamps(_.timestamp) // source timestamps assumed monotonic
      .keyBy(_.itemId)
      .timeWindow(Time.minutes(60), Time.minutes(5))
      .aggregate(new CountAgg, new MyProcessResult)
      .keyBy(_.windowEnd)
      .process(new TopNHotItems(3))

    // Sink to Elasticsearch, and print to stdout for debugging.
    result.addSink(esBuild.build())
    // print() has a side effect, so the parentheses are kept by convention.
    result.print()
  }

  /**
   * Buffers every per-item count keyed by window end, then — once the window's
   * results are complete — emits the top-n items by click count as one
   * formatted string.
   *
   * @param n number of top items to emit per window
   */
  class TopNHotItems(n: Int) extends KeyedProcessFunction[Long, ItemViewCount, String] {
    // Keyed list state holding all per-item counts for the current window end.
    lazy val items = getRuntimeContext.getListState(
      new ListStateDescriptor[ItemViewCount]("items", Types.of[ItemViewCount])
    )

    override def processElement(value: ItemViewCount, ctx: KeyedProcessFunction[Long, ItemViewCount, String]#Context, out: Collector[String]) = {
      items.add(value)
      // Fire just after the window end. The job runs on EVENT time (see the
      // environment setup), so this must be an event-time timer; the original
      // processing-time timer fired on the wall clock, before (or regardless
      // of) the watermark reaching the window end.
      ctx.timerService().registerEventTimeTimer(value.windowEnd + 1)
    }

    override def onTimer(timestamp: Long, ctx: KeyedProcessFunction[Long, ItemViewCount, String]#OnTimerContext, out: Collector[String]) = {
      // Drain the list state into a local buffer for sorting.
      val listBuffer = ListBuffer[ItemViewCount]()
      // Explicit JavaConverters instead of the deprecated implicit JavaConversions.
      import scala.collection.JavaConverters._
      for (el <- items.get().asScala) {
        listBuffer += el
      }

      // State for this window end is no longer needed.
      items.clear()

      // Sort by click count descending and keep the configured top n.
      // (Was hard-coded to take(3), silently ignoring the constructor parameter.)
      val sortedItems = listBuffer.sortBy(-_.count).take(n)

      val result = new StringBuilder
      result.append("====================================\n")
      result.append("时间: ").append(new Timestamp(timestamp - 1)).append("\n")
      for (i <- sortedItems.indices) {
        val currentItem = sortedItems(i)
        result.append("No")
          .append(i + 1)
          .append(":")
          .append("  商品ID=")
          .append(currentItem.itemId)
          .append("  浏览量=")
          .append(currentItem.count)
          .append("\n")
      }
      result.append("====================================\n\n")
      // NOTE(review): removed Thread.sleep(1000) — sleeping here blocks the
      // operator's task thread and stalls the whole pipeline; it only slowed
      // console output for demos.
      out.collect(result.toString())
    }
  }

  /**
   * Wraps the single pre-aggregated count produced for a keyed window into an
   * ItemViewCount, tagging it with the window's end timestamp.
   */
  class MyProcessResult extends ProcessWindowFunction[Long, ItemViewCount, Long, TimeWindow] {
    override def process(key: Long, context: Context, elements: Iterable[Long], out: Collector[ItemViewCount]) = {
      // The upstream AggregateFunction emits exactly one count per window.
      val count = elements.head
      val windowEnd = context.window.getEnd
      out.collect(ItemViewCount(key, windowEnd, count))
    }
  }

  /**
   * Counting aggregate: every UserBebavior event increments a Long accumulator
   * by one; partial counts from merged panes are combined by addition.
   */
  class CountAgg extends AggregateFunction[UserBebavior, Long, Long] {
    override def createAccumulator(): Long = 0L

    override def add(value: UserBebavior, accumulator: Long): Long = accumulator + 1L

    override def getResult(accumulator: Long): Long = accumulator

    override def merge(a: Long, b: Long): Long = a + b
  }

}
