package com.cmnit.gatherdata.modules.module

import java.util

import com.alibaba.fastjson.JSON
import com.alibaba.fastjson.serializer.SerializerFeature
import com.cmnit.gatherdata.enums.KafkaTopicEnum
import com.cmnit.gatherdata.modules.bean.{GantryETCBill, TollEnBillInfo, TollExBillInfo}
import com.cmnit.gatherdata.modules.utils.{HBaseUtil, KafkaStreamUtil}
import com.cmnit.gatherdata.utils.{ConfigurationManager, KafkaProducerUtils, MD5Utils}
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.Mutation
import org.apache.log4j.Logger
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.{Seconds, StreamingContext}

object HbaseModule {

  final val logger: Logger = Logger.getLogger(HbaseModule.getClass)

  /**
   * Starts a Spark Streaming job that deduplicates toll/gantry billing records.
   *
   * For each micro-batch, records from three source topics (exit-lane toll,
   * entry-lane toll and ETC gantry billing) are checked against an HBase table
   * of previously-seen rowkeys; unseen records are forwarded to the matching
   * sink topic and their rowkeys are written back to HBase so later batches
   * skip them.
   *
   * @param sparkSession          active session whose SparkContext hosts the StreamingContext
   * @param kafkaBootstrapServers Kafka bootstrap servers, used for both consuming and producing
   * @param zkConnect             ZooKeeper connect string passed to the Kafka stream utility
   * @param groupId               consumer group id, also used for manual offset bookkeeping
   */
  def startStreaming(sparkSession: SparkSession, kafkaBootstrapServers: String, zkConnect: String, groupId: String): Unit = {

    val topic = KafkaTopicEnum.ALL_TOPIC.getTopic
    val source_station_out = KafkaTopicEnum.TOLL_ORI_OUT_TRADE_TOPIC.getTopic
    val source_station_in = KafkaTopicEnum.TRC_ENPU_TOPIC.getTopic
    val source_gantry_bill = KafkaTopicEnum.GBUPLOAD_ETCTU_TOPIC.getTopic
    val sink_station_out = KafkaTopicEnum.TOLL_FLOW_EXIT.getTopic
    val sink_station_in = KafkaTopicEnum.TOLL_FLOW_ENTRY.getTopic
    val sink_gantry_bill = KafkaTopicEnum.GANTRY_CHARGE_ETC.getTopic

    val sc: SparkContext = sparkSession.sparkContext
    val ssc: StreamingContext = new StreamingContext(sc, Seconds(ConfigurationManager.getProperty("dual.time").toLong))
    val targetTable: String = ConfigurationManager.getProperty("hbase.target.table")

    // Real-time Kafka input stream covering all source topics.
    val dataInputStream = KafkaStreamUtil.getStream(ssc, kafkaBootstrapServers, zkConnect, topic, groupId)

    dataInputStream.foreachRDD(rdd => {
      try {
        // Exit-lane toll records: TOLL_ORI_OUT_TRADE_TOPIC -> TOLL_FLOW_EXIT
        rdd
          .filter(x => !x.value.isEmpty)
          .filter(x => x.topic == source_station_out)
          .map(_.value.toString)
          .repartition(8)
          .foreachPartition(partition =>
            dedupAndForward(partition, targetTable, kafkaBootstrapServers, sink_station_out, source_station_out,
              line => {
                val bill = JSON.parseObject(line, classOf[TollExBillInfo])
                (bill.id + source_station_out, JSON.toJSONString(bill, SerializerFeature.DisableCircularReferenceDetect))
              }))

        // Entry-lane toll records: TRC_ENPU_TOPIC -> TOLL_FLOW_ENTRY
        rdd
          .filter(x => !x.value.isEmpty)
          .filter(x => x.topic == source_station_in)
          .map(_.value.toString)
          .repartition(8)
          .foreachPartition(partition =>
            dedupAndForward(partition, targetTable, kafkaBootstrapServers, sink_station_in, source_station_in,
              line => {
                val bill = JSON.parseObject(line, classOf[TollEnBillInfo])
                (bill.id + source_station_in, JSON.toJSONString(bill, SerializerFeature.DisableCircularReferenceDetect))
              }))

        // ETC gantry billing records: GBUPLOAD_ETCTU_TOPIC -> GANTRY_CHARGE_ETC
        // NOTE: this bean is keyed by tradeid rather than id.
        rdd
          .filter(x => !x.value.isEmpty)
          .filter(x => x.topic == source_gantry_bill)
          .map(_.value.toString)
          .repartition(8)
          .foreachPartition(partition =>
            dedupAndForward(partition, targetTable, kafkaBootstrapServers, sink_gantry_bill, source_gantry_bill,
              line => {
                val bill = JSON.parseObject(line, classOf[GantryETCBill])
                (bill.tradeid + source_gantry_bill, JSON.toJSONString(bill, SerializerFeature.DisableCircularReferenceDetect))
              }))

        // Persist the local offsets only after the whole batch succeeded.
        KafkaStreamUtil.updateOffset(rdd, groupId)
      } catch {
        // Log through log4j (was println) so batch failures show up in the driver
        // log; offsets are not advanced, so the failed batch is re-consumed.
        case e: Exception => logger.error("Failed to process micro-batch", e)
      }
    })
    ssc.start()
    ssc.awaitTermination()
  }

  /**
   * Deduplicates one partition of raw JSON records against HBase and forwards
   * previously-unseen records to the sink Kafka topic.
   *
   * The dedup rowkey is md5(key) + key, where key = recordId + sourceTopic
   * (the md5 prefix spreads writes across HBase regions); the source topic name
   * is stored as the cell value via HBaseUtil.getPutByRowkey.
   *
   * @param partition   raw JSON lines of a single partition
   * @param targetTable HBase table used both to look up and to record seen rowkeys
   * @param brokers     Kafka bootstrap servers for the producer
   * @param sinkTopic   topic that new (deduplicated) records are forwarded to
   * @param sourceTopic source topic name, mixed into the rowkey and stored in HBase
   * @param parse       maps a raw JSON line to (dedupKey, serializedJson)
   */
  private def dedupAndForward(partition: Iterator[String],
                              targetTable: String,
                              brokers: String,
                              sinkTopic: String,
                              sourceTopic: String,
                              parse: String => (String, String)): Unit = {
    // One HBase connection per partition; nothing here crosses executor
    // boundaries, so no serialization is required.
    val conn = HBaseUtil.getHBaseConn
    val table = conn.getBufferedMutator(TableName.valueOf(targetTable))
    val mutations = new util.ArrayList[Mutation]
    try {
      partition
        .map(parse)
        // Skip records whose rowkey already exists in HBase (already processed).
        .filter { case (key, _) => HBaseUtil.isEmptyByRowkey(targetTable, MD5Utils.string2Md5(key) + key, conn) }
        .foreach { case (key, json) =>
          // Forward the new record to Kafka ...
          KafkaProducerUtils.send(true, brokers, sinkTopic, json)
          // ... and queue its rowkey so later batches treat it as already seen.
          mutations.add(HBaseUtil.getPutByRowkey(MD5Utils.string2Md5(key) + key, sourceTopic))
        }
      table.mutate(mutations)
      table.flush()
    } finally {
      // Release HBase resources even when a Kafka/HBase call throws (the
      // previous version only closed on success, leaking the connection).
      HBaseUtil.close(conn, table, null)
    }
  }
}
