package com.wenge.datagroup.storage.main

import com.alibaba.fastjson.{JSON, JSONObject}
import com.wenge.datagroup.storage.bean.Topic
import com.wenge.datagroup.storage.config.InitContext
import com.wenge.datagroup.storage.dataprocess.{MultilingualLanguage, MultilingualTranslation}
import com.wenge.datagroup.storage.future.AsyncProcessCreater
import com.wenge.datagroup.storage.service.TransformServiceFactory
import com.wenge.datagroup.storage.service.impl.BaseTransformServiceImpl
import com.wenge.datagroup.storage.util._
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.log4j.{Logger, PropertyConfigurator}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010._
import redis.clients.jedis.JedisCluster

import java.{lang, util}
import java.text.SimpleDateFormat
import java.util.Date
import scala.collection.JavaConverters._
import scala.collection.{breakOut, mutable}
import scala.util.{Success, Try}

object SparkStreamConsumer {
  private val logger: Logger = Logger.getLogger(SparkStreamConsumer.getClass)
  // Sentinel for malformed / filtered-out records; dropped downstream via `.filter(!_.isEmpty)`.
  private val EMPTY_JSONOBJECT = new JSONObject
  // Channel/topic metadata, resolved in main() from the XML channel config ("optimized scheme").
  var initTopic: Topic = _
  // Builds per-hour keys for the (currently commented-out) Redis counters.
  // NOTE(review): SimpleDateFormat is not thread-safe — keep it driver-local.
  val sdf = new SimpleDateFormat("yyyy-MM-dd_HH")

  /**
   * Parses one Kafka record payload into a JSONObject.
   *
   * @param value         raw record value; caller guarantees non-null and non-empty
   * @param dropRedirects when true, records whose "content" field marks a wiki
   *                      redirect page are replaced by the empty sentinel
   * @return the parsed object, or EMPTY_JSONOBJECT when parsing fails or the
   *         record is a redirect page to be discarded
   */
  private def parseRecord(value: String, dropRedirects: Boolean): JSONObject =
    Try {
      val jsonData: JSONObject = JSON.parseObject(value)
      if (dropRedirects
        && jsonData.containsKey("content")
        && (jsonData.getString("content").contains("#REDIRECT")
          || jsonData.getString("content").contains("#redirection")
          || jsonData.getString("content").contains("#redirect"))) {
        EMPTY_JSONOBJECT
      } else {
        jsonData
      }
    } match {
      case Success(jsonData) => jsonData
      case _ =>
        logger.warn("原始数据格式异常：" + value)
        EMPTY_JSONOBJECT
    }

  /**
   * Spark Streaming entry point: consumes JSON records from Kafka, runs the
   * configured transform service over each record, forwards the results to the
   * send topic, and persists consumed offsets per batch into the MySQL table
   * `labeling_kafka_offset`.
   *
   * Supported arguments, all of the form `--key=value`:
   *  - `--config=PATH`             main configuration file
   *  - `--spark-CONF=VALUE`        extra SparkConf entry (prefix stripped)
   *  - `--topic=NAME`              Kafka topic to consume
   *  - `--sparkConf=PATH`          additional Spark configuration file
   *  - `--topic_partitions=SPEC`   explicit topic-partition assignment
   */
  def main(args: Array[String]): Unit = {
    PropertyConfigurator.configure("config/log4j.properties")
    // SparkConf entries collected from `--spark-*` command-line arguments.
    val cliSparkConf = mutable.Map[String, String]()
    var topic = ""
    var sendTopic = ""
    var topic_partitions = ""
    var sparkConfPath = ""
    if (args != null) {
      for (configArg <- args) {
        logger.info("Args:" + configArg)
        if (configArg.startsWith("--")) {
          // FIX: limit the split to 2 so values containing '=' (e.g. JAAS
          // configs, URLs with query strings) survive intact, and skip
          // value-less arguments instead of throwing
          // ArrayIndexOutOfBoundsException.
          val split = configArg.split("=", 2)
          if (split.length == 2) {
            val key = split(0)
            val v = split(1)
            if (key.equals("--config")) {
              ConfigUtil.setConfigFile(v)
            } else if (key.startsWith("--spark-")) {
              cliSparkConf += (key.replace("--spark-", "") -> v)
            } else if (key.equals("--topic")) {
              topic = v
              ConfigUtil.setTopic(topic)
            } else if (key.equals("--sparkConf")) {
              sparkConfPath = v
            } else if (key.equals("--topic_partitions")) {
              topic_partitions = v
              ConfigUtil.initTopicPartitionSet(topic_partitions)
            }
          } else {
            logger.warn("Ignoring malformed argument (expected --key=value): " + configArg)
          }
        }
      }
    } else {
      System.exit(0)
    }

    ConfigUtil.init()
    // Load per-channel settings for this topic from the XML config ("optimized scheme").
    InitContext.initChannelConfig(ConfigUtil.get("xmlPath"), topic)
    initTopic = InitContext.topic
    sendTopic = initTopic.sendTopicName

    println("topic:" + topic + ":" + sendTopic)
    val conf = new SparkConf()
      .setAppName("kafka-consumer-" + topic)
      //       .setMaster(ConfigUtil.get("sparkMaster"))
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // .set("spark.streaming.kafka.consumer.poll.ms", "10000") // retry interval when no data is fetched
      .set("spark.streaming.kafka.maxRatePerPartition", ConfigUtil.get("maxRatePerPartition"))
      .set("spark.streaming.backpressure.enabled", "true")
      .set("spark.streaming.stopGracefullyOnShutdown", "true")
    if (sparkConfPath != "") {
      // Entries from the Spark config file. Renamed so they no longer shadow
      // the command-line map, which is applied afterwards and therefore wins.
      val fileSparkConf = ConfigUtil.getSparkConfig(sparkConfPath).asScala
      for ((key, value) <- fileSparkConf) {
        conf.set(key, value)
      }
    }

    // Command-line --spark-* entries override the config file.
    for ((key, value) <- cliSparkConf) {
      conf.set(key, value)
    }

    logger.info(" 初始化ssc... batchSeconds= " + ConfigUtil.get("batchSeconds").toInt)
    val groupId = ConfigUtil.get("kafka.group")
    val sc = new SparkContext(conf)

    //"security.protocol" -> ConfigUtil.get("security.protocol"),
    //      "sasl.mechanism" -> ConfigUtil.get("sasl.mechanism"),
    //      "sasl.jaas.config" -> ConfigUtil.get("sasl.jaas.config"),
    val ssc =
      new StreamingContext(sc, Seconds(ConfigUtil.get("batchSeconds").toInt))
    // Auto-commit is disabled: offsets are persisted manually into MySQL below.
    val kafkaParams = Map(
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> ConfigUtil.get("bootstrap.servers"),
      ConsumerConfig.GROUP_ID_CONFIG -> groupId,

      ConsumerConfig.AUTO_OFFSET_RESET_CONFIG -> ConfigUtil.get("offsetReset"),
      ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG -> (false: lang.Boolean),
      ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> classOf[StringDeserializer],
      ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> classOf[StringDeserializer],
      ConsumerConfig.RECONNECT_BACKOFF_MS_CONFIG -> (2000: lang.Long),
      ConsumerConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG -> (20000: lang.Long)
    )
    // Restore previously committed offsets from MySQL.
    val offsetMap: mutable.HashMap[TopicPartition, Long] =
      MysqlUtil.getOffset(groupId, topic)
    logger.info("初始化kafkaTopicDS...")
    // Four cases: (no saved offsets | saved offsets) x (explicit partition
    // assignment | subscribe to the whole topic).
    val kafkaTopicDS: InputDStream[ConsumerRecord[String, String]] =
      if (offsetMap.isEmpty || offsetMap.values.toList.sum == 0) {
        logger.info("从起始偏移量开始消费...")
        if (!ConfigUtil.topic_partitions_set.isEmpty) {
          KafkaUtils.createDirectStream(
            ssc,
            LocationStrategies.PreferConsistent,
            ConsumerStrategies.Assign(ConfigUtil.topic_partitions_set.asScala, kafkaParams))
        } else {
          KafkaUtils.createDirectStream(
            ssc,
            LocationStrategies.PreferConsistent,
            ConsumerStrategies.Subscribe[String, String](Set(topic), kafkaParams))
        }
      } else {
        logger.info("从已存在记录开始消费 = " + offsetMap)
        if (!ConfigUtil.topic_partitions_set.isEmpty) {
          KafkaUtils.createDirectStream(
            ssc,
            LocationStrategies.PreferConsistent,
            ConsumerStrategies.Assign(
              ConfigUtil.topic_partitions_set.asScala,
              kafkaParams,
              // Only keep saved offsets belonging to the assigned partitions.
              offsetMap.filter(
                tpo =>
                  ConfigUtil.topic_partitions_set.asScala
                    .map(tp => tp.partition())
                    .contains(tpo._1.partition()))
            )
          )
        } else {
          KafkaUtils.createDirectStream(
            ssc,
            LocationStrategies.PreferConsistent,
            ConsumerStrategies
              .Subscribe[String, String](Set(topic), kafkaParams, offsetMap))
        }
      }
    ssc.sparkContext.getConf.getAll.foreach(f => println("spark-conf:" + f._1 + "=" + f._2))

    // Parse each record value to JSON; the wikipedia source additionally
    // discards redirect pages. Malformed records become the empty sentinel and
    // are filtered out before repartitioning.
    val dropRedirects = "topic_c1_original_wikipedia".equals(topic)
    val repartitionDS: DStream[JSONObject] = kafkaTopicDS
      .mapPartitions(
        ites =>
          ites
            .filter(data => data.value() != null && !data.value().isEmpty)
            .map(data => parseRecord(data.value(), dropRedirects)))
      // Drop the sentinel records produced for malformed/filtered payloads.
      .filter(!_.isEmpty)
      .repartition(ConfigUtil.get("repartition").toInt)
    // Cache so the parsed stream is not recomputed if consumed more than once.
    repartitionDS.cache()


    // Broadcast shared per-executor resources once instead of serializing
    // them into every task closure.
    val initTopicBro: Broadcast[Topic] = sc.broadcast(initTopic)
    val druidDatasource: Broadcast[DruidDataSource] =
      sc.broadcast(
        DruidDataSource(
          ConfigUtil.get("url"),
          ConfigUtil.get("username"),
          ConfigUtil.get("password")))
    val multilingualLanguageBro: Broadcast[MultilingualLanguage] =
      sc.broadcast(new MultilingualLanguage(ConfigUtil.languageUrl))
    val multilingualTranslationBro: Broadcast[MultilingualTranslation] =
      sc.broadcast(new MultilingualTranslation(ConfigUtil.translationUrl))
    val asyncBro: Broadcast[AsyncProcessCreater] =
      sc.broadcast(
        AsyncProcessCreater(druidDatasource, multilingualLanguageBro, multilingualTranslationBro))

    // Per-topic transform implementation resolved once on the driver ("optimized scheme").
    val transformService = TransformServiceFactory.getInstance(initTopic.tClazz)
    val tranJsonDS = repartitionDS.transform(
      rdd => {
        rdd.map(
          json => {
            // NOTE(review): the return value of transferData is ignored —
            // presumably it mutates `json` in place or performs side effects;
            // confirm against the TransformService implementation.
            transformService.transferData(json, initTopicBro.value, asyncBro)
            json
          })
      })

    // Broadcast to avoid driver/executor serialization problems with the sinks.
    val redisSink: Broadcast[RedisSink] = {
      sc.broadcast(RedisSink(ConfigUtil.get("redis.hostAndPort"), ConfigUtil.get("redis.password")))
    }

    val kafkaSinkBro: Broadcast[KafkaSink] =
      KafkaSink.getKafkaProducer(sc, ConfigUtil.bootstrapServers)

    // Forward transformed records to the downstream topic.
    if (sendTopic != null) {
      val sendDS: DStream[JSONObject] =
        tranJsonDS.repartition(ConfigUtil.sendRepartition)
      sendDS.foreachRDD {
        rdd => {
          rdd.foreachPartition {
            jsonitr => {
              val list: util.List[JSONObject] = jsonitr.toList.asJava
              kafkaSinkBro.value.sendData(list, sendTopic)
              // `num` feeds the disabled Redis counter block below.
              var num = 0
              if (!list.isEmpty) {
                num = list.size()
              }

              // Cluster-mode Redis throughput counter (currently disabled).
            /*  val jedisCluster: JedisCluster = redisSink.value.jedisCluster
              jedisCluster.incrBy(topic + "_" + sdf.format(new Date()), num)
              logger.info(
                "redis 记录成功！key:" + topic + "_" + sdf.format(new Date()) + ",value:" + num)
            */}
          }
        }
      }
    }

    // Persist each batch's offset ranges into MySQL.
    // NOTE(review): offsets are committed regardless of whether the send path
    // above succeeded, so delivery is effectively at-most-once on failure —
    // confirm this is intended.
    var offsetRanges = Array[OffsetRange]()
    // FIX: foreachRDD returns Unit, so the former `val transformDS =` binding
    // was dead and has been removed.
    kafkaTopicDS.foreachRDD {
      rdd =>
        offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
        // FIX: Array.toString logs only a hash code; mkString shows the ranges.
        logger.info("获取当前DS的消息偏移量 = topic = " + offsetRanges.mkString(","))
        // Commit the offsets.
        val sqlProxy = new SqlProxy()
        val client = DruidDataSourceUtil.getConnection
        try {
          for (or <- offsetRanges) {
            sqlProxy.executeUpdate(
              client,
              "replace into `labeling_kafka_offset` (groupid,topic,`partition`,untilOffset,consumerIP,update_time) values(?,?,?,?,?,CURRENT_TIMESTAMP)",
              Array(groupId, or.topic, or.partition.toString, or.untilOffset, ConfigUtil.nodeIp)
            )
            logger.info(
              "保存偏移量 = topic = " + or.topic + " partition = " + or.partition + " fromoffset = " + or.fromOffset + " offset = " + or.untilOffset)
          }
        } catch {
          case e: Exception =>
            e.printStackTrace()
            logger.error(e.toString)
        } finally {
          // Always return the connection, even when the batch commit failed.
          sqlProxy.shutdown(client)
        }
    }

    ssc.start()
    ssc.awaitTermination()

  }

}
