package cn.tecnova.analysis

import java.text.SimpleDateFormat
import java.util.{Date, UUID}

import cn.tecnova.bean.{BaSubjectNlp, BaSubjectNlpRes, BaVolume}
import cn.tecnova.utils.{BroadcastKafkaProducer, ConfigHandler}
import com.alibaba.fastjson.{JSON, JSONObject}
import com.google.gson.Gson
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * Description: streaming analysis job for the user data-volume statistics store (ba_volume).
  **/
object BaVolumeAnalysis {

  // Silence verbose Spark/Hadoop logging on the driver.
  Logger.getLogger("org").setLevel(Level.ERROR)

  /**
    * Entry point.
    *
    * Consumes JSON records from the configured Kafka topics, aggregates per-user
    * daily counters (stored / warning / valid records plus the batch-wide
    * nlp_article_emotion count) and writes one BaVolume JSON row per user back
    * to the "ba_volume_analysis" Kafka topic. Offsets are committed only after
    * the batch has been processed.
    *
    * @param args args(0) is the streaming batch interval in seconds.
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      //      .setMaster("local[*]")
      //      .set("spark.streaming.kafka.maxRatePerPartition", args(0))
      .set("spark.streaming.stopGracefullyOnShutdown", "true")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .registerKryoClasses(Array(classOf[BaVolume]))

    val ssc = new StreamingContext(conf, Seconds(args(0).toInt))

    val topics: Array[String] = ConfigHandler.baVolumeAnalysisTopic.split(",")

    val groupid = "g_bavolumeanalysisV5"

    // Create the direct Kafka stream for this consumer group.
    val datas: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent, // spread Kafka partitions evenly across executors
      ConsumerStrategies.Subscribe[String, String](topics, ConfigHandler.kafkaParams(groupid))
    )

    // Broadcast a lazily-initialized Kafka producer wrapper to the executors.
    val kafkaBro = ssc.sparkContext.broadcast(BroadcastKafkaProducer[String, String](ConfigHandler.kafkaProps))

    datas.foreachRDD(rdd => {

      // Capture this batch's offset ranges so they can be committed after processing.
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      // Daily collected volume: number of nlp_article_emotion records in this batch.
      // FIX: dropped the coalesce(1) that preceded count() — it forced the whole
      // filtered batch through a single partition without changing the result.
      val nlpArticleEmotionNum: Long = rdd.filter(record => {
        val jsonObj: JSONObject = JSON.parseObject(record.value())
        // Yoda-style equals tolerates a missing/null "topic_name" field.
        "nlp_article_emotion".equals(jsonObj.getString("topic_name"))
      }).count()

      // Map each record to (userId, List(storedNum, 0, warningNum, validNum)).
      // Records matching neither topic keep userId = "" and are filtered out below.
      val useridAndIndex: RDD[(String, List[Int])] = rdd.coalesce(4).map(record => {

        val jsonObj: JSONObject = JSON.parseObject(record.value())
        val topicName = jsonObj.getString("topic_name")
        var userId = ""
        var baUserRelationArticleNum = 0
        var baPublicSentimentNum = 0
        var baPublicSentimentAllNum = 0

        // Daily stored data.
        if ("ba_user_relation_article".equals(topicName)) {
          baUserRelationArticleNum = 1
          userId = jsonObj.getString("user_id")
        }

        // Daily valid data.
        if ("ba_public_sentiment".equals(topicName)) {
          baPublicSentimentAllNum = 1
          userId = jsonObj.getString("user_id")
          // Daily warning data (public_sentiment_type == "0").
          // FIX: removed the duplicate userId assignment that was here — it was
          // already set just above for every ba_public_sentiment record.
          if ("0".equals(jsonObj.getString("public_sentiment_type"))) {
            baPublicSentimentNum = 1
          }
        }
        (userId, List[Int](baUserRelationArticleNum, 0, baPublicSentimentNum, baPublicSentimentAllNum))
      })

      // Drop records that matched neither topic (key "" with all-zero counters).
      val notEmptyTuple = useridAndIndex.filter(_._1.nonEmpty)

      // Element-wise sum of the four counters per user.
      val userAndList: RDD[(String, List[Int])] = notEmptyTuple.reduceByKey((tp1, tp2) => tp1 zip tp2 map (tup => tup._1 + tup._2))

      // Serialize one BaVolume row per user. Note: nlpArticleEmotionNum is a
      // batch-wide total and is repeated on every user's row by design.
      val result: RDD[String] = userAndList.map(tp => {
        val id = UUID.randomUUID().toString.replaceAll("-", "")
        val update_time = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date)
        val volume: BaVolume = BaVolume(id, tp._1, tp._2(0).toString, nlpArticleEmotionNum.toString, tp._2(2).toString, tp._2(3).toString, update_time, "ba_volume_analysis")
        val gson = new Gson()
        gson.toJson(volume)
      })

      result.foreach(str => {
        // NOTE(review): println goes to executor stdout — debug leftover; consider a logger.
        println(str)
        // Push each aggregated row to Kafka keyed by a fresh random UUID.
        kafkaBro.value.send("ba_volume_analysis", UUID.randomUUID().toString.replaceAll("-", ""), str)
      })

      // Commit this batch's offsets only after processing finished (at-least-once).
      datas.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
    })

    ssc.start()
    ssc.awaitTermination()

  }
}
