package cn.tecnova.analysis

import java.util.UUID

import cn.tecnova.bean.{BaAnalysisBean, BaArticle}
import cn.tecnova.utils.{BroadcastKafkaProducer, ConfigHandler}
import com.google.gson.Gson
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.util.Try

/**
  * description: Daily-report article analysis over the user-related article stream.
  *
  * Input:   business analysis stream — user-related article library (topic `ba_user_relation_article`)
  * Logic:   keep records whose sentiment is negative ("负面") OR whose average
  *          hit-keyword weight is strictly greater than 3 (numeric comparison).
  * Output:  daily-report article library (topic `ba_daily_report`) and the
  *          article library (topic `ba_article`).
  *
  * Usage: args(0) = spark.streaming.kafka.maxRatePerPartition (records/partition/second),
  *        args(1) = batch interval in seconds.
  **/
object BaDailyReport {

  // Silence verbose Spark/Hadoop framework logging.
  Logger.getLogger("org").setLevel(Level.ERROR)

  // Articles whose average hit-keyword weight exceeds this value are promoted
  // to the daily report.
  private val WeightThreshold = 3.0

  /**
    * Numeric threshold check on the (string-typed) weight field.
    *
    * The previous implementation compared strings lexicographically
    * (`weight > "3"`), which is wrong for multi-digit values: "10" < "3",
    * "25" < "3". We parse to Double first; null or unparseable weights are
    * treated as not exceeding the threshold (record is not promoted on
    * weight alone).
    */
  private def weightExceedsThreshold(weight: String): Boolean =
    Option(weight).flatMap(w => Try(w.toDouble).toOption).exists(_ > WeightThreshold)

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      //      .setMaster("local[*]")
      // args(0): per-partition consumption rate cap (backpressure guard).
      .set("spark.streaming.kafka.maxRatePerPartition", args(0))
      // Finish in-flight batches before shutting down so offsets stay consistent.
      .set("spark.streaming.stopGracefullyOnShutdown", "true")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .registerKryoClasses(Array(classOf[BaAnalysisBean], classOf[BaArticle]))

    // args(1): batch interval in seconds.
    val ssc = new StreamingContext(conf, Seconds(args(1).toInt))

    // Kafka consumer group id for this job.
    val groupid = "g_badailyreport"

    // Direct stream from the upstream topic (no receiver; offsets managed manually).
    val datas: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent, // spread Kafka partitions evenly across executors
      ConsumerStrategies.Subscribe[String, String](Array("ba_user_relation_article"), ConfigHandler.kafkaParams(groupid))
    )

    // Broadcast the (lazily initialised) Kafka producer wrapper so each executor
    // reuses one producer instead of creating one per record.
    val kafkaBro = ssc.sparkContext.broadcast(BroadcastKafkaProducer[String, String](ConfigHandler.kafkaProps))

    datas.foreachRDD(rdd => {

      // Capture this batch's offset ranges on the driver before processing,
      // so they can be committed only after the batch succeeds (at-least-once).
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      rdd.foreachPartition(iter => {

        // One Gson per partition: created on the executor to avoid shipping
        // a non-serializable instance inside the task closure.
        val gson = new Gson()

        iter.foreach(record => {

          // Deserialize the JSON payload into the analysis bean.
          val baAnalysisBean: BaAnalysisBean = gson.fromJson(record.value(), classOf[BaAnalysisBean])

          // Promote the record when sentiment is negative, or the average
          // hit-keyword weight exceeds the threshold (numeric, not lexicographic).
          if ("负面".equals(baAnalysisBean.nlp_emotion) || weightExceedsThreshold(baAnalysisBean.baflow_rule_hitwords_weight)) {

            // Re-tag for the daily-report topic and serialize back to JSON.
            baAnalysisBean.topic_name = "ba_daily_report"
            val value = gson.toJson(baAnalysisBean)

            val baArticleVal = gson.toJson(BaArticle(baAnalysisBean.uuid, baAnalysisBean.article_title, baAnalysisBean.article_content, baAnalysisBean.nlp_emotion, "ba_article"))

            // Fan out to both downstream topics, keyed by a dash-free random UUID.
            // `replace` (literal) instead of `replaceAll` (regex): "-" needs no regex.
            kafkaBro.value.send("ba_daily_report", UUID.randomUUID().toString.replace("-", ""), value)
            kafkaBro.value.send("ba_article", UUID.randomUUID().toString.replace("-", ""), baArticleVal)

          }

        })

      })

      // Commit offsets only after the batch completed — at-least-once delivery.
      datas.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
    })

    ssc.start()
    ssc.awaitTermination()

  }

}
