package cn.tecnova.analysis

import java.util.UUID

import cn.tecnova.bean.{BaSubjectNlp, BaSubjectNlpRes}
import cn.tecnova.utils.{BroadcastKafkaProducer, ConfigHandler}
import com.alibaba.fastjson.JSON
import com.google.gson.Gson
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, SQLContext, SparkSession}
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * description: uses the MySQL public-sentiment event table (public_sentiment_event)
  **/
object BaSubjectNlpAnalysis4 {

  Logger.getLogger("org").setLevel(Level.ERROR)

  /**
    * Streaming job: consumes NLP subject-analysis messages from Kafka topic
    * "nlp_subject_analysis", joins them with the MySQL public-sentiment event
    * table (public_sentiment_event) and republishes the enriched rows as JSON
    * to Kafka topic "ba_subject_nlp_analysis".
    *
    * @param args args(0) = spark.streaming.kafka.maxRatePerPartition,
    *             args(1) = batch interval in seconds
    */
  def main(args: Array[String]): Unit = {

    // Fail fast with a clear message instead of ArrayIndexOutOfBoundsException.
    require(args.length >= 2, "usage: <maxRatePerPartition> <batchIntervalSeconds>")

    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      //      .setMaster("local[*]")
      .set("spark.streaming.kafka.maxRatePerPartition", args(0))
      .set("spark.streaming.stopGracefullyOnShutdown", "true")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .registerKryoClasses(Array(classOf[BaSubjectNlp], classOf[BaSubjectNlpRes]))

    val sc = new SparkContext(conf)
    val sQLContext = new SQLContext(sc)
    import sQLContext.implicits._
    val ssc = new StreamingContext(sc, Seconds(args(1).toInt))

    // Kafka consumer group id
    val groupid = "g_basubjectnlpanalysis4"

    // Direct Kafka stream; offsets are managed manually and committed per batch below.
    val datas: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent, // spread partitions evenly across executors
      ConsumerStrategies.Subscribe[String, String](Array("nlp_subject_analysis"), ConfigHandler.kafkaParams(groupid))
    )

    // Lazily-initialized Kafka producer shared by all executor JVMs.
    val kafkaBro = ssc.sparkContext.broadcast(BroadcastKafkaProducer[String, String](ConfigHandler.kafkaProps))

    datas.foreachRDD(rdd => {

      // Capture this batch's offset ranges before any transformation so they
      // can be committed once the batch has been processed (at-least-once).
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      if (!rdd.isEmpty()) {

        // Parse each record into a BaSubjectNlp and keep only subject_type == "3".
        // A malformed JSON payload previously crashed the whole job; such
        // records are now skipped instead of killing the stream.
        val nlpSubjectAnalysisDf: DataFrame = rdd.flatMap(record => {
          try {
            Option(JSON.parseObject(record.value(), classOf[BaSubjectNlp]))
          } catch {
            case _: Exception => None
          }
        }).filter(ba => "3".equals(ba.subject_type)).toDF()

        // Re-read the event table every batch so rows updated in MySQL are
        // picked up. (The original broadcast + unpersist-per-batch pattern
        // never actually refreshed anything: the DataFrame was defined once at
        // startup and the "广播变量更新成功" log message was misleading. The
        // jdbc() call here is lazy; the actual read happens when the SQL runs.)
        val publiSentimentEventDf: DataFrame = sQLContext.read
          .jdbc(ConfigHandler.url, "public_sentiment_event", ConfigHandler.props)
          .select("id", "user_id")

        // Register both sides of the join as temp views.
        nlpSubjectAnalysisDf.createOrReplaceTempView("nlpSubjectAnalysisDf")
        publiSentimentEventDf.createOrReplaceTempView("publiSentimentEventDf")

        // Inner join on subject_id = event id. Columns are qualified so the
        // query cannot fail with an ambiguous reference if both sides ever
        // carry a column of the same name.
        val sql4 =
          """
            |select
            |t2.user_id, t1.subject_id, t1.subject_name, t1.subject_type, t1.sentiment_analysis,
            |t1.hot_words, t1.positive_word_cloud, t1.negative_word_cloud, t1.relation_graph, t1.update_time
            |from nlpSubjectAnalysisDf t1
            |join publiSentimentEventDf t2 on t1.subject_id = t2.id
          """.stripMargin

        // Serialize each joined row to JSON; one Gson instance per partition
        // instead of one per row (Gson is not serializable, so it must be
        // constructed on the executor side).
        val resultJson = sQLContext.sql(sql4).mapPartitions(rows => {
          val gson = new Gson()
          rows.map(row => {
            // Random 32-char hex id for the result record.
            val id = UUID.randomUUID().toString.replaceAll("-", "")
            val res = BaSubjectNlpRes(
              id,
              row.getAs[String]("user_id"),
              row.getAs[String]("subject_id"),
              row.getAs[String]("subject_name"),
              row.getAs[String]("subject_type"),
              row.getAs[String]("sentiment_analysis"),
              row.getAs[String]("hot_words"),
              row.getAs[String]("positive_word_cloud"),
              row.getAs[String]("negative_word_cloud"),
              row.getAs[String]("relation_graph"),
              row.getAs[String]("update_time"),
              "ba_subject_nlp_analysis")
            gson.toJson(res)
          })
        })

        // Publish each enriched record back to Kafka with a random key.
        resultJson.foreachPartition((it: Iterator[String]) => {
          val producer = kafkaBro.value
          it.foreach(js => {
            producer.send("ba_subject_nlp_analysis", UUID.randomUUID().toString.replaceAll("-", ""), js)
            println(js)
          })
        })
      }

      // Commit offsets only after the batch has been fully processed.
      datas.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
    })

    ssc.start()
    ssc.awaitTermination()
  }
}
