package cn.tecnova.analysis

import java.util.UUID

import cn.tecnova.bean._
import cn.tecnova.utils.{BroadcastKafkaProducer, ConfigHandler}
import com.alibaba.fastjson.{JSON, JSONObject}
import com.google.gson.Gson
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.log4j.{Level, Logger}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SQLContext, SparkSession}
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
import org.elasticsearch.spark.rdd.EsSpark

/**
  * description:用户相关文章NLP分析结果库_分析
  **/
/**
  * description: 用户相关文章NLP分析结果库_分析
  * (analysis job for the user-related-article NLP result store)
  *
  * Streaming pipeline:
  *   1. Consume JSON records from Kafka topic "nlp_article_other".
  *   2. Join each record (on article_id) with the user/article relation
  *      table loaded once from Elasticsearch index ba_user_relation_article.
  *   3. Serialize each joined row as [[BaArticleNlpAnalysisRes]] JSON and
  *      produce it to Kafka topic "ba_article_nlp_analysis".
  *
  * Expected arguments:
  *   args(0) - spark.streaming.kafka.maxRatePerPartition (records/sec cap per partition)
  *   args(1) - streaming batch interval in seconds
  */
object BaArticleNlpAnalysis {

  // Silence verbose Spark/Hadoop framework logging; keep errors only.
  Logger.getLogger("org").setLevel(Level.ERROR)

  def main(args: Array[String]): Unit = {

    // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException
    // deep inside SparkConf setup.
    require(args.length >= 2,
      "usage: BaArticleNlpAnalysis <maxRatePerPartition> <batchIntervalSeconds>")

    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      //      .setMaster("local[*]")
      .set("spark.streaming.kafka.maxRatePerPartition", args(0)) // throttle per-partition pull rate
      .set("spark.streaming.stopGracefullyOnShutdown", "true")
      .set("es.nodes", ConfigHandler.esNodes)
      .set("es.port", ConfigHandler.esPort)
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .registerKryoClasses(Array(classOf[NlpArticleOther], classOf[BaAnalysisBean], classOf[BaArticleNlpAnalysisRes]))

    // One SparkSession for the whole job. SQLContext is deprecated since
    // Spark 2.0, and there is no need to call getOrCreate() on every batch.
    val session = SparkSession.builder().config(conf).getOrCreate()
    val sc = session.sparkContext

    import session.implicits._

    val ssc = new StreamingContext(sc, Seconds(args(1).toInt))

    val groupid = "g_baarticlenlpanalysisV3"

    // Direct Kafka stream; offsets are tracked and committed manually below.
    val datas: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent, // spread Kafka partitions evenly across executors
      ConsumerStrategies.Subscribe[String, String](Array("nlp_article_other"), ConfigHandler.kafkaParams(groupid))
    )

    // Lazily-initialized Kafka producer shared by executors via broadcast.
    val kafkaBro = ssc.sparkContext.broadcast(BroadcastKafkaProducer[String, String](ConfigHandler.kafkaProps))

    // ba_user_relation_article: (user_id, uuid) pairs loaded from Elasticsearch.
    // Cached because it is reused in the join on every batch; previously the
    // un-cached plan re-read Elasticsearch each batch. Note the table is loaded
    // once at startup, so index updates during the job's lifetime are not seen
    // (same behavior as before).
    // NOTE: the former sc.broadcast(DataFrame) was removed — broadcasting a
    // DataFrame only ships the lazy plan, not its data, and the value was only
    // ever read on the driver inside foreachRDD.
    val userRelationArticleDF: DataFrame = EsSpark
      .esJsonRDD(sc, "ba_user_relation_article/ba_user_relation_article")
      .map(_._2)
      .map { json =>
        val jsonObj: JSONObject = JSON.parseObject(json)
        UserRelationArticleBro(jsonObj.getString("user_id"), jsonObj.getString("uuid"))
      }
      .toDF()
      .cache()

    datas.foreachRDD { rdd =>

      // Capture this batch's offset ranges before any transformation.
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      // Parse each Kafka record's JSON payload into the bean.
      val nlpArticleOther: RDD[NlpArticleOther] = rdd.map { record =>
        JSON.parseObject(record.value(), classOf[NlpArticleOther])
      }

      val nlpArticleOtherDF: DataFrame = nlpArticleOther.toDF()

      // Explicit equi-join condition (was: condition-less join + where, which
      // relied on the optimizer to turn the cross join back into an equi-join).
      val result: DataFrame = nlpArticleOtherDF.join(
        userRelationArticleDF,
        nlpArticleOtherDF("article_id") === userRelationArticleDF("uuid")
      )

      result.mapPartitions { rows =>
        // One Gson per partition instead of per row. Gson is not Serializable,
        // so it cannot be built on the driver and captured in the closure.
        val gson = new Gson()
        rows.map { row =>
          val id = UUID.randomUUID().toString.replaceAll("-", "")
          val res: BaArticleNlpAnalysisRes = BaArticleNlpAnalysisRes(
            id,
            row.getAs[String]("user_id"),
            row.getAs[String]("article_id"),
            row.getAs[String]("hot_words"),
            row.getAs[String]("sentiment_analysis"),
            row.getAs[String]("summary"),
            row.getAs[String]("money"),
            row.getAs[String]("person"),
            row.getAs[String]("organization"),
            row.getAs[String]("area"),
            row.getAs[String]("percent"),
            row.getAs[String]("number"),
            row.getAs[String]("nlp_time"),
            row.getAs[String]("nlp_date"),
            row.getAs[String]("score"),
            row.getAs[String]("lda"),
            row.getAs[String]("lda_context"),
            row.getAs[String]("lda_sort"),
            row.getAs[String]("clustering_type"),
            row.getAs[String]("relation_words"),
            row.getAs[String]("update_time"),
            "ba_article_nlp_analysis"
          )
          gson.toJson(res)
        }
      }.foreach { js =>
        kafkaBro.value.send("ba_article_nlp_analysis", UUID.randomUUID().toString.replaceAll("-", ""), js)
        println("数据写到kafka:" + js)
      }

      // Commit offsets only after the batch has been handed to the producer,
      // giving at-least-once delivery semantics.
      datas.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
    }

    ssc.start()
    ssc.awaitTermination()

  }

}
