package cn.tecnova.analysis

import java.util.UUID

import cn.tecnova.bean._
import cn.tecnova.utils.{BroadcastKafkaProducer, ConfigHandler, ESUtils}
import com.alibaba.fastjson.{JSON, JSONObject}
import com.google.gson.Gson
import org.apache.commons.lang.StringUtils
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SQLContext
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
import org.elasticsearch.index.query.QueryBuilders
import org.elasticsearch.search.SearchHit

/**
  * description:用户相关文章NLP分析结果库_分析
  **/
/**
  * Streaming job: consumes NLP analysis results for articles from the
  * `nlp_article_other` Kafka topic, looks up the owning user(s) of each
  * article in Elasticsearch (by `uuid` == article_id), and republishes a
  * per-user enriched record to the `ba_article_nlp_analysis` Kafka topic.
  *
  * Expected program arguments:
  *   args(0) — spark.streaming.kafka.maxRatePerPartition (records/sec/partition)
  *   args(1) — micro-batch interval in seconds
  */
object BaArticleNlpAnalysisV2 {

  // Silence Spark's verbose INFO logging; keep errors only.
  Logger.getLogger("org").setLevel(Level.ERROR)

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      //      .setMaster("local[*]")
      .set("spark.streaming.kafka.maxRatePerPartition", args(0))
      .set("spark.streaming.stopGracefullyOnShutdown", "true")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .registerKryoClasses(Array(classOf[NlpArticleOther], classOf[BaArticleNlpAnalysisRes]))

    val sc = new SparkContext(conf)

    val ssc = new StreamingContext(sc, Seconds(args(1).toInt))

    // Kafka consumer group id.
    // NOTE(review): the group id says "V3" while the object is named V2 —
    // presumably intentional (to reset offsets on redeploy); confirm.
    val groupid = "g_baarticlenlpanalysisV3"

    // Direct Kafka stream; PreferConsistent spreads partitions evenly
    // across executors.
    val datas: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array("nlp_article_other"), ConfigHandler.kafkaParams(groupid))
    )

    // Broadcast a lazily-initialized Kafka producer so each executor
    // reuses a single instance instead of creating one per record.
    val kafkaBro = ssc.sparkContext.broadcast(BroadcastKafkaProducer[String, String](ConfigHandler.kafkaProps))


    datas.foreachRDD(rdd => {

      // Capture this batch's offset ranges before any transformation.
      val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      // Only do work when the batch actually contains records.
      if (!rdd.isEmpty()) {

        // coalesce(1): funnel the batch through a single partition so only
        // one ES client / producer pair is opened per batch.
        rdd.coalesce(1).foreachPartition(iter => {

          // One ES client and one Gson instance per partition.
          val client = ESUtils.getEsClient()
          val gson = new Gson()

          try {

            iter.foreach(record => {

              val jsonObj: JSONObject = JSON.parseObject(record.value())
              // Extract the NLP result fields from the incoming message.
              val articleId = jsonObj.getString("article_id")
              val hot_words = jsonObj.getString("hot_words")
              val sentiment_analysis = jsonObj.getString("sentiment_analysis")
              val summary = jsonObj.getString("summary")
              val money = jsonObj.getString("money")
              val person = jsonObj.getString("person")
              val organization = jsonObj.getString("organization")
              val area = jsonObj.getString("area")
              val percent = jsonObj.getString("percent")
              val number = jsonObj.getString("number")
              val nlp_time = jsonObj.getString("nlp_time")
              val nlp_date = jsonObj.getString("nlp_date")
              val score = jsonObj.getString("score")
              val lda = jsonObj.getString("lda")
              val lda_context = jsonObj.getString("lda_context")
              val lda_sort = jsonObj.getString("lda_sort")
              val clustering_type = jsonObj.getString("clustering_type")
              val relation_words = jsonObj.getString("relation_words")
              val update_time = jsonObj.getString("update_time")


              // Only records with a non-empty article id can be joined
              // against the ES index.
              if (StringUtils.isNotEmpty(articleId)) {

                // Look up documents whose `uuid` matches the article id.
                val builder = client.prepareSearch(ConfigHandler.esIndex).setTypes(ConfigHandler.esType)
                val builder1 = QueryBuilders.matchPhraseQuery("uuid", articleId)

                // Cap at 100 hits; an article is not expected to map to
                // more users than that — TODO confirm this bound.
                builder.setSize(100)
                val sr = builder.setQuery(QueryBuilders.boolQuery()
                  .must(builder1))
                  .execute()
                  .actionGet()
                val hits: Array[SearchHit] = sr.getHits.getHits

                // If ES DOES contain matching records, emit one enriched
                // message per matching user. (Records with no ES match are
                // silently dropped.)
                if (hits.nonEmpty) {

                  println("将数据写到kafka...")
                  // One output message per (user, article) pair found in ES.
                  for (hit <- hits) {

                    // Pull the user id out of the matched ES document.
                    val json = hit.getSourceAsString
                    val jsonobj: JSONObject = JSON.parseObject(json)
                    val userId = jsonobj.getString("user_id")

                    val id = UUID.randomUUID().toString.replaceAll("-", "")
                    // Wrap everything in the result case class and serialize to JSON.
                    val res: BaArticleNlpAnalysisRes = BaArticleNlpAnalysisRes(id, userId, articleId, hot_words, sentiment_analysis, summary, money, person, organization, area, percent, number, nlp_time, nlp_date, score, lda, lda_context, lda_sort, clustering_type, relation_words, update_time, "ba_article_nlp_analysis")
                    val result = gson.toJson(res)
                    // Publish to the downstream topic with a random key.
                    kafkaBro.value.send("ba_article_nlp_analysis", UUID.randomUUID().toString.replaceAll("-", ""), result)
                    println(result)
                  }
                  println("数据写入kafka成功...")
                  println("-------------------------------")

                }

              }

            })

          } catch {

            // Best-effort processing: log and continue with the next batch.
            // NOTE(review): offsets below are committed even when this fires,
            // so failed records are lost (at-most-once) — confirm this is
            // the intended delivery guarantee.
            case e: Exception => e.printStackTrace()

          } finally {

            // Always release the ES client for this partition.
            if (client != null) client.close()

          }

        })

      }

      // Commit this batch's offsets back to Kafka (async, fire-and-forget).
      datas.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)

    })

    ssc.start()
    ssc.awaitTermination()

  }

}
