package cn.tecnova.test

import java.util.Date

import cn.tecnova.bean.{BaSubjectNlp, NlpArticleOther, NlpJsonBean}
import cn.tecnova.utils.ConfigHandler
import com.alibaba.fastjson.{JSON, JSONObject}
import com.google.gson.Gson
import org.apache.commons.lang.StringUtils
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.spark.sql.SaveMode
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010._

/**
  * Description: Spark Streaming job that reads NLP article-emotion records from
  * Kafka and writes each micro-batch to both Hive and Elasticsearch.
  * Author: Rabcheng
  * Date: 2019/5/7 19:49
  **/
/**
  * Streams NLP article-emotion records from the `nlp_article_emotion` Kafka topic,
  * strips whitespace from the `article_html` / `article_content` fields, and writes
  * each micro-batch to Hive (`test.nlp_article_emotion`) and Elasticsearch before
  * committing the Kafka offsets.
  *
  * args(0) = spark.streaming.kafka.maxRatePerPartition (records/sec per partition)
  * args(1) = batch interval in seconds
  */
object NLPEmo2EsHive {

  def main(args: Array[String]): Unit = {

    // Fail fast with a usage message instead of an opaque ArrayIndexOutOfBoundsException.
    require(args.length >= 2, "usage: NLPEmo2EsHive <maxRatePerPartition> <batchSeconds>")

    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      //      .setMaster("local[*]")
      .set("spark.streaming.kafka.maxRatePerPartition", args(0))
      // Let in-flight batches finish on shutdown so their offsets get committed.
      .set("spark.streaming.stopGracefullyOnShutdown", "true")
      .set("es.index.auto.create", "true")
      .set("es.nodes", ConfigHandler.esNodes)
      .set("es.port", "9600")
      .set("es.nodes.wan.only", "true")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .registerKryoClasses(Array(classOf[NlpJsonBean]))

    val sc = new SparkContext(conf)

    val hiveContext = new HiveContext(sc)

    // Implicit conversions (e.g. RDD -> DataFrame via toDF).
    import hiveContext.implicits._

    val ssc = new StreamingContext(sc, Seconds(args(1).toInt))

    val groupid = "g_ggg"

    // Direct Kafka stream; offsets are committed manually after each batch succeeds.
    val allData: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent, // spread Kafka partitions evenly across executors
      ConsumerStrategies.Subscribe[String, String](Array("nlp_article_emotion"), ConfigHandler.kafkaParams(groupid))
    )

    allData.foreachRDD(rdd => {

      // Brings DataFrame.saveToEs into scope.
      import org.elasticsearch.spark.sql._

      // Capture this batch's offset ranges before any transformation.
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      // mapPartitions so a single Gson instance is reused per partition instead of
      // being constructed for every record.
      val res = rdd.mapPartitions(records => {
        val gson = new Gson()
        records.map(record => {
          val jsonObj: JSONObject = JSON.parseObject(record.value())
          val nlpJsonBean: NlpJsonBean = gson.fromJson(record.value(), classOf[NlpJsonBean])
          // Strip all whitespace from the html/content fields when present.
          if (StringUtils.isNotEmpty(jsonObj.getString("article_html"))) {
            nlpJsonBean.article_html = jsonObj.getString("article_html").replaceAll("\\s+", "")
          }
          if (StringUtils.isNotEmpty(jsonObj.getString("article_content"))) {
            nlpJsonBean.article_content = jsonObj.getString("article_content").replaceAll("\\s+", "")
          }
          nlpJsonBean
        })
      }).cache() // reused by isEmpty(), the Hive write and the ES write — avoids re-parsing the batch three times

      try {
        if (!res.isEmpty()) {

          val frame = res.toDF()
          val stime = new Date().getTime
          frame.write.mode(SaveMode.Append).insertInto("test.nlp_article_emotion")
          println("nlpe写hive所需时间：" + (new Date().getTime - stime))
          val etime = new Date().getTime
          // Write the same batch to Elasticsearch (index/type nlp_article_emotion).
          frame.saveToEs("nlp_article_emotion" + "/nlp_article_emotion")
          println("nlpe写es所需时间：" + (new Date().getTime - etime))

        }

        // Commit offsets only after both sinks succeeded, so a failed batch is replayed.
        allData.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
      } finally {
        // Release the cached batch whether or not the writes succeeded.
        res.unpersist()
      }

    })

    ssc.start()
    ssc.awaitTermination()

  }

}
