package cn.tecnova.Synchronous

import java.util.Date

import cn.tecnova.bean.{BaSubjectNlp, NlpArticleOther, NlpJsonBean, NlpSubjectAnalysis}
import cn.tecnova.utils.ConfigHandler
import com.alibaba.fastjson.{JSON, JSONObject}
import com.google.gson.Gson
import org.apache.commons.lang.StringUtils
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SaveMode}
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * description:nlp kafka数据存储ES Hive
  **/
object NLPtopic2ESAndHive {

  //  System.setProperty("HADOOP_USER_NAME", "root")
  Logger.getLogger("org").setLevel(Level.ERROR)

  def main(args: Array[String]): Unit = {


    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      //      .setMaster("local[*]")
      .set("spark.streaming.kafka.maxRatePerPartition", args(0))
      .set("spark.streaming.stopGracefullyOnShutdown", "true")
      .set("es.index.auto.create", "true")
      .set("es.nodes", ConfigHandler.esNodes)
      .set("es.port", ConfigHandler.esPort)
      .set("es.nodes.wan.only", "true")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .registerKryoClasses(Array(classOf[NlpJsonBean],classOf[NlpArticleOther],classOf[BaSubjectNlp]))

    val sc = new SparkContext(conf)

    val hiveContext = new HiveContext(sc)

    //导入隐士转换
    import hiveContext.implicits._

    val ssc = new StreamingContext(sc, Seconds(args(1).toInt))

    //nlp kafka数据
    val nlpArr: Array[String] = ConfigHandler.NLPtopic.split(",")

    val groupid = "g_nlptopic2esandhive4"

    //所有的kafka topic
    val allData: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent, // 将拉取到的数据，均匀分散到每台Executor节点上
      ConsumerStrategies.Subscribe[String, String](nlpArr, ConfigHandler.kafkaParams(groupid))
    )

    allData.foreachRDD(rdd => {

      //导入写es相关的包
      import org.elasticsearch.spark.sql._

      //获取当前批次偏移量信息
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      if (!rdd.isEmpty()) {

        //NLP
        //nlp_article_emotion
        val nlpArticleEmotion: RDD[NlpJsonBean] = rdd.filter(record => {
          val jsonObj: JSONObject = JSON.parseObject(record.value())
          val tName = jsonObj.getString("topic_name")
          "nlp_article_emotion".equals(tName)
        }).coalesce(10).map(record => {
          val jsonObj: JSONObject = JSON.parseObject(record.value())
          val nlpJsonBean: NlpJsonBean = JSON.parseObject(record.value(), classOf[NlpJsonBean])
          if (StringUtils.isNotEmpty(jsonObj.getString("article_html"))) {
            val html = jsonObj.getString("article_html").replaceAll("\\s+", "")
            nlpJsonBean.article_html = html
          }
          if (StringUtils.isNotEmpty(jsonObj.getString("article_content"))) {
            val content = jsonObj.getString("article_content").replaceAll("\\s+", "")
            nlpJsonBean.article_content = content
          }
          nlpJsonBean
        })

        //nlp_article_other
        val nlpArticleOther: RDD[NlpArticleOther] = rdd.filter(record => {
          val jsonObj: JSONObject = JSON.parseObject(record.value())
          val tName = jsonObj.getString("topic_name")
          "nlp_article_other".equals(tName)
        }).coalesce(10).map(record => {
          val nlpArticleOther: NlpArticleOther = JSON.parseObject(record.value(), classOf[NlpArticleOther])
          nlpArticleOther
        })

        //nlp_subject_analysis
        val nlpSubjectAnalysis: RDD[BaSubjectNlp] = rdd.filter(record => {
          val jsonObj: JSONObject = JSON.parseObject(record.value())
          val tName = jsonObj.getString("topic_name")
          "nlp_subject_analysis".equals(tName)
        }).coalesce(10).map(record => {
          val nlpSubjectAnalysis: BaSubjectNlp = JSON.parseObject(record.value(), classOf[BaSubjectNlp])
          nlpSubjectAnalysis
        })


        if (!nlpSubjectAnalysis.isEmpty()) {
          val nlpSubjectAnalysisDF: DataFrame = nlpSubjectAnalysis.toDF()
          //将数据写入hive
          //          hiveContext.sql("use test")
          //          hiveContext.sql("create table if not exists nlp_subject_analysis (subject_id String,subject_name String,subject_type String,sentiment_analysis String,hot_words String,positive_word_cloud String,negative_word_cloud String,relation_graph String,update_time String,topic_name String)ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t'")
          val stime = new Date().getTime
          nlpSubjectAnalysisDF.write.mode(SaveMode.Append).insertInto("test.nlp_subject_analysis")
          println("nlps写hive所需时间：" + (new Date().getTime - stime))
//          val etime = new Date().getTime
          //将数据写入es
//          nlpSubjectAnalysisDF.saveToEs("nlp_subject_analysis" + "/nlp_subject_analysis")
//          println("nlps写es所需时间：" + (new Date().getTime - etime))
        }


        if (!nlpArticleEmotion.isEmpty()) {
          val nlpJsonBeanDF: DataFrame = nlpArticleEmotion.toDF()
          //将数据写入hive
          //          hiveContext.sql("use test")
          //          hiveContext.sql("create table if not exists nlp_article_emotion (site_id String,uuid String,site_url String,site_name String,main_id String,site_cls String,enterprise_scale_cls String,customer_cls String,media_cls String,content_cls String,important_leavel String,industry_cls String,language_cls String,area_cls String,site_province String,site_city String,site_district String,article_url String,gmt_create String,article_html String,code String,article_like String,article_channel String,article_forword String,template_source String,domain String,weight String,schedule String,article_source String,article_view String,article_reply String,article_title String,article_content String,article_author String,article_pubdate String,nlp_emotion String,topic_name String)ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t'")
          val stime = new Date().getTime
          nlpJsonBeanDF.write.mode(SaveMode.Append).insertInto("test.nlp_article_emotion")
          println("nlpe写hive所需时间：" + (new Date().getTime - stime))
//          val etime = new Date().getTime
          //将数据写入es
//          nlpJsonBeanDF.saveToEs("nlp_article_emotion" + "/nlp_article_emotion")
//          println("nlpe写es所需时间：" + (new Date().getTime - etime))

        }

        if (!nlpArticleOther.isEmpty()) {
          val nlpArticleOtherDF: DataFrame = nlpArticleOther.toDF()
          //将数据写入hive
          //          hiveContext.sql("use test")
          //          hiveContext.sql("create table if not exists nlp_article_other (article_id String,hot_words String,sentiment_analysis String,summary String,money String,person String,organization String,area String,percent String,number String,nlp_time String,nlp_date String,score String,lda String,lda_context String,lda_sort String,clustering_type String,relation_words String,update_time String,topic_name String)ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t'")
          val stime = new Date().getTime
          nlpArticleOtherDF.write.mode(SaveMode.Append).insertInto("test.nlp_article_other")
          println("nlpo写hive所需时间：" + (new Date().getTime - stime))
//          val etime = new Date().getTime
          //将数据写入es
//          nlpArticleOtherDF.saveToEs("nlp_article_other" + "/nlp_article_other")
//          println("nlpo写es所需时间：" + (new Date().getTime - etime))
        }

      }

      //提交偏移量信息
      allData.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)

    })

    ssc.start()
    ssc.awaitTermination()

  }

}
