package cn.tecnova.test

import cn.tecnova.bean.{BaAnalysisBean, BaseFlowBean, NlpJsonBean}
import cn.tecnova.utils.{BaseFlowtopicUtils, BatopicUtils, ConfigHandler}
import com.alibaba.fastjson.{JSON, JSONObject}
import com.google.gson.Gson
import org.apache.commons.lang.StringUtils
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SaveMode}
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Spark Streaming job: consumes NLP article-emotion records from the Kafka
  * topic "nlp_article_emotion", strips all whitespace from the
  * `article_html` / `article_content` fields, and appends each micro-batch
  * to a Hive table.
  *
  * Usage: args(0) = micro-batch interval in seconds.
  **/
object BaseflowArea2ES {

  // Silence Spark's verbose INFO/WARN logging; keep only errors.
  Logger.getLogger("org").setLevel(Level.ERROR)

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      // NOTE(review): hard-coded local master — remove/override for cluster deploys.
      .setMaster("local[*]")
      // Back-pressure cap: at most 200 records per Kafka partition per second.
      .set("spark.streaming.kafka.maxRatePerPartition", "200")
      // Finish in-flight batches before shutting down on SIGTERM.
      .set("spark.streaming.stopGracefullyOnShutdown", "true")

    val sc = new SparkContext(conf)

    val hiveContext = new HiveContext(sc)

    // args(0): batch duration in seconds. Will throw NumberFormatException /
    // ArrayIndexOutOfBoundsException on bad invocation, which is acceptable
    // fail-fast behavior for a job entry point.
    val ssc = new StreamingContext(sc, Seconds(args(0).toInt))

    // Direct Kafka stream; PreferConsistent distributes topic partitions
    // evenly across the available executors.
    val allData: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array("nlp_article_emotion"), ConfigHandler.kafkaParams("g_baseflowarea2hive"))
    )

    allData.foreachRDD(rdd => {

      import hiveContext.implicits._

      // Parse each record into an NlpJsonBean and normalize whitespace in the
      // html/content fields. mapPartitions lets us build ONE Gson per
      // partition instead of one per record (the original allocated a Gson
      // inside rdd.map for every message).
      val nlpArticleEmotion: RDD[NlpJsonBean] = rdd.mapPartitions(records => {
        val gson = new Gson()
        records.map(record => {
          val jsonObj: JSONObject = JSON.parseObject(record.value())
          val nlpJsonBean: NlpJsonBean = gson.fromJson(record.value(), classOf[NlpJsonBean])

          if (StringUtils.isNotEmpty(jsonObj.getString("article_html"))) {
            // Collapse/remove all whitespace runs.
            nlpJsonBean.article_html = jsonObj.getString("article_html").replaceAll("\\s+", "")
          }
          if (StringUtils.isNotEmpty(jsonObj.getString("article_content"))) {
            nlpJsonBean.article_content = jsonObj.getString("article_content").replaceAll("\\s+", "")
          }
          nlpJsonBean
        })
      })

      // Cache so the isEmpty() probe and the subsequent write do not each
      // re-read and re-parse the Kafka batch.
      nlpArticleEmotion.cache()

      if (!nlpArticleEmotion.isEmpty()) {

        // NOTE(review): this DDL creates "nlp_article_other", whose columns
        // (article_id, hot_words, ...) do NOT match the NlpJsonBean fields
        // written below, while the insert targets "baseflow_school" — whose
        // schema (see prior revisions of this job) does match the bean.
        // Kept byte-identical pending confirmation of which table is intended.
        hiveContext.sql("create table if not exists nlp_article_other (article_id String,hot_words String,sentiment_analysis String,summary String,money String,person String,organization String,area String,percent String,number String,nlp_time String,nlp_date String,score String,lda String,lda_context String,lda_sort String,clustering_type String,relation_words String,update_time String,topic_name String)ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t'")

        nlpArticleEmotion.toDF().write.mode(SaveMode.Append).insertInto("baseflow_school")

      }

      // Release the cached batch before the next micro-batch arrives.
      nlpArticleEmotion.unpersist()

    })

    ssc.start()
    ssc.awaitTermination()

  }

}
