package cn.tecnova.test

import cn.tecnova.bean._
import cn.tecnova.utils.{BatopicUtils, ConfigHandler}
import com.alibaba.fastjson.{JSON, JSONObject}
import com.google.gson.Gson
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Description: streams article records from Kafka topics into
  * same-named Elasticsearch indexes, committing offsets per batch.
  * Author: Rabcheng
  * Date: 2019/5/9 13:42
  **/
object Local2Es {

  // Silence Spark's verbose INFO/WARN logging; keep errors visible.
  Logger.getLogger("org").setLevel(Level.ERROR)

  /**
    * Entry point. Builds a 1-second-batch Spark Streaming job that consumes
    * three Kafka topics and writes each topic's records to an Elasticsearch
    * index of the same name, committing Kafka offsets after every batch.
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
//      .setMaster("local[*]")
      // Cap ingestion at 10 records/sec per Kafka partition per batch.
      .set("spark.streaming.kafka.maxRatePerPartition", "10")
      .set("spark.streaming.stopGracefullyOnShutdown", "true")
      .set("es.index.auto.create", "true")
      .set("es.nodes", "192.168.100.3")
      .set("es.port", "9200")
      .set("es.nodes.wan.only", "true")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .registerKryoClasses(Array(classOf[BaAnalysisBean], classOf[BaArticleNlpAnalysisRes]))

    val sc = new SparkContext(conf)

    val context = new SQLContext(sc)

    // Needed for RDD[bean].toDF() inside foreachRDD.
    import context.implicits._

    // 1-second micro-batches.
    val ssc = new StreamingContext(sc, Seconds(1))

    val groupid = "g_batopic2esandhive"

    // Subscribe to all relevant Kafka topics with a single direct stream.
    val allData: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent, // spread partitions evenly across executors
      ConsumerStrategies.Subscribe[String, String](
        Array("ba_user_relation_article", "ba_article_nlp_analysis", "ba_topic_mining_article"),
        ConfigHandler.kafkaParams(groupid))
    )

    allData.foreachRDD(rdd => {

      // Brings DataFrame.saveToEs into scope.
      import org.elasticsearch.spark.sql._

      // Capture this batch's offset ranges before any transformation,
      // so they can be committed once the writes have run.
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      // Filters the raw batch down to `topic`'s records (via the shared
      // project helper) and writes them to the identically named ES
      // index/type, skipping the write entirely for an empty batch.
      def saveBaseBeanTopic(topic: String): Unit = {
        val beans: RDD[BaAnalysisBean] = BatopicUtils.getBaseBeanRDD(rdd, topic)
        if (!beans.isEmpty()) {
          beans.toDF().saveToEs(topic + "/" + topic)
        }
      }

      saveBaseBeanTopic("ba_user_relation_article")
      // NOTE: an earlier (commented-out) version also wrote this topic to
      // Hive table test.ba_topic_mining_article; only ES is written now.
      saveBaseBeanTopic("ba_topic_mining_article")

      // ba_article_nlp_analysis: parse each record's JSON payload exactly
      // once (previously it was parsed in filter AND again in map) and
      // reuse a single Gson instance per partition instead of per record.
      val baArticleNlpAnalysis: RDD[BaArticleNlpAnalysisRes] = rdd.mapPartitions { records =>
        val gson = new Gson()
        records.flatMap { record =>
          val jsonObj: JSONObject = JSON.parseObject(record.value())
          if ("ba_article_nlp_analysis".equals(jsonObj.getString("topic_name"))) {
            Some(gson.fromJson(record.value(), classOf[BaArticleNlpAnalysisRes]))
          } else {
            None
          }
        }
      }

      if (!baArticleNlpAnalysis.isEmpty()) {
        baArticleNlpAnalysis.toDF().saveToEs("ba_article_nlp_analysis" + "/ba_article_nlp_analysis")
      }

      // Commit this batch's offsets only after all writes were attempted
      // (at-least-once delivery: a failed batch is not committed).
      allData.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)

    })

    ssc.start()
    ssc.awaitTermination()

  }

}
