package cn.tecnova.test

import cn.tecnova.bean.BaseFlowBean
import cn.tecnova.utils.ConfigHandler
import com.alibaba.fastjson.{JSON, JSONObject}
import com.google.gson.Gson
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SaveMode
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}

/**
  * Streaming ingest job: consumes article JSON from the Kafka topic
  * `nlp_article_emotion`, strips all whitespace from the `article_html` and
  * `article_content` fields, and appends each micro-batch to the Hive table
  * `test.baseflow_school`.
  *
  * NOTE(review): `Testcla` is assumed to be a mutable bean whose fields match
  * the `baseflow_school` column order — TODO confirm against its definition.
  **/
object TestHive {
  // Hive/HDFS writes are performed as user "root".
  System.setProperty("HADOOP_USER_NAME", "root")

  // Silence Spark's verbose INFO/WARN logging.
  Logger.getLogger("org").setLevel(Level.ERROR)

  /** Null-safe whitespace stripper: preserves null (missing JSON key) instead of throwing NPE. */
  private def stripWhitespace(s: String): String =
    if (s == null) null else s.replaceAll("\\s+", "")

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      .setMaster("local[*]")
      // Cap Kafka ingestion at 200 records per partition per second.
      .set("spark.streaming.kafka.maxRatePerPartition", "200")
      // Let in-flight batches finish before shutdown.
      .set("spark.streaming.stopGracefullyOnShutdown", "true")

    val sc = new SparkContext(conf)
    val hiveContext = new HiveContext(sc)
    val ssc = new StreamingContext(sc, Seconds(2))

    // Direct Kafka stream subscribed to the article topic.
    val allData: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent, // distribute partitions evenly across executors
      ConsumerStrategies.Subscribe[String, String](Array("nlp_article_emotion"), ConfigHandler.kafkaParams("g_baseflowarea2es"))
    )

    allData.foreachRDD { rdd =>
      // Skip empty micro-batches: avoids a pointless `use test` + Hive append every 2 seconds.
      if (!rdd.isEmpty()) {
        import hiveContext.implicits._

        // mapPartitions so Gson is constructed once per partition instead of once per record.
        val cleaned: RDD[Testcla] = rdd.mapPartitions { records =>
          val gson = new Gson()
          records.map { record =>
            // First pass (fastjson): pull out and clean the two large text fields.
            val jsonObj: JSONObject = JSON.parseObject(record.value())
            val html = stripWhitespace(jsonObj.getString("article_html"))
            val content = stripWhitespace(jsonObj.getString("article_content"))

            // Second pass (Gson): bind the full record to the bean, then overwrite
            // the two cleaned fields.
            val bean: Testcla = gson.fromJson(record.value(), classOf[Testcla])
            bean.article_html = html
            bean.article_content = content
            bean
          }
        }

        hiveContext.sql("use test")
        // Fix: original was `...mode(SaveMode.Append)insertInto(...)` — missing '.'
        // before insertInto; it only parsed (if at all) via accidental infix notation.
        cleaned.toDF().write.mode(SaveMode.Append).insertInto("baseflow_school")
        println("over")
      }
    }

    ssc.start()
    ssc.awaitTermination()
  }

}
