package cn.tecnova.Synchronous

import java.util.Date

import cn.tecnova.bean.{BaSubjectNlp, BaseFlowBean, NlpArticleOther, NlpJsonBean}
import cn.tecnova.utils.{BaseFlowtopicUtils, ConfigHandler}
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SaveMode}
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Streams the base transaction-flow ("baseflow") Kafka topics and appends each
  * baseflow type into its matching Hive table (Elasticsearch output is currently
  * disabled; hooks are kept in place to re-enable it).
  *
  * Expected arguments:
  *   args(0) - spark.streaming.kafka.maxRatePerPartition (records/sec/partition)
  *   args(1) - streaming batch interval in seconds
  **/
object BaseflowTopic2ESAndHive {

  //  System.setProperty("HADOOP_USER_NAME", "root")
  Logger.getLogger("org").setLevel(Level.ERROR)

  def main(args: Array[String]): Unit = {

    // Fail fast with a usage hint instead of an ArrayIndexOutOfBoundsException.
    if (args.length < 2) {
      System.err.println("Usage: BaseflowTopic2ESAndHive <maxRatePerPartition> <batchIntervalSeconds>")
      sys.exit(1)
    }

    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      //      .setMaster("local[*]")
      .set("spark.streaming.kafka.maxRatePerPartition", args(0))
      .set("spark.streaming.stopGracefullyOnShutdown", "true")
      .set("es.index.auto.create", "true")
      .set("es.nodes", ConfigHandler.esNodes)
      .set("es.port", ConfigHandler.esPort)
      .set("es.nodes.wan.only", "true")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .registerKryoClasses(Array(classOf[BaseFlowBean]))

    val sc = new SparkContext(conf)

    val hiveContext = new HiveContext(sc)

    // Implicit conversions needed for RDD[BaseFlowBean].toDF()
    import hiveContext.implicits._

    val ssc = new StreamingContext(sc, Seconds(args(1).toInt))

    // Comma-separated list of baseflow Kafka topics to subscribe to.
    val baseFlowArr: Array[String] = ConfigHandler.baseFlowtopic.split(",")

    val groupid = "g_baseflowtopic2esandhive2"

    // Direct stream over all configured baseflow topics.
    val allData: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent, // spread Kafka partitions evenly across executors
      ConsumerStrategies.Subscribe[String, String](baseFlowArr, ConfigHandler.kafkaParams(groupid))
    )

    /**
      * Extracts the beans of one baseflow type from the current batch and appends
      * them to the Hive table `test.<tableName>`, logging the write duration.
      * `tableName` doubles as the baseflow type key understood by
      * BaseFlowtopicUtils. Runs on the driver (invoked from foreachRDD).
      */
    def persistTopic(batch: RDD[ConsumerRecord[String, String]], tableName: String): Unit = {
      val beans: RDD[BaseFlowBean] = BaseFlowtopicUtils.getBaseFlowBeanRDD(batch, tableName).coalesce(10)
      if (!beans.isEmpty()) {
        val beansDF: DataFrame = beans.toDF().cache()
        val stime = new Date().getTime
        beansDF.write.mode(SaveMode.Append).insertInto("test." + tableName)
        println(tableName + "写hive所需时间：" + (new Date().getTime - stime))
        // ES output is currently disabled; to re-enable:
        // beansDF.saveToEs(tableName + "/" + tableName)
        // Release the cached blocks so they don't accumulate across batches.
        beansDF.unpersist()
      }
    }

    allData.foreachRDD(rdd => {

      // Needed for DataFrame.saveToEs once ES output is re-enabled.
      import org.elasticsearch.spark.sql._

      // Capture this batch's offset ranges before any processing.
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      if (!rdd.isEmpty()) {
        persistTopic(rdd, "baseflow_area")
        persistTopic(rdd, "baseflow_company")
        persistTopic(rdd, "baseflow_person")
        // Disabled baseflow types (previously commented-out copies of the same
        // logic): baseflow_gov, baseflow_school, baseflow_cd, baseflow_stock,
        // baseflow_financial, baseflow_ngo. Re-enable with persistTopic(rdd, "<name>").
      }

      // Commit the processed offsets only after the batch has been handled.
      allData.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
    })

    ssc.start()
    ssc.awaitTermination()

  }

}
