import java.util.UUID

import cn.tecnova.bean.{BaSubjectNlp, BaSubjectNlpRes}
import cn.tecnova.utils.{BroadcastKafkaProducer, ConfigHandler}
import com.google.gson.Gson
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{DataFrame, Dataset, SQLContext}
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * description:主体表（user_subject）
  **/
object BaSubjectNlpAnalysis1V2 {

  // Silence Spark's verbose INFO/WARN logging.
  Logger.getLogger("org").setLevel(Level.ERROR)

  /**
    * Streaming entry point for the subject table (user_subject).
    *
    * Consumes NLP subject-analysis JSON records from Kafka topic
    * "nlp_subject_analysis", keeps only subject_type 1/2, joins them against
    * the `user_subject` JDBC dimension table, and writes the enriched records
    * (as JSON) back to Kafka topic "ba_subject_nlp_analysis". Offsets are
    * committed manually per batch after processing.
    *
    * @param args args(0) = spark.streaming.kafka.maxRatePerPartition,
    *             args(1) = batch interval in seconds
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      //      .setMaster("local[*]")
      .set("spark.streaming.kafka.maxRatePerPartition", args(0))
      .set("spark.streaming.stopGracefullyOnShutdown", "true")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .registerKryoClasses(Array(classOf[BaSubjectNlp], classOf[BaSubjectNlpRes]))

    val sc = new SparkContext(conf)

    val sqlContext = new SQLContext(sc)
    // Implicit conversions for .toDF() and the String Encoder used below.
    import sqlContext.implicits._

    val ssc = new StreamingContext(sc, Seconds(args(1).toInt))

    // Read the dimension table once at startup. subject_type is renamed to
    // subject_type_1 so the join predicate in sql1 is unambiguous.
    // NOTE(review): this broadcast wraps only the DataFrame's logical plan,
    // and .value is read on the driver inside foreachRDD — it does not ship
    // the table's rows to executors.
    val userSubjectDf: DataFrame = sqlContext.read
      .jdbc(ConfigHandler.url, "user_subject", ConfigHandler.props)
      .select("user_id", "subject_type", "id")
      .toDF("user_id", "subject_type_1", "id")
    val userSubBro = sc.broadcast(userSubjectDf)

    // Kafka consumer group id.
    val groupid = "g_basubjectnlpanalysis1"

    // Direct stream from Kafka. PreferConsistent spreads the topic's
    // partitions evenly across the available executors.
    val datas: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array("nlp_subject_analysis"), ConfigHandler.kafkaParams(groupid))
    )

    // Lazily-initialized Kafka producer shared per executor via broadcast.
    val kafkaBro = ssc.sparkContext.broadcast(BroadcastKafkaProducer[String, String](ConfigHandler.kafkaProps))

    datas.foreachRDD(rdd => {

      // Capture this batch's offset ranges before any transformation.
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      if (!rdd.isEmpty()) {

        // Parse the JSON payloads into BaSubjectNlp and keep only records
        // with subject_type 1 or 2. One Gson instance per partition instead
        // of one per record (the original allocated a Gson for every record).
        val nlpSubjectAnalysisDf: DataFrame = rdd.mapPartitions(records => {
          val gson = new Gson()
          records.map(record => gson.fromJson(record.value(), classOf[BaSubjectNlp]))
        }).filter(ba => Array("1", "2").contains(ba.subject_type)).toDF()

        val broUserSubDf: DataFrame = userSubBro.value

        // Register both sides of the join as temp views for the SQL below.
        broUserSubDf.createOrReplaceTempView("userSubjectDf")
        nlpSubjectAnalysisDf.createOrReplaceTempView("nlpSubjectAnalysisDf")

        // Join streamed records with the dimension table on
        // (subject_type, subject_id).
        val sql1 =
          """
            |select
            |user_id,subject_id,subject_name,subject_type,sentiment_analysis,hot_words,positive_word_cloud,negative_word_cloud,relation_graph,update_time
            |from nlpSubjectAnalysisDf t1, userSubjectDf t2
            |where t1.subject_type = t2.subject_type_1 and t1.subject_id = t2.id
          """.stripMargin

        // Serialize each joined row to JSON, again with one Gson (and one
        // UUID-less surrogate id per output record) per partition.
        val join: Dataset[String] = sqlContext.sql(sql1).mapPartitions(rows => {
          val gson = new Gson()
          rows.map(row => {
            // Fresh surrogate primary key for the output record.
            val id = UUID.randomUUID().toString.replaceAll("-", "")
            val baSubjectNlp: BaSubjectNlpRes = BaSubjectNlpRes(id, row.getAs[String]("user_id"), row.getAs[String]("subject_id"), row.getAs[String]("subject_name"), row.getAs[String]("subject_type"), row.getAs[String]("sentiment_analysis"), row.getAs[String]("hot_words"), row.getAs[String]("positive_word_cloud"), row.getAs[String]("negative_word_cloud"), row.getAs[String]("relation_graph"), row.getAs[String]("update_time"), "ba_subject_nlp_analysis")
            gson.toJson(baSubjectNlp)
          })
        })

        // Write the enriched JSON back to Kafka. foreachPartition resolves
        // the broadcast producer once per partition rather than per record.
        join.foreachPartition(partition => {
          val producer = kafkaBro.value
          partition.foreach(js => {
            producer.send("ba_subject_nlp_analysis", UUID.randomUUID().toString.replaceAll("-", ""), js)
            println(js)
            println("数据写入kafka成功")
          })
        })
      }

      // Drop cached copies of the broadcast; it is re-shipped on next access.
      // NOTE(review): this does NOT re-read the JDBC table — the log message
      // below ("broadcast updated") overstates what happens here.
      if (userSubBro != null) {
        userSubBro.unpersist(true)
        println("广播变量更新成功")
      }

      // Commit this batch's offsets only after processing has completed.
      datas.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)

    })

    ssc.start()
    ssc.awaitTermination()

  }

}
