package cn.tecnova.analysis

import java.sql.{Connection, DriverManager, PreparedStatement, ResultSet}
import java.text.SimpleDateFormat
import java.util.{Date, UUID}

import cn.tecnova.bean.NlpJsonBean
import cn.tecnova.utils.ConfigHandler
import com.google.gson.Gson
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.util.control.Breaks

/**
  * description: 舆情事件相关文章_分析 (public-sentiment event / related-article analysis).
  *
  * Streams article JSON from Kafka, loads the currently active sentiment
  * events from the database, and emits a delimited relation record whenever
  * every word of one of an event's keyword groups occurs in the article's
  * content. The media-type id carried by each article is translated to a
  * display name via a dictionary loaded once at startup and broadcast.
  **/
object EventRelationArticleV2 {

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      .setMaster("local[*]")
      .set("spark.streaming.stopGracefullyOnShutdown", "true")

    val ssc = new StreamingContext(conf, Seconds(2))

    // Load the media-type dictionary (id -> name) once on the driver.
    // FIX: the connection, statement and result set are now closed; the
    // original leaked all three for the lifetime of the job.
    val mediaMap: Map[String, String] = {
      val con: Connection = DriverManager.getConnection(ConfigHandler.url, ConfigHandler.user, ConfigHandler.passwd)
      try {
        val mediaStat = con.prepareStatement(
          """
            select
            id,name
            from media_type
          """.stripMargin)
        try {
          val mediaTypeSet: ResultSet = mediaStat.executeQuery()
          try {
            var mediaList = List[(String, String)]()
            while (mediaTypeSet.next()) {
              // Prepend (O(1)) instead of append; order is irrelevant for toMap.
              mediaList ::= (mediaTypeSet.getString("id"), mediaTypeSet.getString("name"))
            }
            mediaList.toMap
          } finally mediaTypeSet.close()
        } finally mediaStat.close()
      } finally con.close()
    }

    // Broadcast the media dictionary so executors can translate media ids.
    val mediaDic = ssc.sparkContext.broadcast(mediaMap)

    val data: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent, // spread Kafka partitions evenly over executors
      ConsumerStrategies.Subscribe[String, String](Array(ConfigHandler.topic), ConfigHandler.kafkaParams("g_432"))
    )

    // Deserialize each Kafka record's JSON payload into an NlpJsonBean.
    val jsonDS: DStream[NlpJsonBean] = data.map { re =>
      new Gson().fromJson(re.value(), classOf[NlpJsonBean])
    }

    jsonDS.foreachRDD { rdd =>
      val session = SparkSession.builder().config(rdd.sparkContext.getConf).getOrCreate()
      // Convert the RDD of beans into a DataFrame for column access by name.
      val dataFrame: DataFrame = session.createDataFrame(rdd)

      import session.implicits._

      val res: Dataset[String] = dataFrame.map { row =>
        // BUGFIX: the record accumulator is now LOCAL to each row. The
        // original declared `var value` outside the closure, so every row
        // re-emitted all records produced by earlier rows of the same
        // partition, duplicating output after the later split on "--".
        var value = ""

        // NOTE(review): opening a JDBC connection per row is expensive;
        // consider mapPartitions with one connection per partition.
        var conn: Connection = null
        var sentimentEvent: PreparedStatement = null
        var sentimentEventSet: ResultSet = null

        try {
          conn = DriverManager.getConnection(ConfigHandler.url, ConfigHandler.user, ConfigHandler.passwd)

          // Only events that are running, not deleted, and whose time window
          // covers "now" participate in matching.
          sentimentEvent = conn.prepareStatement(
            """
              select
              id,user_id,article_id,article_title,keywords
              from public_sentiment_event
              where run_flag = 1 and del_flag = 0 and start_time <= ? and end_time >= ?
            """.stripMargin)

          val currentTime = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date)
          sentimentEvent.setString(1, currentTime)
          sentimentEvent.setString(2, currentTime)
          sentimentEventSet = sentimentEvent.executeQuery()

          // Article fields pulled from the incoming row.
          val articleId = row.getAs[String]("uuid")
          val siteName = row.getAs[String]("site_name")
          // Media-type id; translated to a name via the broadcast dictionary.
          val mediaCls = row.getAs[String]("media_cls")
          val articleTitle: String = row.getAs[String]("article_title")
          val articleAuthor = row.getAs[String]("article_author")
          val siteUrl = row.getAs[String]("site_url")
          val articlePubdate = row.getAs[String]("article_pubdate")
          val articleContent: String = row.getAs[String]("article_content")

          // Match the article against every active event.
          while (sentimentEventSet.next()) {
            val userId: String = sentimentEventSet.getString("user_id")
            val eventId: String = sentimentEventSet.getString("article_id")
            val eventName = sentimentEventSet.getString("article_title")

            // keywords = comma-separated groups; a group = space-separated
            // words that must ALL occur in the article content.
            val keyWords: Array[String] = sentimentEventSet.getString("keywords").split(",")

            val loop = new Breaks
            var hitWords = ""

            loop.breakable {
              for (group <- keyWords) {
                val words: Array[String] = group.split(" ")

                // A group hits only when every one of its words appears in
                // the article content (replaces the manual 0/1 flag list).
                if (words.forall(articleContent.contains)) {
                  hitWords = group.replaceAll(" ", ",")

                  val id = UUID.randomUUID().toString.replaceAll("-", "")
                  val update_time = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date)

                  // Append one "--"-prefixed record; field layout kept
                  // identical to the original output format.
                  value = value + "--" + id + "," + userId + "," + eventId + "," + eventName + "," + articleId + "," + siteName + "," + mediaDic.value.getOrElse(mediaCls, mediaCls) +
                    "," + articleTitle + "," + articleAuthor + "," + siteUrl + "," + "" + "," + articlePubdate + "," + hitWords +
                    "" + "," + "" + "," + "" + "," + "" + "," + "" + "," + "" + "," + "" + "," + "" + "," + "" + "," + "" + update_time

                  // At most one record per event: stop at the first hit group.
                  loop.break()
                }
              }
            }
          }
        } catch {
          case e: Exception => e.printStackTrace()
        } finally {
          // FIX: close JDBC resources in reverse order of acquisition;
          // the original never closed the ResultSet.
          if (sentimentEventSet != null) sentimentEventSet.close()
          if (sentimentEvent != null) sentimentEvent.close()
          if (conn != null) conn.close()
        }
        value
      }

      // Split the concatenated records; drop the empty fragment produced by
      // the leading "--" and by rows that matched no event at all.
      val resultDS: Dataset[String] = res.flatMap(_.split("--")).filter(_.nonEmpty)

      resultDS.show()
    }

    ssc.start()
    ssc.awaitTermination()
  }
}
