package cn.tecnova.analysis

import java.sql.{Connection, DriverManager, PreparedStatement, ResultSet, SQLException}
import java.text.SimpleDateFormat
import java.util.{Date, UUID}

import cn.tecnova.bean.{EventRelationArticle, NlpJsonBean}
import cn.tecnova.utils.{BroadcastTools, ConfigHandler}
import com.google.gson.Gson
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, broadcast}
import org.apache.spark.sql.{DataFrame, Dataset, SaveMode, SparkSession}
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.util.control.Breaks

/**
  * Description: relates streamed articles to public-sentiment events
  * (analysis job; per-partition processing via mapPartitions).
  **/
object EventRelationArticleV3 {

  Logger.getLogger("org").setLevel(Level.ERROR)

  /** Timestamp pattern used for both the event-window query parameters and the update_time field. */
  private val TimePattern = "yyyy-MM-dd HH:mm:ss"

  /** Field separator inside one output row; must stay in sync with EventRelationArticle's parsing. */
  private val FieldSep = "!#%&"

  /**
    * Processes one RDD partition: matches every article against the currently
    * active sentiment events and returns one FieldSep-joined row per
    * (article, event) hit.
    *
    * Opens a single JDBC connection and a single PreparedStatement for the
    * whole partition (the original prepared a new statement per record and
    * never closed anything). The result is materialized with `.toList` BEFORE
    * the `finally` runs, so the returned iterator never outlives the
    * connection. On failure the partition is logged and skipped instead of
    * returning null (which previously caused an NPE downstream).
    *
    * @param media broadcast media-class dictionary (value side)
    * @param iter  articles of this partition
    * @return one joined row per matched (article, event) pair
    */
  private def matchEvents(media: Map[String, String], iter: Iterator[NlpJsonBean]): Iterator[String] = {
    if (iter.isEmpty) {
      // Don't open a connection for an empty partition.
      Iterator.empty
    } else {
      var conn: Connection = null
      var stmt: PreparedStatement = null
      try {
        conn = DriverManager.getConnection(ConfigHandler.url, ConfigHandler.user, ConfigHandler.passwd)
        stmt = conn.prepareStatement(
          """select id,user_id,article_id,article_title,keywords
            |from public_sentiment_event
            |where run_flag = 1 and del_flag = 0 and start_time <= ? and end_time >= ?""".stripMargin)

        // Materialize so resources can be released safely in `finally`.
        iter.flatMap(js => matchArticle(stmt, media, js)).toList.iterator
      } catch {
        case e: Exception =>
          e.printStackTrace()
          Iterator.empty
      } finally {
        if (stmt != null) try stmt.close() catch { case _: SQLException => () }
        if (conn != null) try conn.close() catch { case _: SQLException => () }
      }
    }
  }

  /**
    * Matches one article against all events active at the current time.
    * `keywords` holds comma-separated groups; a group "hits" when every one of
    * its space-separated words occurs in the article content. For each event,
    * only the FIRST hitting group is reported (mirrors the original
    * break-on-first-hit loop).
    *
    * @return one joined output row per event hit (possibly empty)
    */
  private def matchArticle(stmt: PreparedStatement, media: Map[String, String], js: NlpJsonBean): List[String] = {
    val fmt = new SimpleDateFormat(TimePattern)
    val now = fmt.format(new Date)
    stmt.setString(1, now)
    stmt.setString(2, now)

    val rs: ResultSet = stmt.executeQuery()
    val rows = scala.collection.mutable.ListBuffer[String]()
    try {
      while (rs.next()) {
        val userId = rs.getString("user_id")
        val eventId = rs.getString("article_id")
        val eventName = rs.getString("article_title")

        // First keyword group fully contained in the article content, if any.
        val firstHit = rs.getString("keywords").split(",")
          .find(group => group.split(" ").forall(js.article_content.contains))

        firstHit.foreach { group =>
          val hitWords = group.replaceAll(" ", ",")
          val id = UUID.randomUUID().toString
          val updateTime = fmt.format(new Date)
          val sp = FieldSep
          // Field layout must match EventRelationArticle's positional parsing;
          // the runs of "" are intentionally reserved/blank columns.
          rows += id + sp + userId + sp + eventId + sp + eventName + sp + js.uuid + sp + js.site_name + sp +
            media.getOrElse(js.media_cls, js.media_cls) + sp + js.article_title + sp + js.article_author + sp +
            js.site_url + sp + "" + sp + js.article_pubdate + sp + hitWords + sp +
            "" + sp + "" + sp + "" + sp + "" + sp + "" + sp + "" + sp + "" + sp + "" + sp + "" + sp + "" + sp +
            updateTime + sp + js.article_content
        }
      }
    } finally {
      rs.close()
    }
    rows.toList
  }

  /**
    * Entry point: every 2 seconds consumes NLP article JSON from Kafka,
    * relates each article to the active sentiment events and prints the
    * resulting EventRelationArticle records.
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      .setMaster("local[*]")
      .set("spark.streaming.stopGracefullyOnShutdown", "true")

    val ssc = new StreamingContext(conf, Seconds(2))

    // Load the media dictionary once on the driver and broadcast it to executors.
    val mediaMap: Map[String, String] = BroadcastTools.getMediaDic()
    val mediaDic: broadcast.Broadcast[Map[String, String]] = ssc.sparkContext.broadcast(mediaMap)

    // Consume article records from Kafka.
    val data: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent, // spread the fetched partitions evenly across executors
      ConsumerStrategies.Subscribe[String, String](Array(ConfigHandler.topic), ConfigHandler.kafkaParams("g_543"))
    )

    // Deserialize each JSON payload into an NlpJsonBean.
    val jsonDS: DStream[NlpJsonBean] = data.map(re => new Gson().fromJson(re.value(), classOf[NlpJsonBean]))

    jsonDS.foreachRDD { rdd =>
      // Each element of `rows` is exactly one record. The original accumulated
      // every previous record into one growing string and re-split it with a
      // separator ("#%$^") whose `$`/`^` are regex anchors, so the split never
      // actually matched — both issues are fixed inside matchEvents.
      val rows: RDD[String] = rdd.mapPartitions(iter => matchEvents(mediaDic.value, iter))

      rows
        .map(s => EventRelationArticle(s.split(FieldSep)))
        .foreach(println)

      // Write to Kafka / Hive / Elasticsearch (kept disabled, as in the original):
      /*result.foreachPartition(iter=>{

        val productor: KafkaProducer[String, String] = new KafkaProducer[String, String](ConfigHandler.kafkaProps)
        iter.foreach(cla=>{

          productor.send(new ProducerRecord[String,String]("event_relation_article",UUID.randomUUID().toString.replaceAll("-",""),cla.toString))
        })
      })

      import org.elasticsearch.spark._

      result.write.mode(SaveMode.Append).insertInto("event_relation_article")
      result.rdd.saveToEs("event_relation_article")*/
    }

    ssc.start()
    ssc.awaitTermination()
  }

}
