package cn.tecnova.analysis

import java.sql.{Connection, DriverManager, PreparedStatement, ResultSet}
import java.text.SimpleDateFormat
import java.util.{Date, UUID}

import cn.tecnova.bean.NlpJsonBean
import cn.tecnova.utils.ConfigHandler
import com.google.gson.Gson
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.spark.SparkConf
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.util.control.Breaks

/**
  * description: user-defined risk library — streaming article matching/analysis (用户自定义风险库_分析)
  **/
object DiyRelationArticleV2 {

//  Logger.getLogger("org").setLevel(Level.ERROR)

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      .setMaster("local[*]")
      .set("spark.streaming.stopGracefullyOnShutdown", "true")

    val ssc = new StreamingContext(conf, Seconds(2))

    // Media-type dictionary (id -> name), loaded once on the driver and broadcast to executors.
    val mediaDic: Broadcast[Map[String, String]] = ssc.sparkContext.broadcast(loadMediaTypeMap())

    val data: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent, // spread fetched partitions evenly across executors
      ConsumerStrategies.Subscribe[String, String](Array(ConfigHandler.topic), ConfigHandler.kafkaParams("g_234"))
    )

    // Deserialize each Kafka record's JSON payload into an NlpJsonBean.
    val jsonDS: DStream[NlpJsonBean] = data.map(re => new Gson().fromJson(re.value(), classOf[NlpJsonBean]))

    jsonDS.foreachRDD(rdd => {

      val session = SparkSession.builder().config(rdd.sparkContext.getConf).getOrCreate()
      val dataFrame: DataFrame = session.createDataFrame(rdd)

      dataFrame.foreachPartition(iter => {

        var conn: Connection = null
        var wordGroup: PreparedStatement = null
        var information: PreparedStatement = null

        try {
          conn = DriverManager.getConnection(ConfigHandler.url, ConfigHandler.user, ConfigHandler.passwd)

          // Enabled (run_flag = 1) and non-deleted (del_flag = 0) user-defined keyword groups.
          wordGroup = conn.prepareStatement(
            """
              select
              id,user_id,group_name
              from diy_keyword_group
              where run_flag = 1 and del_flag = 0
            """.stripMargin)

          // BUGFIX: the group ResultSet used to be iterated inside iter.foreach, so it was
          // exhausted after the first article and every later article in the partition was
          // silently skipped. Materialize the groups once per partition instead.
          val wordGroupSet: ResultSet = wordGroup.executeQuery()
          var groups = List[(String, String, String)]() // (groupId, userId, groupName)
          while (wordGroupSet.next()) {
            groups :+= ((wordGroupSet.getString("id"),
              wordGroupSet.getString("user_id"),
              wordGroupSet.getString("group_name")))
          }
          wordGroupSet.close()

          // BUGFIX: this statement used to be re-prepared (and leaked) for every group of
          // every article; prepare it once and rebind the group-id parameter per query.
          information = conn.prepareStatement(
            """
              select
              id,userid,filter_type,filter_key_site,region_words,subject_words,event_words,
              exclusion_words,contain_urls,exclusion_urls,site_type
              from diy_keyword_lines
              where del_flag = 0 and group_id = ?
            """.stripMargin)

          iter.foreach(row => {

            // Fields of the incoming NLP article record.
            val mediaCls = row.getAs[String]("media_cls")                 // media type id
            val articleId = row.getAs[String]("uuid")                     // article id
            val importantLeavel = row.getAs[String]("important_leavel")   // site importance
            val siteUrl = row.getAs[String]("site_url")                   // site url
            val siteName = row.getAs[String]("site_name")                 // site name
            val articleTitle: String = row.getAs[String]("article_title")
            val articleContent: String = row.getAs[String]("article_content")
            val articleAuthor = row.getAs[String]("article_author")
            val articlePubdate = row.getAs[String]("article_pubdate")

            // Evaluate every enabled keyword group against this article.
            groups.foreach { case (groupId, userId, groupName) =>

              information.setString(1, groupId)
              val wordInfoSet = information.executeQuery()
              try {
                // Each result row is one filter line of the group.
                while (wordInfoSet.next()) {

                  val groupLineId = wordInfoSet.getString("id")
                  // Accepted media types (comma separated)
                  val siteTypeArr: Array[String] = wordInfoSet.getString("site_type").split(",")
                  // Required site importance
                  val filterKeySite = wordInfoSet.getInt("filter_key_site")
                  // BUGFIX: String.split takes a regex; a bare "|" matches the empty string
                  // and splits the url lists into single characters. Escape the pipe so the
                  // lists split on the literal '|' separator.
                  val exclusionUrls: Array[String] = wordInfoSet.getString("exclusion_urls").split("\\|")
                  val containUrls: Array[String] = wordInfoSet.getString("contain_urls").split("\\|")
                  val regionWords = wordInfoSet.getString("region_words")
                  val subjectWords = wordInfoSet.getString("subject_words")
                  val eventWords = wordInfoSet.getString("event_words")
                  val exclusionWords: Array[String] = wordInfoSet.getString("exclusion_words").split(",")
                  // Match target: 0 = title + body, 1 = title only, anything else = body only
                  val filterType: Int = wordInfoSet.getInt("filter_type")

                  // The article must match the media type, the site importance, and the
                  // allowed/forbidden site lists of this filter line.
                  if (siteTypeArr.contains(mediaCls) &&
                      importantLeavel.toInt == filterKeySite &&
                      containUrls.contains(siteUrl) && !exclusionUrls.contains(siteUrl)) {

                    val content = filterType match {
                      case 0 => articleTitle + articleContent
                      case 1 => articleTitle
                      case _ => articleContent
                    }
                    sendMessToKafka(mediaDic, mediaCls, articleId, siteUrl, siteName, articleTitle,
                      articleAuthor, articlePubdate, userId, groupId, groupName, groupLineId,
                      regionWords, subjectWords, eventWords, exclusionWords, content)
                  }
                }
              } finally {
                wordInfoSet.close()
              }
            }
          })

        } catch {
          case e: Exception => e.printStackTrace()
        } finally {
          if (information != null) information.close()
          if (wordGroup != null) wordGroup.close()
          if (conn != null) conn.close()
        }
      })
    })

    ssc.start()
    ssc.awaitTermination()
  }

  /**
    * Loads the media_type dictionary (id -> name) from the database.
    *
    * BUGFIX: the original driver-side connection/statement/result-set were never closed;
    * all JDBC resources are released here before returning.
    */
  private def loadMediaTypeMap(): Map[String, String] = {
    val con: Connection = DriverManager.getConnection(ConfigHandler.url, ConfigHandler.user, ConfigHandler.passwd)
    try {
      val mediaStat = con.prepareStatement(
        """
          select
          id,name
          from media_type
        """.stripMargin)
      try {
        val mediaTypeSet = mediaStat.executeQuery()
        var mediaList = List[(String, String)]()
        while (mediaTypeSet.next()) {
          mediaList :+= ((mediaTypeSet.getString("id"), mediaTypeSet.getString("name")))
        }
        mediaList.toMap
      } finally {
        mediaStat.close()
      }
    } finally {
      con.close()
    }
  }

  /**
    * Sends one matched article record to the "diy_relation_article" Kafka topic.
    *
    * The record is dropped when the exclusion-word check flags the content, or when no
    * (region, subject, event) keyword combination is fully contained in the content.
    *
    * @param mediaDic       broadcast media-type dictionary (id -> name)
    * @param mediaCls       media type id of the article
    * @param articleId      article uuid
    * @param siteUrl        site url
    * @param siteName       site name
    * @param articleTitle   article title
    * @param articleAuthor  article author
    * @param articlePubdate publish time of the article
    * @param userId         owner of the keyword group
    * @param groupId        keyword group id
    * @param groupName      keyword group name
    * @param groupLineId    id of the matched filter line
    * @param regionWords    comma separated region keywords
    * @param subjectWords   comma separated subject keywords
    * @param eventWords     comma separated event keywords
    * @param exclusionWords exclusion word array
    * @param content        text that was matched (title, body, or title + body)
    */
  private def sendMessToKafka(mediaDic: Broadcast[Map[String, String]], mediaCls: String, articleId: String, siteUrl: String, siteName: String, articleTitle: String, articleAuthor: String, articlePubdate: String, userId: String, groupId: String, groupName: String, groupLineId: String, regionWords: String, subjectWords: String, eventWords: String, exclusionWords: Array[String], content: String): Unit = {

    // Exclusion-word check result (see contentIsExclusion); only articles that pass the
    // check are forwarded.
    val flag: Boolean = contentIsExclusion(exclusionWords, content)

    if (!flag) {

      // First (region, subject, event) combination fully contained in the content, "" if none.
      val hitWords: String = isNotContainsWordsAndGetHit(regionWords, subjectWords, eventWords, content)

      if (hitWords.nonEmpty) {

        val id = UUID.randomUUID().toString.replaceAll("-", "")
        val updateTime = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date)

        // CSV payload consumed downstream; the empty strings are reserved placeholder
        // columns. NOTE(review): the original concatenated the last placeholder and
        // update_time without a separator, yielding 24 columns; that exact layout is
        // preserved here — confirm the expected column count with the consumer.
        val value = Seq(
          id, userId, groupId, groupName, groupLineId, articleId,
          siteName, mediaDic.value.getOrElse(mediaCls, mediaCls), articleTitle, articleAuthor,
          siteUrl, "", articlePubdate, hitWords,
          "", "", "", "", "", "", "", "", "",
          updateTime
        ).mkString(",")

        // BUGFIX: the producer was created per message and never closed, leaking sockets
        // and buffers; close() (which also flushes pending sends) in a finally block.
        val productor: KafkaProducer[String, String] = new KafkaProducer[String, String](ConfigHandler.props)
        try {
          productor.send(new ProducerRecord[String, String]("diy_relation_article", value))
        } finally {
          productor.close()
        }
      }
    }
  }

  /**
    * Finds the first (region, subject, event) keyword combination whose three words are
    * all contained in the content.
    *
    * @param regionWords  comma separated region keywords
    * @param subjectWords comma separated subject keywords
    * @param eventWords   comma separated event keywords
    * @param content      article text or title to search
    * @return the hit as "region,subject,event", or "" when no combination matches
    */
  private def isNotContainsWordsAndGetHit(regionWords: String, subjectWords: String, eventWords: String, content: String) = {

    val regions = regionWords.split(",")
    val subjects = subjectWords.split(",")
    val events = eventWords.split(",")

    // Lazily walk every combination in order and stop at the first full match
    // (iterators short-circuit, so no work happens past the first hit).
    val hits = for {
      region <- regions.iterator
      subject <- subjects.iterator
      event <- events.iterator
      if content.contains(region) && content.contains(subject) && content.contains(event)
    } yield region + "," + subject + "," + event

    if (hits.hasNext) hits.next() else ""
  }
    /**
      * Checks whether the content contains any of the user-defined exclusion words.
      *
      * @param exclusionWords exclusion word array
      * @param content        article body, title, or title + body
      * @return true when at least one exclusion word occurs in the content
      */
  private def contentIsExclusion(exclusionWords: Array[String], content: String): Boolean = {
    // BUGFIX: the original returned FALSE when an exclusion word was present — the inverse
    // of what the method name and the caller imply (the caller gates sending on !flag with
    // the comment "if it does NOT contain exclusion words"), so exactly the articles that
    // contained exclusion words were forwarded. Also short-circuits on the first hit
    // instead of materializing a Boolean list for every word.
    exclusionWords.exists(word => content.contains(word))
  }
}
