package com.sunzm.spark.sql.filter

import com.alibaba.fastjson.{JSON, JSONObject}
import com.sunzm.common.utils.ParameterTool
import org.apache.commons.lang3.StringUtils
import org.apache.commons.lang3.time.DateFormatUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.sql.{Dataset, SparkSession}

import scala.util.control.NonFatal

/**
 * chatMsg消息临时过滤
 *
 * spark-submit \
 * --master spark://192.168.30.178:7077 \
 * --class com.sunzm.spark.sql.filter.ChatMsgTmpFilterJob \
 * --deploy-mode cluster \
 * --executor-memory 4G  \
 * --total-executor-cores 18 \
 * --executor-cores 3  \
 * --driver-memory 4G  \
 * Spark-Job/spark-demo-1.0.jar \
 * --master spark://192.168.30.178:7077 \
 * --companyId a9a33a54e6374c778185cbdbbcd87c02 \
 * --hdfsServer 192.168.30.178:8020,192.168.30.55:8020 \
 * --readFile "/data/data_center/cus/chatMsg/chatMsg-2021-06-{0*,10*,11*,12*,13*,14*,15*}" \
 * --savePath "/tmp/sunzm/robot-msg/2021-06/" \
 * --robotIds "1,2,4,5,7"
 *
 * @author Administrator
 * @version 1.0
 * @date 2021-06-15 14:04
 */
object ChatMsgTmpFilterJob {

  /**
   * Entry point: resolves which of the configured NameNodes is active, then
   * delegates the actual filtering work to [[processFunc]].
   *
   * Fails fast with an [[IllegalStateException]] when none of the configured
   * NameNodes is reachable, instead of silently continuing with the
   * incomplete URI `"hdfs://"` and failing much later inside Spark.
   */
  def main(args: Array[String]): Unit = {

    val params = ParameterTool.fromArgs(args)

    // Format: 192.168.30.178:8020,192.168.30.55:8020
    val hdfsServer: String = params.getRequired("hdfsServer")
    // Format: /data/data_center/cus/chatMsg/chatMsg-2021-05-{15*,16*,17*,18*,19*,2*,3*}
    val readFile: String = params.getRequired("readFile")

    // Probe each candidate NameNode in order and keep the first active one.
    val candidates: Array[String] = StringUtils.split(hdfsServer, ",").map(StringUtils.trim)

    val hdfsURI: String = candidates.iterator
      .map(ipPort => s"hdfs://${ipPort}")
      .find(uri => isNameNodeActive(uri, readFile))
      .getOrElse(throw new IllegalStateException(
        s"No active NameNode found among: ${hdfsServer}"))

    processFunc(args)(hdfsURI, readFile)
  }

  /**
   * Checks whether the NameNode behind `hdfsURI` is active by testing the
   * existence of the parent directory of `readFile`.
   *
   * The [[FileSystem]] handle is closed in a `finally` block, even on failure:
   * the original code leaked one handle per probed NameNode (only the last
   * assignment was ever closed) and threw an NPE on `close()` when every
   * probe failed.
   *
   * @param hdfsURI  candidate URI, e.g. hdfs://192.168.30.178:8020
   * @param readFile input path; its last segment may be a glob, so only the
   *                 parent directory is checked for existence
   * @return true when the parent directory of `readFile` exists on this NameNode
   */
  private def isNameNodeActive(hdfsURI: String, readFile: String): Boolean = {
    var fs: FileSystem = null
    try {
      fs = new Path(hdfsURI).getFileSystem(new Configuration())

      val fullPath = s"${hdfsURI}${readFile}"
      // Strip the (possibly glob-containing) last segment; keep the trailing "/".
      val parentDir = StringUtils.substring(fullPath, 0, fullPath.lastIndexOf("/") + 1)

      fs.exists(new Path(parentDir))
    } catch {
      // NonFatal (not Throwable): let OOM / interrupts propagate.
      case NonFatal(e) =>
        println(s"当前NameNode状态不正常: ${hdfsURI}")
        e.printStackTrace()
        false
    } finally {
      if (fs != null) {
        try fs.close() catch { case NonFatal(_) => () }
      }
    }
  }

  /**
   * Runs the Spark job: reads the raw chatMsg JSON lines, keeps only the
   * target company's robot-related records (type 5 visitor→robot text
   * messages and type 10 session records), joins them on the conversation id
   * `cid`, and writes the result as a single JSON file under `savePath`.
   *
   * @param args     raw CLI args; re-parsed here for companyId / savePath /
   *                 optional master / appName overrides
   * @param hdfsURI  resolved URI of the active NameNode, e.g. hdfs://host:8020
   * @param readFile glob path of the input files on HDFS
   */
  def processFunc(args: Array[String])(hdfsURI: String, readFile: String): Unit = {
    println(s"使用的hdfsURI: ${hdfsURI}")

    val params = ParameterTool.fromArgs(args)

    // Required parameters.
    val queryCompanyId: String = params.getRequired("companyId")
    val savePath: String = params.getRequired("savePath")

    Logger.getLogger("org").setLevel(Level.WARN)

    val builder: SparkSession.Builder = SparkSession
      .builder()
      .appName(s"${this.getClass.getSimpleName.stripSuffix("$")}")
      .config("spark.serializer", classOf[KryoSerializer].getName)
      .config("spark.executor.extraJavaOptions", "-XX:+UseConcMarkSweepGC")
      .config("spark.default.parallelism", 8)
      .config("spark.sql.shuffle.partitions", 8)

    // Optional overrides, e.g. for local runs or renaming the job.
    if (params.has("master")) {
      builder.master(params.getString("master"))
    }
    if (params.has("appName")) {
      builder.appName(params.getString("appName"))
    }

    val spark: SparkSession = builder.getOrCreate()

    import spark.implicits._

    val dataDS: Dataset[String] = spark.read.textFile(s"${hdfsURI}${readFile}")

    // Keep only the target company's records of type 5 (chat message) or
    // type 10 (session record); malformed JSON lines are logged and dropped.
    val filterDS: Dataset[String] = dataDS.filter(line => {
      try {
        val json = JSON.parseObject(line)
        val companyId = json.getString("companyId")
        val typeInt = json.getIntValue("type")
        StringUtils.equals(queryCompanyId, companyId) && (typeInt == 5 || typeInt == 10)
      } catch {
        // NonFatal instead of a bare wildcard: never swallow fatal errors.
        case NonFatal(_) =>
          println(s"JSON解析异常: ${line}")
          false
      }
    })

    // Reused by both the t1 and t2 branches below.
    filterDS.cache()

    // t1: visitor-typed text sent to the robot, excluding markup-only payloads.
    filterDS.filter(line => {
      try {
        val json = JSON.parseObject(line)

        val msg: String = StringUtils.trim(json.getString("msg"))
        val typeInt: Int = json.getIntValue("type")
        // senderType / receiverType: 0 = visitor, 1 = robot, 2 = human agent
        val senderType: Int = json.getIntValue("senderType")
        val receiverType: Int = json.getIntValue("receiverType")

        // Wanted: robot-session data (type 5), typed by the visitor, with at
        // least two characters of real text that is not an HTML-ish payload.
        val isVisitorToRobot = typeInt == 5 && senderType == 0 && receiverType == 1
        val hasRealText = StringUtils.isNotBlank(msg) && StringUtils.length(msg) > 1
        val isMarkup = Seq("<img", "<audio", "<div", "<css", "<html")
          .exists(prefix => StringUtils.startsWithIgnoreCase(msg, prefix))

        isVisitorToRobot && hasRealText && !isMarkup
      } catch {
        case NonFatal(_) =>
          println(s"JSON解析异常: ${line}")
          false
      }
    }).map(line => {
      val json = JSON.parseObject(line)

      val msg = StringUtils.trim(json.getString("msg"))
      val cid = json.getString("cid")
      val senderName = json.getString("senderName")
      val receiverName = json.getString("receiverName")
      // "t" carries an epoch-millis timestamp.
      val sendTime = DateFormatUtils.format(json.getLongValue("t"), "yyyy-MM-dd HH:mm:ss")

      (cid, senderName, receiverName, sendTime, msg)
    }).toDF("cid", "senderName", "receiverName", "sendTime", "msg")
      .createOrReplaceTempView("t1")

    // t2: type-10 session records mapping a conversation id to its partner id.
    // Lines here already parsed successfully in the first filter above.
    filterDS.filter(line => JSON.parseObject(line).getIntValue("type") == 10)
      .map(line => {
        val json = JSON.parseObject(line)
        (json.getString("cid"), json.getString("partnerId"))
      }).toDF("cid", "partnerId")
      .createOrReplaceTempView("t2")

    val resultDF = spark.sql(
      """
        | SELECT t1.*, t2.partnerId FROM t1 JOIN t2 ON t1.cid = t2.cid
        |""".stripMargin)

    // coalesce(1): emit a single output file for downstream consumption.
    resultDF.coalesce(1).write.mode("overwrite").json(s"${hdfsURI}${savePath}")

    filterDS.unpersist()
    spark.close()
  }
}
