package com.sunzm.spark.sql.diff

import com.alibaba.fastjson.JSON
import org.apache.commons.lang3.StringUtils
import org.apache.log4j.{Level, Logger}
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.sql.{Dataset, SaveMode, SparkSession}

import scala.util.control.NonFatal

/**
 * Given two JSON-lines files, finds the records that exist in one file
 * but not the other (an id-based two-way diff).
 *
 * @author Administrator
 * @version 1.0
 * @date 2021-07-19 16:02
 */
object FindDiffJob {
  def main(args: Array[String]): Unit = {
    Logger.getLogger("org").setLevel(Level.WARN)

    val builder: SparkSession.Builder = SparkSession
      .builder()
      .appName(s"${this.getClass.getSimpleName.stripSuffix("$")}")
      .master("local[*]")
      .config("spark.serializer", classOf[KryoSerializer].getName)
      .config("spark.executor.extraJavaOptions", "-XX:+UseConcMarkSweepGC")
      .config("spark.default.parallelism", 8)
      .config("spark.sql.shuffle.partitions", 8)

    val spark: SparkSession = builder.getOrCreate()

    try {
      import spark.implicits._

      val leftFileName = "D:/data/spark/diff/call_center_message-2022-01-15.json"
      val rightFileName = "D:/data/spark/diff/call_log_record-2022-01-15.json"
      val filterCompanyId = "ca7338ec98cb42c3a3799624622bff3c"
      val leftMoreFile = "D:/data/spark/diff/leftmore/"
      val rightMoreFile = "D:/data/spark/diff/rightmore/"

      /**
       * Parses one JSON line exactly once (the original pipeline parsed each
       * line twice: once in `filter`, again in `map`). Keeps the line only
       * when its "companyId" equals `filterCompanyId`, yielding
       * (value of `idField`, original line).
       *
       * Malformed lines are logged with `errPrefix` and dropped. `NonFatal`
       * is used instead of a bare `case _` so fatal throwables (OOM,
       * InterruptedException, ...) still propagate.
       */
      def parseLine(line: String, idField: String, errPrefix: String): Option[(String, String)] =
        try {
          val json = JSON.parseObject(line)
          if (StringUtils.equals(filterCompanyId, json.getString("companyId")))
            Some((json.getString(idField), line))
          else
            None
        } catch {
          case NonFatal(_) =>
            println(s"${errPrefix}: ${line}")
            None
        }

      val leftDataDS: Dataset[String] = spark.read.textFile(leftFileName)
      val rightDataDS: Dataset[String] = spark.read.textFile(rightFileName)

      // Left side is keyed by "callid", right side by "callRecordId";
      // the two ids are expected to join against each other below.
      val leftFilteredDS: Dataset[(String, String)] = leftDataDS
        .flatMap(line => parseLine(line, "callid", "JSON解析异常"))
        .toDF("id", "line").as[(String, String)]

      val rightFilteredDS: Dataset[(String, String)] = rightDataDS
        .flatMap(line => parseLine(line, "callRecordId", "数据异常"))
        .toDF("id", "line").as[(String, String)]

      leftFilteredDS.createOrReplaceTempView("v_left")
      rightFilteredDS.createOrReplaceTempView("v_right")

      // Records present on the left but missing on the right
      // (anti-join expressed as LEFT JOIN ... WHERE right id IS NULL).
      spark.sql(
        """
          |SELECT t1.line FROM
          | v_left t1 LEFT JOIN v_right t2
          | ON t1.id = t2.id
          | WHERE t2.id IS NULL
          |""".stripMargin).coalesce(1).write.mode(SaveMode.Overwrite)
        .text(leftMoreFile)

      // Records present on the right but missing on the left.
      spark.sql(
        """
          |SELECT t2.line FROM
          | v_left t1 RIGHT JOIN v_right t2
          | ON t1.id = t2.id
          | WHERE t1.id IS NULL
          |""".stripMargin).coalesce(1).write.mode(SaveMode.Overwrite)
        .text(rightMoreFile)
    } finally {
      // Always release the session, even when the job fails part-way.
      spark.close()
    }
  }
}
