package com.ctbri.manage.compute.scala

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.SparkSession
import org.slf4j.{Logger, LoggerFactory}

import scala.jdk.CollectionConverters.asScalaIteratorConverter
import com.amazon.deequ.VerificationSuite
import com.amazon.deequ.checks.{Check, CheckLevel, CheckStatus}
import com.amazon.deequ.constraints.ConstraintStatus
import org.apache.spark.sql.functions.{length, when}

/**
 * Spark / Deequ data-quality demo utilities.
 *
 * @author wangxuem
 * @since 2023/3/6 11:55
 */
/**
 * Immutable record describing one catalog item used by the sample
 * data-quality checks in [[Test]].
 *
 * Case-class parameters are `val`s automatically, and every case class
 * already extends `Product with Serializable`, so the previous explicit
 * modifiers and `extends` clause were redundant decompiler noise.
 *
 * @param id          unique item id (uniqueness is asserted by Deequ checks)
 * @param productName display name; may be null in the sample data
 * @param description free-form text, possibly containing a URL; may be null
 * @param priority    expected to be "high" or "low"; may be null
 * @param numViews    view counter, expected non-negative
 */
private final case class Item(id: Long, productName: String,
                              description: String, priority: String,
                              numViews: Long)
/**
 * Entry point and demo routines: DataFrame column derivation, a word count,
 * reading Hive via externally supplied Hadoop config files, and a Deequ
 * data-quality verification.
 *
 * All routines now release their SparkSession in a `finally` block; the
 * previous code leaked the session on both normal and exceptional exit.
 */
object Test {
  protected final val logger: Logger = LoggerFactory.getLogger(this.getClass)

  def main(args: Array[String]): Unit = {
    qualityTest()
  }

  /**
   * Derives a "code" column: whichever of productName/description is longer,
   * falling back to the non-null one when the other is null, plus the two
   * length columns, then shows the result.
   */
  def operate(): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("app")
      .getOrCreate()
    try {
      val rdd = spark.sparkContext.parallelize(Seq(
        Item(1, "Thingy A Thingy A Thingy A", "awesome thing.", "high", 0),
        Item(2, "Thingy B", "available at http://thingb.com", null, 0),
        Item(3, null, null, "low", 5),
        Item(4, "Thingy D", "checkout https://thingd.ca", "low", 10),
        Item(5, "Thingy E", null, "high", 12)))

      val df1 = spark.createDataFrame(rdd)
      // Null branches must come first: length(null) is null, so the >=
      // comparison would silently drop rows with a null side otherwise.
      val df2 = df1.withColumn("code",
        when(df1("productName").isNull, df1("description"))
          .when(df1("description").isNull, df1("productName"))
          .when(length(df1("productName")) >= length(df1("description")), df1("productName"))
          .otherwise(df1("description")))
        .withColumn("productNameLength", length(df1("productName")))
        .withColumn("descriptionLength", length(df1("description")))
      df2.show()
    } finally {
      spark.stop() // previously leaked: the session was never stopped
    }
  }

  /**
   * Classic word count over the text file named by `args(0)`, printing each
   * (word, count) pair. Exits with status 1 when no input path is given.
   */
  def wordcount(args: Array[String]): Unit = {
    // Validate arguments BEFORE paying the cost of building a SparkSession
    // (the old code constructed the session first, then exited).
    if (args.isEmpty) {
      logger.info("args length is 0")
      System.exit(1)
    }
    val spark = SparkSession
      .builder()
      //      .master("local[*]")
      .appName("app")
      .getOrCreate()
    try {
      val result = spark.sparkContext.textFile(args(0))
        .flatMap(_.split(" "))
        .map((_, 1))
        .reduceByKey(_ + _)
      result.collect().foreach(println)
    } finally {
      // stop() even when the job throws; the old code only closed on success
      spark.stop()
    }
    logger.info("finish!")
  }

  /**
   * Builds a Hive-enabled session from externally provided Hadoop XML config
   * files and lists the databases.
   *
   * NOTE(review): the Windows paths below are hard-coded; consider making
   * them parameters or loading them from configuration.
   */
  def readHive(): Unit = {
    val sparkBuilder = SparkSession
      .builder
      .master("local")
      .appName("Spk Pi")
    val conf = new Configuration()
    // These file locations could instead be looked up from a database.
    val core = new Path("D:\\dev\\config\\hadoop\\core-site.xml")
    val hdfs = new Path("D:\\dev\\config\\hadoop\\hdfs-site.xml")
    val hive = new Path("D:\\dev\\config\\hadoop\\hive-site.xml")
    conf.addResource(core)
    conf.addResource(hdfs)
    conf.addResource(hive)
    // Copy every Hadoop property into the Spark builder so the session
    // talks to the remote cluster described by the XML files.
    for (c <- conf.iterator().asScala) {
      sparkBuilder.config(c.getKey, c.getValue)
    }
    val spark = sparkBuilder.enableHiveSupport().getOrCreate()
    try {
      spark.sql("show databases").show()
    } finally {
      spark.stop()
    }
  }

  /**
   * Runs a Deequ verification suite against the sample items and prints the
   * failing constraints (if any).
   */
  def qualityTest(): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("app")
      .getOrCreate()
    try {
      val rdd = spark.sparkContext.parallelize(Seq(
        Item(1, "Thingy A", "awesome thing.", "high", 0),
        Item(2, "Thingy B", "available at http://thingb.com", null, 0),
        Item(3, null, null, "low", 5),
        Item(4, "Thingy D", "checkout https://thingd.ca", "low", 10),
        Item(5, "Thingy E", null, "high", 12)))
      val data = spark.createDataFrame(rdd)

      val verificationResult = VerificationSuite()
        .onData(data)
        .addCheck(
          Check(CheckLevel.Error, "unit testing my data")
            .hasSize(_ == 5) // we expect 5 rows
            .isComplete("id") // should never be NULL
            .isUnique("id") // should not contain duplicates
            .isComplete("productName") // should never be NULL
            // should only contain the values "high" and "low"
            .isContainedIn("priority", Array("high", "low"))
            .isNonNegative("numViews") // should not contain negative values
            // at least half of the descriptions should contain a url
            .containsURL("description", _ >= 0.5)
            // half of the items should have less than 10 views
            .hasApproxQuantile("numViews", 0.5, _ <= 10))
        .run()

      if (verificationResult.status == CheckStatus.Success) {
        println("The data passed the test, everything is fine!")
      } else {
        println("We found errors in the data:\n")
        // Flatten the per-check results once; the old code did this twice and
        // also printed `productIterator`, which only emits the Iterator
        // object's toString — meaningless debug noise, now removed.
        val resultsForAllConstraints = verificationResult.checkResults
          .flatMap { case (_, checkResult) => checkResult.constraintResults }

        resultsForAllConstraints
          .filter(_.status != ConstraintStatus.Success)
          // message is an Option and not guaranteed present; the old
          // `.message.get` could throw NoSuchElementException.
          .foreach { result =>
            println(s"${result.constraint}: ${result.message.getOrElse("(no message)")}")
          }
      }
    } finally {
      spark.stop() // previously leaked: the session was never stopped
    }
  }

}
