package com.ctbri.manage.bydeequ.example

import com.amazon.deequ.VerificationSuite
import com.amazon.deequ.analyzers.Completeness
import com.amazon.deequ.checks.{Check, CheckLevel, CheckStatus}
import com.amazon.deequ.constraints.ConstraintStatus
import com.amazon.deequ.repository.fs.FileSystemMetricsRepository
import org.apache.spark.SparkContext
import org.apache.spark.sql._
import com.amazon.deequ.repository.{MetricsRepository, ResultKey}
import com.google.common.io.Files
import org.apache.spark.sql.catalyst.dsl.expressions.{DslExpression, StringToAttributeConversionHelper}
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.sql.functions._

import java.io.File
/**
 * @author songyunlong
 * @createTime 2023/5/16 22:18
 * @description Deequ verification example: runs a set of data-quality checks
 *              against a small in-memory dataset and persists the computed
 *              metrics to a file-system metrics repository.
 */
/**
 * Example runner that validates a small in-memory product dataset with Deequ
 * checks and stores the computed success metrics in a file-system
 * [[MetricsRepository]].
 *
 * NOTE(review): `extends App` relies on delayed initialization and is
 * discouraged for non-trivial entry points; a `def main(args: Array[String])`
 * would be safer, but is kept as-is to preserve the object's interface.
 */
object TestExample extends App {

    /**
     * Runs `func` against a freshly built local SparkSession and guarantees
     * the session is stopped afterwards, even if `func` throws.
     *
     * @param func body to execute with the managed session
     */
    def withSpark(func: SparkSession => Unit): Unit = {
        val session = SparkSession.builder()
            .master("local")
            .appName("test")
            .config("spark.ui.enabled", "false")
            .getOrCreate()
        // Checkpoints go to the JVM temp dir so the example needs no external setup.
        session.sparkContext.setCheckpointDir(System.getProperty("java.io.tmpdir"))

        try {
            func(session)
        } finally {
            session.stop()
            println("spark has stopped!") // fixed typo: "stoped" -> "stopped"
            // Clear the driver port so a later session in the same JVM
            // does not attempt to reuse it.
            System.clearProperty("spark.driver.port")
        }
    }

    withSpark { spark =>
        val sc: SparkContext = spark.sparkContext
        sc.setLogLevel("WARN")

        // Five sample rows; some fields are deliberately null so several
        // checks below fail and produce interesting constraint messages.
        val rdd = spark.sparkContext.parallelize(Seq(
            Item(1, "Thingy A", "awesome thing.", "high", 0),
            Item(2, "Thingy B", "available at http://thingb.com", null, 0),
            Item(3, null, null, "low", 5),
            Item(4, "Thingy D", "checkout https://thingd.ca", "low", 10),
            Item(5, "Thingy E", null, "high", 12)))
        val data = spark.createDataFrame(rdd)
        data.show()

        // NOTE(review): Guava's Files.createTempDir() is deprecated; consider
        // java.nio.file.Files.createTempDirectory instead.
        val metricsFile = new File(Files.createTempDir(), "metrics.json")

        val repository: MetricsRepository = FileSystemMetricsRepository(spark, metricsFile.getAbsolutePath)
        val resultKey = ResultKey(System.currentTimeMillis(), Map("tag" -> "repository"))

        // Run the verification suite and append the resulting metrics to the repository.
        val verificationResult = VerificationSuite()
            .onData(data)
            .addCheck(
                Check(CheckLevel.Error, "unit testing my data")
                    .hasSize(_ == 5) // we expect exactly 5 rows (was `_ == 10`, contradicting both the data and this comment)
                    .isComplete("description") // should never be NULL
                    .isUnique("productName") // should not contain duplicates
                    .isComplete("productName") // should never be NULL
                    // should only contain the values "high" and "low"
                    .isContainedIn("priority", Array("high", "low"))
                    .isNonNegative("numViews") // should not contain negative values
                    // at least half of the descriptions should contain a url
                    .containsURL("description", _ >= 0.5)
                    // the approximate 70th percentile of numViews should be at most 9
                    .hasApproxQuantile("numViews", 0.7, _ <= 9)
            )
            .useRepository(repository)
            .saveOrAppendResult(resultKey)
            .run()

        // Flatten every check's constraint results into a single collection.
        val resultsForAllConstraints = verificationResult.checkResults
            .flatMap { case (_, checkResult) => checkResult.constraintResults }

        // One human-readable Row per constraint, success or failure.
        val resultsForAllConstraintsDF = resultsForAllConstraints
            .map { result =>
                result.status match {
                    case ConstraintStatus.Success => Row(s"${result.constraint} succeeded: value meets the constraint requirement")
                    case ConstraintStatus.Failure => Row(s"${result.constraint} failed: ${result.message.get}")
                }
            }

        // Turn the messages into a DataFrame with a row-number id column so
        // they could later be joined against the success-metrics DataFrame.
        val rdd2 = spark.sparkContext.parallelize(resultsForAllConstraintsDF.toSeq)
        val schemaOfData2 = StructType(Array(StructField(name = "message", dataType = StringType, nullable = true)))
        val data2 = spark.createDataFrame(rdd2, schema = schemaOfData2).select(
            row_number().over(Window.orderBy(lit(1))).alias("b_id"), col("message"))
        println("===========")
        data2.show(20, false)
        println("===========")

//        val completenessOfProductName = repository
//            .loadByKey(resultKey).get
//            .metric(Completeness("productName")).get
//
//        println(s"The completeness of the productName column is: $completenessOfProductName")
//        val json = repository.load()
//            .after(System.currentTimeMillis() - 10000)
//            .getSuccessMetricsAsJson()
//
//        println(s"Metrics from the last 10 minutes:\n$json")
//        val seccessMetric = repository.load()
//            .withTagValues(Map("tag" -> "repository"))
//            .getSuccessMetricsAsDataFrame(spark)
//            .drop("tag")
//            .withColumn(colName="a_id", row_number().over(Window.orderBy(lit(1))))
//        seccessMetric.show(numRows=20, truncate=false)
//        val finalResult = seccessMetric.join(right=data2,
//                                                        joinExprs=seccessMetric.col("a_id")===data2.col("b_id"),
//                                                        joinType="left")
//            .drop("name").drop("a_id").drop("b_id")
//        finalResult.unionByName(finalResult).show(20, true)
//        finalResult.unionByName(finalResult).repartition(1).write
//            .mode("overwrite")
//            .format("csv")
//            .option("header", "true")
//            .save("/Users/songyunlong/Desktop/spark_java/Deequ")

//        val value: Dataset[Row] = seccessMetric.select("value")
//        value.show(20, false)
//        sc.stop()
    }
}
