package com.ctbri.manage.bydeequ.calculate

import org.apache.spark.sql.Row.unapplySeq
import org.apache.spark.sql.{Column, DataFrame, SparkSession}
import org.apache.spark.sql.functions._
import org.apache.spark.sql._
import org.apache.spark.sql.types.{BooleanType, DataTypes}

import java.util._
import scala.language.postfixOps

/**
 * Helpers invoked from the Java side of the bydeequ calculation pipeline to
 * materialize data-quality check results as single-file CSV outputs.
 *
 * @author songyunlong
 * @createTime 2023/6/28 16:48
 * @description
 */
object ForJavaCalcResultGeneration {

    /**
     * Builds a one-row DataFrame wrapping a [[ConfigInfoTotal]] record.
     *
     * @param spark            active SparkSession
     * @param checkType        check category, e.g. "Dataset" or "Column"
     * @param columnName       name of the checked column ("*" for whole-table checks)
     * @param result           computed result value rendered as a string
     * @param assertionExpress textual form of the evaluated assertion
     * @param message          human-readable success/failure message
     * @return single-row DataFrame with the ConfigInfoTotal schema
     */
    private def buildResultFrame(spark: SparkSession,
                                 checkType: String,
                                 columnName: String,
                                 result: String,
                                 assertionExpress: String,
                                 message: String): DataFrame = {
        val rdd = spark.sparkContext.parallelize(Seq(ConfigInfoTotal(
            checkType,
            columnName,
            result,
            assertionExpress,
            message
        )))
        spark.createDataFrame(rdd)
    }

    /**
     * Writes `df` as a single CSV file (with header row) under `path`,
     * overwriting any previous output. `repartition(1)` forces one part file.
     */
    private def writeSingleCsv(df: DataFrame, path: String): Unit =
        df.repartition(1).write
            .mode("overwrite")
            .format("csv")
            .option("header", "true")
            .save(path)

    /**
     * Persists the outcome of a custom-function check as a one-row CSV.
     *
     * @param checkType        "Dataset" or "Column"
     * @param spark            active SparkSession
     * @param result           computed result value, already rendered as a string
     * @param columnName       name of the checked column
     * @param assertionExpress textual form of the assertion that was evaluated
     * @param message          human-readable success/failure message
     * @param dataSavePath     output directory for the CSV result
     */
    def resultGenerationForCustomFunc(checkType: String, spark: SparkSession, result: String, columnName: String, assertionExpress: String, message: String, dataSavePath: String): Unit = {
        val data = buildResultFrame(spark, checkType, columnName, result, assertionExpress, message)
        data.show(100, false)
        writeSingleCsv(data, dataSavePath)
    }

    /**
     * Evaluates a "two tables equal position-by-position" check: keeps the rows
     * where every column in `boolColList` is true, writes the aggregate pass
     * rate to `dataSavePath` as a one-row CSV, and returns either the matching
     * rows (`meetRequirement = true`) or the non-matching rows, identified by
     * an anti-join on `keyList` (columns are expected to be prefixed "df1_").
     *
     * @param checkType        check category label stored in the result row
     * @param spark            active SparkSession
     * @param assertionExpress textual form of the evaluated assertion
     * @param dataSavePath     output directory for the aggregate result CSV
     * @param data             joined comparison table containing the boolean columns
     * @param boolColList      boolean column names that must all be true; non-empty
     * @param keyList          key column names (without the "df1_" prefix)
     * @param meetRequirement  true → return passing rows; false → return failing rows
     * @return DataFrame of passing or failing rows, per `meetRequirement`
     */
    def twoTablePerPosEqualResultAndDataListGeneration(checkType: String,
                                                       spark: SparkSession,
                                                       assertionExpress: String,
                                                       dataSavePath: String,
                                                       data: DataFrame,
                                                       boolColList: Array[String],
                                                       keyList: Array[String],
                                                       meetRequirement: Boolean): DataFrame = {
        import spark.implicits._
        assert(boolColList.length >= 1)
        // Null rows were already filtered out of the boolean table upstream,
        // so filtering on === true is sufficient here.
        val dataResult = boolColList.foldLeft(data)((df, colName) => df.filter($"${colName}" === true))
        // count() is a full Spark action — run each job once and cache the
        // result instead of re-triggering it inside the log line below.
        val meetCount = dataResult.count()
        val totalCount = data.count()
        val meetRate = meetCount.toDouble / totalCount
        println(s"meet-requirement data count : ${meetCount}, total data count: ${totalCount}")
        // Keys of the passing rows, renamed with a "_t" suffix so the left
        // join below does not produce ambiguous column references.
        val meetKey = keyList.foldLeft(
            dataResult.select(keyList.map(colName => $"df1_${colName}"): _*)
        )((df, colName) => df.withColumnRenamed(s"df1_${colName}", s"df1_${colName}_t"))
        meetKey.show(100, false)
        dataResult.show(100, false)
        val message =
            if (meetRate == 1.0) "TwoTablePerPosEqual successed: Value meet the constraint requirement"
            else s"TwoTablePerPosEqual failed: Value: $meetRate does not meet the constraint requirement!"
        val resultData = buildResultFrame(spark, checkType, "*", meetRate.toString, assertionExpress, message)
        resultData.show(100, false)
        writeSingleCsv(resultData, dataSavePath)
        if (meetRequirement) dataResult
        else {
            // Anti-join on the key columns: keep only rows whose key is absent
            // from the passing-key set, then drop the temporary "_t" columns.
            val joined = data
                .join(meetKey,
                    keyList.map(colName => data(s"df1_${colName}") === $"df1_${colName}_t").reduce(_ and _),
                    "left")
                .where(keyList.map(colName => $"df1_${colName}_t".isNull).reduce(_ and _))
            val notMeetDataResult = keyList.foldLeft(joined)((df, colName) => df.drop($"df1_${colName}_t"))
            notMeetDataResult.show(100, false)
            notMeetDataResult
        }
    }

    /** Smoke-test entry point; prints a greeting and exits. */
    def main(args: Array[String]): Unit = {
        println("Hello Scala!")
    }
}
