// SPDX-FileCopyrightText: 2023 LakeSoul Contributors
//
// SPDX-License-Identifier: Apache-2.0

package org.apache.spark.sql.lakesoul.schema

import com.dmetasoul.lakesoul
import com.dmetasoul.lakesoul.tables.LakeSoulTable
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.functions._
import org.apache.spark.sql.lakesoul.test.LakeSoulSQLCommandTest
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.{AnalysisException, QueryTest, Row, SparkSession}
import org.junit.runner.RunWith
import org.scalatestplus.junit.JUnitRunner

import java.util.concurrent.CountDownLatch


/**
  * This Suite tests the behavior of LakeSoul commands when a schema-altering commit is run after the
  * command completes analysis but before the command starts its transaction. We want to make sure
  * that we do not corrupt tables.
  */
@RunWith(classOf[JUnitRunner])
class SchemaValidationSuite extends QueryTest with SharedSparkSession with LakeSoulSQLCommandTest {

  /**
    * Optimizer rule used to pause the query thread at a deterministic point.
    * When the plan reaches this rule, the query thread signals readiness via
    * `startConcurrentUpdateLatch` and then parks on `blockActionLatch` until the
    * concurrent metadata change has been committed by the other thread.
    */
  class BlockingRule(
                      blockActionLatch: CountDownLatch,
                      startConcurrentUpdateLatch: CountDownLatch) extends Rule[LogicalPlan] {
    override def apply(plan: LogicalPlan): LogicalPlan = {
      // Tell the main thread we have reached the optimizer (analysis is done)...
      startConcurrentUpdateLatch.countDown()
      // ...then wait here until the concurrent schema change has finished.
      blockActionLatch.await()
      plan
    }
  }

  /**
    * Blocks the thread with the help of an optimizer rule until end of scope.
    * We need two latches to ensure that the thread executing the query is blocked until
    * the other thread concurrently updates the metadata. `blockActionLatch` blocks the action
    * until it is counted down by the thread updating the metadata. `startConcurrentUpdateLatch`
    * blocks the concurrent update from happening until it is counted down when the action
    * reaches the optimizer rule.
    *
    * @param t                          thread running the action under test (blocked by [[BlockingRule]])
    * @param blockActionLatch           released in the finally block so `t` can resume even if `f` throws
    * @param startConcurrentUpdateLatch awaited before running `f`, guaranteeing `t` is parked first
    * @param f                          the concurrent metadata change to run while `t` is blocked
    */
  private def withBlockedExecution(
                                    t: Thread,
                                    blockActionLatch: CountDownLatch,
                                    startConcurrentUpdateLatch: CountDownLatch)(f: => Unit): Unit = {
    t.start()
    // Wait until the action thread has passed analysis and is parked in the optimizer.
    startConcurrentUpdateLatch.await()
    try {
      f
    } finally {
      // Always unblock the action thread and wait for it, even if `f` failed,
      // so we never leak a permanently-parked thread.
      blockActionLatch.countDown()
      t.join()
    }
  }

  /**
    * Creates an independent copy of the given session via reflection.
    * Reflection is needed because `SparkSession.cloneSession` is not publicly
    * accessible (NOTE(review): presumably `private[spark]` — confirm against the
    * Spark version in use). The clone shares state but has its own
    * `experimental.extraOptimizations`, so the blocking rule only affects the
    * action-under-test, not the thread performing the concurrent change.
    */
  def cloneSession(spark: SparkSession): SparkSession = {
    val cloneMethod = classOf[SparkSession].getDeclaredMethod("cloneSession")
    cloneMethod.setAccessible(true)
    val clonedSession = cloneMethod.invoke(spark).asInstanceOf[SparkSession]
    clonedSession
  }

  /**
    * Common base method for both the path based and table name based tests.
    *
    * @param identifier       table path or table name, passed through to all three callbacks
    * @param createTable      creates the table before the race is set up
    * @param actionToTest     the command whose behavior under a concurrent schema change is verified;
    *                         runs on a cloned session that carries the blocking rule
    * @param concurrentChange the schema-altering operation executed while `actionToTest` is parked
    */
  private def testConcurrentChangeBase(identifier: String)(
    createTable: (SparkSession, String) => Unit,
    actionToTest: (SparkSession, String) => Unit,
    concurrentChange: (SparkSession, String) => Unit): Unit = {
    createTable(spark, identifier)

    // Clone the session to run the query in a separate thread.
    val newSession = cloneSession(spark)
    val blockActionLatch = new CountDownLatch(1)
    val startConcurrentUpdateLatch = new CountDownLatch(1)
    val rule = new BlockingRule(blockActionLatch, startConcurrentUpdateLatch)
    // Inject the blocking rule only into the cloned session's optimizer.
    newSession.experimental.extraOptimizations :+= rule

    // Captured on the action thread; visible here after t.join() in
    // withBlockedExecution establishes the happens-before edge.
    var actionException: Exception = null
    val actionToTestThread = new Thread() {
      override def run(): Unit = {
        try {
          actionToTest(newSession, identifier)
        } catch {
          case e: Exception =>
            actionException = e
        }
      }
    }
    withBlockedExecution(actionToTestThread, blockActionLatch, startConcurrentUpdateLatch) {
      concurrentChange(spark, identifier)
    }
    // Re-throw on the test thread so the test framework records the failure.
    if (actionException != null) {
      throw actionException
    }
  }

  /**
    * tests the behavior of concurrent changes to schema on a blocked command.
    *
    * @param testName         - name of the test
    * @param createTable      - method that creates a table given an identifier and spark session.
    * @param actionToTest     - the method we want to test.
    * @param concurrentChange - the concurrent query that updates the schema of the table
    *
    *                         All the above methods take SparkSession and the table path as parameters
    */
  def testConcurrentChange(testName: String)(
    createTable: (SparkSession, String) => Unit,
    actionToTest: (SparkSession, String) => Unit,
    concurrentChange: (SparkSession, String) => Unit): Unit = {

    test(testName) {
      withTempDir { tempDir =>
        testConcurrentChangeBase(tempDir.getCanonicalPath)(
          createTable,
          actionToTest,
          concurrentChange
        )
      }
    }
  }

  /**
    * tests the behavior of concurrent changes of schema on a blocked command with metastore tables.
    *
    * @param testName         - name of the test
    * @param createTable      - method that creates a table given an identifier and spark session.
    * @param actionToTest     - the method we want to test.
    * @param concurrentChange - the concurrent query that updates the schema of the table
    *
    *                         All the above methods take SparkSession and the table name as parameters
    */
  def testConcurrentChangeWithTable(testName: String)(
    createTable: (SparkSession, String) => Unit,
    actionToTest: (SparkSession, String) => Unit,
    concurrentChange: (SparkSession, String) => Unit): Unit = {

    val tblName = "metastoreTable"
    test(testName) {
      withTable(tblName) {
        testConcurrentChangeBase(tblName)(
          createTable,
          actionToTest,
          concurrentChange
        )
      }
    }
  }

  /**
    * Creates a method to remove a column from the table by taking column as an argument.
    * Implemented as a full overwrite with `overwriteSchema=true` so the dropped
    * column disappears from the table schema, not just from the data.
    */
  def dropColFromSampleTable(col: String): (SparkSession, String) => Unit = {
    (spark: SparkSession, tblPath: String) => {
      spark.read.format("lakesoul").load(tblPath)
        .drop(col)
        .write
        .format("lakesoul")
        .mode("overwrite")
        .option("overwriteSchema", "true")
        .save(tblPath)
    }
  }

  /**
    * Adding a column to the schema will result in the blocked thread appending to the table
    * with null values for the new column.
    */
  testConcurrentChange("write - add a column concurrently")(
    createTable = (spark: SparkSession, tblPath: String) => {
      spark.range(10).write.format("lakesoul").save(tblPath)
    },
    actionToTest = (spark: SparkSession, tblPath: String) => {
      spark.range(11, 20).write.format("lakesoul")
        .mode("append")
        .save(tblPath)

      // id <= 20 excludes the concurrent writer's rows (ids 21-29, col2 = 2);
      // the remaining rows were written without col2, so it must read back as null.
      val appendedCol2Values = spark.read.format("lakesoul")
        .load(tblPath)
        .filter(col("id") <= 20)
        .select("col2")
        .distinct()
        .collect()
        .toList
      assert(appendedCol2Values == List(Row(null)))
    },
    concurrentChange = (spark: SparkSession, tblPath: String) => {
      spark.range(21, 30).withColumn("col2", lit(2)).write
        .format("lakesoul")
        .mode("append")
        .option("mergeSchema", "true")
        .save(tblPath)
    }
  )

  /**
    * Removing a column while a query is in running should throw an analysis
    * exception
    */
  testConcurrentChange("write - remove a column concurrently")(
    createTable = (spark: SparkSession, tblPath: String) => {
      spark.range(10).withColumn("col2", lit(1))
        .write
        .format("lakesoul")
        .save(tblPath)
    },
    actionToTest = (spark: SparkSession, tblPath: String) => {
      // The append still carries col2, but the table schema no longer has it
      // by the time the write is committed.
      val e = intercept[AnalysisException] {
        spark.range(11, 20)
          .withColumn("col2", lit(1)).write.format("lakesoul")
          .mode("append")
          .save(tblPath)
      }
      assert(e.getMessage.contains(
        "A schema mismatch detected when writing to the table"))
    },
    concurrentChange = dropColFromSampleTable("col2")
  )

  /**
    * Removing a column while performing a delete should be caught while
    * writing the deleted files(i.e files with rows that were not deleted).
    */
  testConcurrentChange("delete - remove a column concurrently")(
    createTable = (spark: SparkSession, tblPath: String) => {
      spark.range(10).withColumn("col2", lit(1))
        .write
        .format("lakesoul")
        .save(tblPath)
    },
    actionToTest = (spark: SparkSession, tblPath: String) => {
      val lakeSoulTable = LakeSoulTable.forPath(spark, tblPath)
      // col2 is not in the delete condition; the failure surfaces when the
      // rewrite of surviving rows references the now-missing column.
      val e = intercept[Exception] {
        lakeSoulTable.delete(col("id") === 1)
      }
      assert(e.getMessage.contains(s"Can't resolve column col2"))
    },
    concurrentChange = dropColFromSampleTable("col2")
  )

  /**
    * Removing a column(referenced in condition) while performing a delete will
    * result in a no-op.
    */
  testConcurrentChange("delete - remove condition column concurrently")(
    createTable = (spark: SparkSession, tblPath: String) => {
      spark.range(10).withColumn("col2", lit(1))
        .repartition(2)
        .write
        .format("lakesoul")
        .save(tblPath)
    },
    actionToTest = (spark: SparkSession, tblPath: String) => {
      val lakeSoulTable = lakesoul.tables.LakeSoulTable.forPath(spark, tblPath)
      val e = intercept[Exception] {
        //        lakeSoulTable.delete(col("id") === 1)
        lakeSoulTable.delete(col("id").isNull)
      }
      //      assert(e.getMessage.contains(s"cannot resolve '`id`' given input columns"))
      assert(e.getMessage.contains("Can't resolve column id in root"))
    },
    concurrentChange = dropColFromSampleTable("id")
  )

  /**
    * An update command that has to rewrite files will have the old schema,
    * we catch the outdated schema during the write.
    */
  testConcurrentChange("update - remove a column concurrently")(
    createTable = (spark: SparkSession, tblPath: String) => {
      spark.range(10).withColumn("col2", lit(1))
        .write
        .format("lakesoul")
        .save(tblPath)
    },
    actionToTest = (spark: SparkSession, tblPath: String) => {
      val lakeSoulTable = lakesoul.tables.LakeSoulTable.forPath(spark, tblPath)
      val e = intercept[AnalysisException] {
        lakeSoulTable.update(col("id") =!= 1, Map("col2" -> lit(-1)))
      }
      assert(e.getMessage.contains(s"Can't resolve column col2"))
    },
    concurrentChange = dropColFromSampleTable("col2")
  )

  /**
    * Removing a column(referenced in condition) while performing a update will
    * result in a no-op.
    */
  testConcurrentChange("update - remove condition column concurrently")(
    createTable = (spark: SparkSession, tblPath: String) => {
      spark.range(10).withColumn("col2", lit(1))
        .repartition(2)
        .write
        .format("lakesoul")
        .save(tblPath)
    },
    actionToTest = (spark: SparkSession, tblPath: String) => {
      val lakeSoulTable = lakesoul.tables.LakeSoulTable.forPath(spark, tblPath)
      val e = intercept[Exception] {
        lakeSoulTable.update(col("id").isNull, Map("id" -> lit("2")))
      }
      assert(e.getMessage.contains("Can't resolve column id in root"))
    },
    concurrentChange = dropColFromSampleTable("id")
  )


  /**
    * Alter table to add a column and at the same time add a column concurrently.
    * The blocked ALTER must fail because the concurrent change already added
    * a column with the same name.
    */
  testConcurrentChangeWithTable("alter table add column - remove column and add same column")(
    createTable = (spark: SparkSession, tblName: String) => {
      spark.range(10).write.format("lakesoul").saveAsTable(tblName)
    },
    actionToTest = (spark: SparkSession, tblName: String) => {
      val e = intercept[AnalysisException] {
        spark.sql(s"ALTER TABLE `$tblName` ADD COLUMNS (col2 string)")
      }
      assert(e.getMessage.contains("Found duplicate column(s) in adding columns: col2"))
    },
    concurrentChange = (spark: SparkSession, tblName: String) => {
      spark.sql(s"ALTER TABLE `$tblName` ADD COLUMNS (col2 string)")
    }
  )
}

