/*
 * Copyright (2021) The Delta Lake Project Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.delta

import java.io.File
import java.nio.charset.StandardCharsets.UTF_8
import java.sql.Timestamp
import java.text.SimpleDateFormat
import java.util.Date

import scala.concurrent.duration._
import scala.language.implicitConversions

import org.apache.spark.sql.delta.DeltaHistoryManager.BufferingLogDeletionIterator
import org.apache.spark.sql.delta.DeltaTestUtils.createTestAddFile
import org.apache.spark.sql.delta.actions.{Action, CommitInfo, SingleAction}
import org.apache.spark.sql.delta.coordinatedcommits.CatalogOwnedTestBaseSuite
import org.apache.spark.sql.delta.test.DeltaSQLCommandTest
import org.apache.spark.sql.delta.test.DeltaSQLTestUtils
import org.apache.spark.sql.delta.test.DeltaTestImplicits._
import org.apache.spark.sql.delta.util.{DateTimeUtils, DeltaCommitFileProvider, FileNames,
  JsonUtils, TimestampFormatter}
import org.apache.hadoop.fs.{FileStatus, Path}

import org.apache.spark.sql.{functions, AnalysisException, QueryTest, Row}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.util.ManualClock

class DeltaTimeTravelSuite extends QueryTest
  with SharedSparkSession
  with DeltaSQLTestUtils
  with DeltaSQLCommandTest
  with CatalogOwnedTestBaseSuite {

  import testImplicits._

  // Formats epoch millis as a compact `yyyyMMddHHmmssSSS` string for the `path@time` syntax.
  private val timeFormatter = new SimpleDateFormat("yyyyMMddHHmmssSSS")

  // Lets FiniteDuration literals (e.g. `5.seconds`) be used wherever epoch-millis Longs
  // are expected throughout this suite.
  private implicit def durationToLong(duration: FiniteDuration): Long = {
    duration.toMillis
  }

  // Lets epoch-millis Longs be used wherever a java.sql.Timestamp is expected.
  private implicit def longToTimestamp(ts: Long): Timestamp = new Timestamp(ts)

  /**
   * Rewrites the on-disk modification time of the delta file for `version` to `ts`, along with
   * its checksum (.crc) file when one exists, so file-timestamp-based time travel sees `ts`.
   */
  private def modifyCommitTimestamp(deltaLog: DeltaLog, version: Long, ts: Long): Unit = {
    val commitFile = new File(DeltaCommitFileProvider(deltaLog.update()).deltaFile(version).toUri)
    commitFile.setLastModified(ts)
    val checksumFile = new File(FileNames.checksumFile(deltaLog.logPath, version).toUri)
    if (checksumFile.exists()) {
      checksumFile.setLastModified(ts)
    }
  }

  /** Rewrites the on-disk modification time of the single-file checkpoint for `version` to `ts`. */
  private def modifyCheckpointTimestamp(deltaLog: DeltaLog, version: Long, ts: Long): Unit = {
    val checkpointFile = new File(FileNames.checkpointFileSingular(deltaLog.logPath, version).toUri)
    checkpointFile.setLastModified(ts)
  }

  /**
   * Generate commits with the given timestamp in millis. "Cheap" because each commit carries a
   * single synthetic AddFile rather than real table data.
   */
  private def generateCommitsCheap(deltaLog: DeltaLog, clock: ManualClock, commits: Long*): Unit = {
    // Versions are assigned consecutively, starting right after the current snapshot's version.
    val firstVersion = deltaLog.unsafeVolatileSnapshot.version + 1
    commits.zipWithIndex.foreach { case (ts, offset) =>
      val version = firstVersion + offset
      val action =
        createTestAddFile(encodedPath = version.toString, modificationTime = version)
      clock.setTime(ts)
      deltaLog.startTransaction().commitManually(action)
      // Align the commit file's modification time with the requested timestamp.
      modifyCommitTimestamp(deltaLog, version, ts)
    }
  }

  /**
   * Generate commits with the given timestamp in millis. Each commit appends 10 real rows, then
   * overwrites the commit's timestamp (file mtime, or the in-commit timestamp when ICT is on).
   */
  private def generateCommits(location: String, commits: Long*): Unit = {
    var deltaLog = DeltaLog.forTable(spark, dataPath = location)
    var startVersion = deltaLog.unsafeVolatileSnapshot.version + 1
    commits.foreach { ts =>
      val rangeStart = startVersion * 10
      val rangeEnd = rangeStart + 10
      spark.range(rangeStart, rangeEnd).write.format("delta").mode("append").save(location)
      // Construct a new delta log here before calling `DeltaCommitFileProvider` to get the commit
      // file path. This is b/c [[Snapshot.logSegment.deltas]] will *not* be automatically updated
      // after triggering backfill.
      // We will then overwrite the commit timestamp for unbackfilled commits even if we have
      // already backfilled them. This leads to the failure in certain UT where we manually
      // modify/overwrite the commit timestamps.
      // To correctly update the deltas in [[LogSegment]], we construct a fresh delta log.
      DeltaLog.clearCache()
      deltaLog = DeltaLog.forTable(spark, dataPath = location)
      val filePath = DeltaCommitFileProvider
        .apply(snapshot = deltaLog.unsafeVolatileSnapshot)
        .deltaFile(version = startVersion)
      if (isICTEnabledForNewTablesCatalogOwned) {
        // ICT timestamps live inside the commit file / CRC, not in the file's mtime.
        InCommitTimestampTestUtils.overwriteICTInDeltaFile(deltaLog, filePath, Some(ts))
        InCommitTimestampTestUtils.overwriteICTInCrc(deltaLog, startVersion, Some(ts))
      } else {
        val file = new File(filePath.toUri)
        file.setLastModified(ts)
      }
      startVersion += 1
    }
  }

  // Builds the `path@yyyyMMddHHmmssSSS` time-travel identifier for the given epoch millis.
  private def identifierWithTimestamp(identifier: String, ts: Long): String = {
    s"$identifier@${timeFormatter.format(new Date(ts))}"
  }

  // Builds the `path@vN` time-travel identifier for the given version.
  private def identifierWithVersion(identifier: String, v: Long): String = {
    s"$identifier@v$v"
  }

  // Renders epoch millis as a SQL expression casting (fractional) seconds to a timestamp.
  private implicit def longToTimestampExpr(value: Long): String = {
    s"cast($value / 1000 as timestamp)"
  }

  /**
   * Formats epoch-millis values the way Spark SQL renders a timestamp cast to string, so tests
   * can feed them back through the `timestampAsOf` option / `TIMESTAMP AS OF` syntax.
   */
  private def getSparkFormattedTimestamps(values: Long*): Seq[String] = {
    // Simulates getting timestamps directly from Spark SQL.
    // Note: the previous trailing `.map(i => s"$i")` was an identity transform over strings
    // that were already collected, so it has been removed.
    values.map(new Timestamp(_)).toDF("ts")
      .select($"ts".cast("string")).as[String].collect()
  }

  // Test harness: runs `f` against a fresh DeltaLog rooted in a temp dir, driven by a
  // ManualClock so tests fully control commit timestamps.
  private def historyTest(testName: String)(f: (DeltaLog, ManualClock) => Unit): Unit = {
    testQuietly(testName) {
      val clock = new ManualClock()
      withTempDir { dir => f(DeltaLog.forTable(spark, dir, clock), clock) }
    }
  }

  // The legacy (non-ICT) listing path must adjust out-of-order file timestamps into a strictly
  // increasing sequence (each late timestamp becomes previous + 1 ms).
  historyTest("getCommits should monotonize timestamps") { (deltaLog, clock) =>
    if (catalogOwnedDefaultCreationEnabledInTests) {
      // This is fine for CC tables since ICT should've been enabled from the beginning.
      // Hence, we should *never* call [[DeltaHistoryManager.getCommitsWithNonIctTimestamps]].
      cancel("This test is not compatible with CC since ICT should've been enabled from the " +
        "beginning for CC tables.")
    }
    val start = 1540415658000L
    // Make the commits out of order
    generateCommitsCheap(deltaLog,
      clock,
      start,
      start - 5.seconds, // adjusts to start + 1 ms
      start + 1.milli,   // adjusts to start + 2 ms
      start + 2.millis,  // adjusts to start + 3 ms
      start - 2.seconds, // adjusts to start + 4 ms
      start + 10.seconds)

    val commits = DeltaHistoryManager.getCommitsWithNonIctTimestamps(
      deltaLog.store,
      deltaLog.logPath,
      0,
      None,
      deltaLog.newDeltaHadoopConf())
    // Note that when InCommitTimestamps are enabled, the monotonization of timestamps is not
    // performed by getCommits. Instead, the timestamps are already monotonized before they
    // are written in the commit.
    assert(commits.map(_.timestamp) === Seq(start,
      start + 1.millis, start + 2.millis, start + 3.millis, start + 4.millis, start + 10.seconds))
  }

  // DESCRIBE HISTORY should report the same monotonized timestamps (newest first) rather than
  // the raw, out-of-order file timestamps.
  historyTest("describe history timestamps are adjusted according to file timestamp") {
      (deltaLog, clock) =>
    if (isICTEnabledForNewTablesCatalogOwned) {
      // File timestamp adjustment is not needed when ICT is enabled.
      cancel("This test is not compatible with InCommitTimestamps.")
    }
    // this is in '2018-10-24', so earlier than today. The recorded timestamps in commitInfo will
    // be much after this
    val start = 1540415658000L
    // Make the commits out of order
    generateCommitsCheap(deltaLog,
      clock,
      start,
      start - 5.seconds, // adjusts to start + 1 ms
      start + 1.milli   // adjusts to start + 2 ms
    )

    val history = new DeltaHistoryManager(deltaLog)
    val commits = history.getHistory(None)
    // History is returned newest-first.
    assert(commits.map(_.timestamp.getTime) === Seq(start + 2.millis, start + 1.milli, start))
  }

  // Deleting the earliest commit file must make timestamp-based resolution fail with a clear
  // error instead of silently using non-delta files to compute the earliest version.
  historyTest("should filter only delta files when computing earliest version") {
      (deltaLog, clock) =>
    val start = 1540415658000L
    clock.setTime(start)
    generateCommitsCheap(deltaLog, clock, start, start + 10.seconds, start + 20.seconds)

    val history = new DeltaHistoryManager(deltaLog)
    assert(history.getActiveCommitAtTime(start + 15.seconds, false).version === 1)

    val commits2 = history.getHistory(Some(10))
    assert(commits2.last.version === Some(0))

    // Remove version 0's commit file and verify resolution now fails.
    assert(new File(FileNames.unsafeDeltaFile(deltaLog.logPath, 0L).toUri).delete())
    val e = intercept[AnalysisException] {
      history.getActiveCommitAtTime(start + 15.seconds, false).version
    }
    if (catalogOwnedDefaultCreationEnabledInTests &&
        // Since we are creating a table w/ three initial commits, the table would have
        // unbackfilled commits if the backfill batch size is greater or equal to three.
        // See [[generateCommitsCheap]] for details.
        catalogOwnedCoordinatorBackfillBatchSize.exists(_ >= 3)) {
      // We throw an "incorrect" exception for CC tables if there exist any unbackfilled commits
      // and the backfilled commits have been manually deleted. E.g., the 0.json we are deleting
      // in this UT.
      //
      // Please see the comment in [[DeltaHistoryManager.getEarliestRecreatableCommit]] for the
      // detailed rationale.
      assert(e.getMessage.contains("[DELTA_NO_COMMITS_FOUND]"))
    } else {
      assert(e.getMessage.contains("recreatable"))
    }
  }

  // A timestamp between two commits must resolve to the earlier commit; timestamps outside the
  // commit range either fail or clamp, depending on `canReturnLastCommit`.
  historyTest("resolving commits should return commit before timestamp") { (deltaLog, clock) =>
    val start = 1540415658000L
    clock.setTime(start)
    // Make a commit every 20 minutes
    val commits = Seq.tabulate(10)(i => start + (i * 20).minutes)
    generateCommitsCheap(deltaLog, clock, commits: _*)
    // When maxKeys is 2, we will use the parallel search algorithm, when it is 1000, we will
    // use the linear search method
    Seq(1, 2, 1000).foreach { maxKeys =>
      val history = new DeltaHistoryManager(deltaLog, maxKeys)

      (0 until 10).foreach { i =>
        // Half-way between commits i and i+1 resolves to commit i.
        assert(history.getActiveCommitAtTime(start + (i * 20 + 10).minutes, true).version === i)
      }

      val e = intercept[DeltaErrors.TemporallyUnstableInputException] {
        // This is 20 minutes after the last commit
        history.getActiveCommitAtTime(start + 200.minutes, false)
      }
      checkError(
        e,
        "DELTA_TIMESTAMP_GREATER_THAN_COMMIT",
        sqlState = "42816",
        parameters = Map(
          "providedTimestamp" -> "2018-10-24 17:34:18.0",
          "tableName" -> "2018-10-24 17:14:18.0",
          "maximumTimestamp" -> "2018-10-24 17:14:18")
      )
      // With canReturnLastCommit = true, a late timestamp clamps to the last commit.
      assert(history.getActiveCommitAtTime(start + 180.minutes, true).version === 9)

      val e2 = intercept[AnalysisException] {
        history.getActiveCommitAtTime(start - 10.minutes, true)
      }
      assert(e2.getMessage.contains("before the earliest version"))
    }
  }

  /**
   * Creates FileStatus objects, where the name is the version of a commit, and the modification
   * timestamps come from the input.
   */
  private def createFileStatuses(modTimes: Long*): Iterator[FileStatus] = {
    modTimes.iterator.zipWithIndex.map { case (modTime, version) =>
      new FileStatus(
        10L, false, 1, 10L, modTime, FileNames.checkpointFileSingular(new Path("/foo"), version))
    }
  }

  /**
   * Creates a log deletion iterator with a retention `maxTimestamp` and `maxVersion` (both
   * inclusive). The input iterator takes the original file timestamps, and the deleted output will
   * return the adjusted timestamps of files that would actually be consumed by the iterator.
   */
  private def testBufferingLogDeletionIterator(
      maxTimestamp: Long,
      maxVersion: Long)(inputTimestamps: Seq[Long], deleted: Seq[Long]): Unit = {
    val iter = new BufferingLogDeletionIterator(
      createFileStatuses(inputTimestamps: _*), maxTimestamp, maxVersion, FileNames.getFileVersion)
    for (ts <- deleted) {
      assert(iter.hasNext, s"Was supposed to delete $ts, but iterator returned hasNext: false")
      assert(iter.next().getModificationTime === ts, "Returned files out of order!")
    }
    assert(!iter.hasNext, "Iterator should be consumed")
  }

  test("BufferingLogDeletionIterator: iterator behavior") {
    // An empty underlying listing yields an exhausted iterator that fails fast on next().
    val emptyIter = new BufferingLogDeletionIterator(Iterator.empty, 100, 100, _ => 1)
    intercept[NoSuchElementException](emptyIter.next())
    assert(!emptyIter.hasNext)

    // With permissive limits, every file except the newest one is deletable.
    Seq(
      (Seq(10L, 11L), Seq(10L)),
      (Seq(10L, 15L, 25L, 26L), Seq(10L, 15L, 25L))
    ).foreach { case (input, expectedDeleted) =>
      testBufferingLogDeletionIterator(maxTimestamp = 100, maxVersion = 100)(
        inputTimestamps = input,
        deleted = expectedDeleted
      )
    }
  }

  // When a file's timestamp is adjusted upward (to predecessor + 1), deletion must stop as soon
  // as the *adjusted* timestamp exceeds the retention timestamp.
  test("BufferingLogDeletionIterator: " +
    "early exit while handling adjusted timestamps due to timestamp") {
    // only should return 5 because 5 < 7
    testBufferingLogDeletionIterator(maxTimestamp = 7, maxVersion = 100)(
      inputTimestamps = Seq(5, 10, 8, 12),
      deleted = Seq(5)
    )

    // Should only return 5, because 10 is used to adjust the following 8 to 11
    testBufferingLogDeletionIterator(maxTimestamp = 10, maxVersion = 100)(
      inputTimestamps = Seq(5, 10, 8, 12),
      deleted = Seq(5)
    )

    // When it is 11, we can delete both 10 and 8
    testBufferingLogDeletionIterator(maxTimestamp = 11, maxVersion = 100)(
      inputTimestamps = Seq(5, 10, 8, 12),
      deleted = Seq(5, 10, 11)
    )

    // When it is 12, we can return all, except last one
    testBufferingLogDeletionIterator(maxTimestamp = 12, maxVersion = 100)(
      inputTimestamps = Seq(5, 10, 8, 12, 13),
      deleted = Seq(5, 10, 11, 12)
    )

    // Should only return 5, because 10 is used to adjust the following 8 to 11
    testBufferingLogDeletionIterator(maxTimestamp = 10, maxVersion = 100)(
      inputTimestamps = Seq(5, 10, 8),
      deleted = Seq(5)
    )

    // When it is 11, we can delete both 10 and 8
    testBufferingLogDeletionIterator(maxTimestamp = 11, maxVersion = 100)(
      inputTimestamps = Seq(5, 10, 8, 12),
      deleted = Seq(5, 10, 11)
    )
  }

  // Same early-exit behavior as the timestamp variant above, but bounded by maxVersion:
  // a file buffered for timestamp adjustment must not be deleted past the version cap.
  test("BufferingLogDeletionIterator: " +
    "early exit while handling adjusted timestamps due to version") {
    // only should return 5 because we can delete only up to version 0
    testBufferingLogDeletionIterator(maxTimestamp = 100, maxVersion = 0)(
      inputTimestamps = Seq(5, 10, 8, 12),
      deleted = Seq(5)
    )

    // Should only return 5, because 10 is used to adjust the following 8 to 11
    testBufferingLogDeletionIterator(maxTimestamp = 100, maxVersion = 1)(
      inputTimestamps = Seq(5, 10, 8, 12),
      deleted = Seq(5)
    )

    // When we can delete up to version 2, we can return up to version 2
    testBufferingLogDeletionIterator(maxTimestamp = 100, maxVersion = 2)(
      inputTimestamps = Seq(5, 10, 8, 12),
      deleted = Seq(5, 10, 11)
    )

    // When it is version 3, we can return all, except last one
    testBufferingLogDeletionIterator(maxTimestamp = 100, maxVersion = 3)(
      inputTimestamps = Seq(5, 10, 8, 12, 13),
      deleted = Seq(5, 10, 11, 12)
    )

    // Should only return 5, because 10 is used to adjust the following 8 to 11
    testBufferingLogDeletionIterator(maxTimestamp = 100, maxVersion = 1)(
      inputTimestamps = Seq(5, 10, 8),
      deleted = Seq(5)
    )

    // When we can delete up to version 2, we can return up to version 2
    testBufferingLogDeletionIterator(maxTimestamp = 100, maxVersion = 2)(
      inputTimestamps = Seq(5, 10, 8, 12),
      deleted = Seq(5, 10, 11)
    )
  }

  // Cascading adjustments: a run of non-increasing timestamps is rewritten as a chain of
  // predecessor + 1 values, and the whole chain is deleted or retained atomically.
  test("BufferingLogDeletionIterator: multiple adjusted timestamps") {
    Seq(9, 10, 11).foreach { retentionTimestamp =>
      // Files should be buffered but not deleted, because of the file 11, which has adjusted ts 12
      testBufferingLogDeletionIterator(maxTimestamp = retentionTimestamp, maxVersion = 100)(
        inputTimestamps = Seq(5, 10, 8, 11, 14),
        deleted = Seq(5)
      )
    }

    // Safe to delete everything before (including) file: 11 which has adjusted timestamp 12
    testBufferingLogDeletionIterator(maxTimestamp = 12, maxVersion = 100)(
      inputTimestamps = Seq(5, 10, 8, 11, 14),
      deleted = Seq(5, 10, 11, 12)
    )

    Seq(0, 1, 2).foreach { retentionVersion =>
      testBufferingLogDeletionIterator(maxTimestamp = 100, maxVersion = retentionVersion)(
        inputTimestamps = Seq(5, 10, 8, 11, 14),
        deleted = Seq(5)
      )
    }

    testBufferingLogDeletionIterator(maxTimestamp = 100, maxVersion = 3)(
      inputTimestamps = Seq(5, 10, 8, 11, 14),
      deleted = Seq(5, 10, 11, 12)
    )

    // Test when the last element is adjusted with both timestamp and version
    Seq(9, 10, 11).foreach { retentionTimestamp =>
      testBufferingLogDeletionIterator(maxTimestamp = retentionTimestamp, maxVersion = 100)(
        inputTimestamps = Seq(5, 10, 8, 9),
        deleted = Seq(5)
      )
    }

    testBufferingLogDeletionIterator(maxTimestamp = 12, maxVersion = 100)(
      inputTimestamps = Seq(5, 10, 8, 9, 13),
      deleted = Seq(5, 10, 11, 12)
    )

    Seq(0, 1, 2).foreach { retentionVersion =>
      testBufferingLogDeletionIterator(maxTimestamp = 100, maxVersion = retentionVersion)(
        inputTimestamps = Seq(5, 10, 8, 9),
        deleted = Seq(5)
      )
    }

    testBufferingLogDeletionIterator(maxTimestamp = 100, maxVersion = 3)(
      inputTimestamps = Seq(5, 10, 8, 9, 13),
      deleted = Seq(5, 10, 11, 12)
    )

    Seq(9, 10, 11).foreach { retentionTimestamp =>
      testBufferingLogDeletionIterator(maxTimestamp = retentionTimestamp, maxVersion = 100)(
        inputTimestamps = Seq(10, 8, 9),
        deleted = Nil
      )
    }

    // Test the first element causing cascading adjustments
    testBufferingLogDeletionIterator(maxTimestamp = 12, maxVersion = 100)(
      inputTimestamps = Seq(10, 8, 9, 13),
      deleted = Seq(10, 11, 12)
    )

    Seq(0, 1).foreach { retentionVersion =>
      testBufferingLogDeletionIterator(maxTimestamp = 100, maxVersion = retentionVersion)(
        inputTimestamps = Seq(10, 8, 9),
        deleted = Nil
      )
    }

    testBufferingLogDeletionIterator(maxTimestamp = 100, maxVersion = 2)(
      inputTimestamps = Seq(10, 8, 9, 13),
      deleted = Seq(10, 11, 12)
    )

    // Test multiple batches of time adjustments
    testBufferingLogDeletionIterator(maxTimestamp = 12, maxVersion = 100)(
      inputTimestamps = Seq(5, 10, 8, 9, 12, 15, 14, 14), // 5, 10, 11, 12, 13, 15, 16, 17
      deleted = Seq(5)
    )

    Seq(13, 14, 15, 16).foreach { retentionTimestamp =>
      testBufferingLogDeletionIterator(maxTimestamp = retentionTimestamp, maxVersion = 100)(
        inputTimestamps = Seq(5, 10, 8, 9, 12, 15, 14, 14), // 5, 10, 11, 12, 13, 15, 16, 17
        deleted = Seq(5, 10, 11, 12, 13)
      )
    }

    testBufferingLogDeletionIterator(maxTimestamp = 17, maxVersion = 100)(
      inputTimestamps = Seq(5, 10, 8, 9, 12, 15, 14, 14, 18), // 5, 10, 11, 12, 13, 15, 16, 17, 18
      deleted = Seq(5, 10, 11, 12, 13, 15, 16, 17)
    )
  }

  // Regression test: version-based time travel over a missing table must raise
  // AnalysisException rather than some lower-level error.
  test("[SPARK-45383] Time travel on a non-existing table should throw AnalysisException") {
    intercept[AnalysisException] {
      spark.sql("SELECT * FROM not_existing VERSION AS OF 0")
    }
  }

  // A timestamp strictly between commit 0 and commit 1 must resolve to commit 0 (10 rows),
  // both via the `path@time` syntax and via the `timestampAsOf` reader option.
  test("as of timestamp in between commits should use commit before timestamp") {
    withTempDir { dir =>
      val tblLoc = dir.getCanonicalPath
      val start = System.currentTimeMillis() - 5.days.toMillis
      generateCommits(tblLoc, start, start + 20.minutes, start + 40.minutes)

      val tablePathUri = identifierWithTimestamp(tblLoc, start + 10.minutes)

      val df1 = spark.read.format("delta").load(tablePathUri)
      checkAnswer(df1.groupBy().count(), Row(10L))

      // 2 minutes after start
      val timeTwoMinutesAfterStart = new Timestamp(start + 2.minutes)
      val df2 = spark.read.format("delta")
        .option("timestampAsOf", timeTwoMinutesAfterStart.toString).load(tblLoc)

      checkAnswer(df2.groupBy().count(), Row(10L))
    }
  }

  // A timestamp exactly equal to a commit's timestamp must resolve to that commit (inclusive
  // boundary), for both the reader option and the `path@time` syntax.
  test("as of timestamp on exact timestamp") {
    withTempDir { dir =>
      val tblLoc = dir.getCanonicalPath
      val start = System.currentTimeMillis() - 5.days.toMillis
      generateCommits(tblLoc, start, start + 20.minutes)

      // Simulate getting the timestamp directly from Spark SQL
      val ts = getSparkFormattedTimestamps(start, start + 20.minutes)

      checkAnswer(
        spark.read.format("delta").option("timestampAsOf", ts.head).load(tblLoc).groupBy().count(),
        Row(10L)
      )

      checkAnswer(
        spark.read.format("delta").option("timestampAsOf", ts(1)).load(tblLoc).groupBy().count(),
        Row(20L)
      )

      checkAnswer(
        spark.read.format("delta").load(identifierWithTimestamp(tblLoc, start)).groupBy().count(),
        Row(10L)
      )

      checkAnswer(
        spark.read.format("delta").load(identifierWithTimestamp(tblLoc, start + 20.minutes))
          .groupBy().count(),
        Row(20L)
      )
    }
  }

  // An unparseable `timestampAsOf` string must fail with a descriptive AnalysisException.
  test("as of timestamp on invalid timestamp") {
    withTempDir { dir =>
      val tblLoc = dir.getCanonicalPath
      val start = 1540415658000L
      generateCommits(tblLoc, start, start + 20.minutes)

      val ex = intercept[AnalysisException] {
        spark.read.format("delta").option("timestampAsOf", "i am not a timestamp")
          .load(tblLoc).groupBy().count()
      }

      assert(ex.getMessage.contains(
        "The provided timestamp ('i am not a timestamp') cannot be converted to a valid timestamp"))
    }
  }

  // A timestamp later than the last commit must raise DELTA_TIMESTAMP_GREATER_THAN_COMMIT
  // (reporting the latest available timestamp), while the exact last-commit timestamp succeeds.
  test("as of exact timestamp after last commit should fail") {
    withTempDir { dir =>
      val tblLoc = dir.getCanonicalPath
      val start = 1540415658000L
      generateCommits(tblLoc, start)

      // Simulate getting the timestamp directly from Spark SQL
      val ts = getSparkFormattedTimestamps(start + 10.minutes)

      val e1 = intercept[DeltaErrors.TemporallyUnstableInputException] {
        spark.read.format("delta").option("timestampAsOf", ts.head).load(tblLoc).collect()
      }
      checkError(
        e1,
        "DELTA_TIMESTAMP_GREATER_THAN_COMMIT",
        sqlState = "42816",
        parameters = Map(
          "providedTimestamp" -> "2018-10-24 14:24:18.0",
          "tableName" -> "2018-10-24 14:14:18.0",
          "maximumTimestamp" -> "2018-10-24 14:14:18")
      )

      // Same failure through the `path@time` syntax.
      val e2 = intercept[DeltaErrors.TemporallyUnstableInputException] {
        spark.read.format("delta").load(identifierWithTimestamp(tblLoc, start + 10.minutes))
          .collect()
      }
      checkError(
        e2,
        "DELTA_TIMESTAMP_GREATER_THAN_COMMIT",
        sqlState = "42816",
        parameters = Map(
          "providedTimestamp" -> "2018-10-24 14:24:18.0",
          "tableName" -> "2018-10-24 14:14:18.0",
          "maximumTimestamp" -> "2018-10-24 14:14:18")
      )

      checkAnswer(
        spark.read.format("delta").option("timestampAsOf", "2018-10-24 14:14:18")
          .load(tblLoc).groupBy().count(),
        Row(10)
      )
    }
  }

  // Version-based time travel: valid versions resolve to their snapshots, out-of-range versions
  // report the valid range, and a deleted earliest commit surfaces a reconstruction error.
  test("as of with versions") {
    withTempDir { dir =>
      val tblLoc = dir.getCanonicalPath
      val start = System.currentTimeMillis() - 5.days.toMillis
      generateCommits(tblLoc, start, start + 20.minutes, start + 40.minutes)

      val df = spark.read.format("delta").load(identifierWithVersion(tblLoc, 0))
      checkAnswer(df.groupBy().count(), Row(10L))

      checkAnswer(
        spark.read.format("delta").option("versionAsOf", "0").load(tblLoc).groupBy().count(),
        Row(10)
      )

      checkAnswer(
        spark.read.format("delta").option("versionAsOf", 1).load(tblLoc).groupBy().count(),
        Row(20)
      )

      // Version 3 does not exist; the error must mention the valid range [0, 2].
      val e1 = intercept[AnalysisException] {
        spark.read.format("delta").option("versionAsOf", 3).load(tblLoc).collect()
      }
      assert(e1.getMessage.contains("[0, 2]"))

      val deltaLog = DeltaLog.forTable(spark, tblLoc)
      new File(FileNames.unsafeDeltaFile(deltaLog.logPath, 0).toUri).delete()
      val e2 = intercept[AnalysisException] {
        spark.read.format("delta").option("versionAsOf", 0).load(tblLoc).collect()
      }
      if (catalogOwnedDefaultCreationEnabledInTests &&
          // Since we are creating a table w/ three initial commits, the table would have
          // unbackfilled commits if the backfill batch size is greater or equal to three.
          // See [[generateCommits]] for details.
          catalogOwnedCoordinatorBackfillBatchSize.exists(_ >= 3)) {
        // We throw an "incorrect" exception for CC tables if there exist any unbackfilled commits
        // and the backfilled commits have been manually deleted. E.g., the 0.json we are deleting
        // in this UT.
        //
        // Please see the comment in [[DeltaHistoryManager.getEarliestRecreatableCommit]] for the
        // detailed rationale.
        assert(e2.getMessage.contains("[DELTA_NO_COMMITS_FOUND]"))
      } else {
        assert(e2.getMessage.contains("recreatable"))
      }
    }
  }

  // When file timestamps are out of order, time travel must resolve against the *adjusted*
  // (monotonized) timestamps, not the raw file mtimes.
  test("time travelling with adjusted timestamps") {
    if (isICTEnabledForNewTablesCatalogOwned) {
      // ICT Timestamps are always monotonically increasing. Therefore,
      // this test is not needed when ICT is enabled.
      cancel("This test is not compatible with InCommitTimestamps.")
    }
    withTempDir { dir =>
      val tblLoc = dir.getCanonicalPath
      val start = System.currentTimeMillis() - 5.days.toMillis
      // Commit 1's raw mtime is before commit 0's; it gets adjusted to start + 1 ms.
      generateCommits(tblLoc, start, start - 5.seconds, start + 3.minutes)

      val ts = getSparkFormattedTimestamps(
        start, start + 1.milli, start + 119.seconds, start - 3.seconds)

      checkAnswer(
        spark.read.option("timestampAsOf", ts.head).format("delta").load(tblLoc).groupBy().count(),
        Row(10L)
      )

      checkAnswer(
        spark.read.option("timestampAsOf", ts(1)).format("delta").load(tblLoc).groupBy().count(),
        Row(20L)
      )

      checkAnswer(
        spark.read.option("timestampAsOf", ts(2)).format("delta").load(tblLoc).groupBy().count(),
        Row(20L)
      )

      // A timestamp before the (adjusted) earliest commit is rejected.
      val e = intercept[AnalysisException] {
        spark.read.option("timestampAsOf", ts(3)).format("delta").load(tblLoc).collect()
      }
      assert(e.getMessage.contains("before the earliest version"))
    }
  }

  // Supplying both options at once must be rejected. NOTE(review): the scrambled casing of the
  // option keys ("versionaSof" / "timestampAsOF") appears deliberate — presumably exercising
  // case-insensitive option matching in DataFrameReader — confirm before "fixing" it.
  test("can't provide both version and timestamp in DataFrameReader") {
    val e = intercept[IllegalArgumentException] {
      spark.read.option("versionaSof", 1)
        .option("timestampAsOF", "fake").format("delta").load("/some/fake")
    }
    assert(e.getMessage.contains("either provide 'timestampAsOf' or 'versionAsOf'"))
  }

  // A real directory literally named "base@v0" must load as-is; the @v0 suffix only acts as
  // time travel when appended to an existing table path.
  test("don't time travel a valid delta path with @ syntax") {
    withTempDir { dir =>
      val path = new File(dir, "base@v0").getCanonicalPath
      spark.range(10).write.format("delta").mode("append").save(path)
      spark.range(10).write.format("delta").mode("append").save(path)

      checkAnswer(
        spark.read.format("delta").load(path),
        spark.range(10).union(spark.range(10)).toDF()
      )

      // Appending another @v0 to the literal path DOES time travel to version 0.
      checkAnswer(
        spark.read.format("delta").load(path + "@v0"),
        spark.range(10).toDF()
      )
    }
  }

  // The @ syntax is Delta-specific: a non-delta source with an @-named directory loads normally,
  // and appending @v0 simply produces a nonexistent path (AnalysisException).
  test("don't time travel a valid non-delta path with @ syntax") {
    val format = "json"
    withTempDir { dir =>
      val path = new File(dir, "base@v0").getCanonicalPath
      spark.range(10).write.format(format).mode("append").save(path)
      spark.range(10).write.format(format).mode("append").save(path)

      checkAnswer(
        spark.read.format(format).load(path),
        spark.range(10).union(spark.range(10)).toDF()
      )

      checkAnswer(
        spark.table(s"$format.`$path`"),
        spark.range(10).union(spark.range(10)).toDF()
      )

      intercept[AnalysisException] {
        spark.read.format(format).load(path + "@v0").count()
      }

      intercept[AnalysisException] {
        spark.table(s"$format.`$path@v0`").count()
      }
    }
  }

  // Two snapshots of the same table in one query plan must scan independently: a full outer
  // join of v0 (keys 0-4) against v1 (keys 0-9) leaves keys 5-9 unmatched on the v0 side.
  test("scans on different versions of same table are executed correctly") {
    withTempDir { dir =>
      val path = dir.getCanonicalPath
      spark.range(5).selectExpr("id as key", "id * 10 as value").write.format("delta").save(path)

      spark.range(5, 10).selectExpr("id as key", "id * 10 as value")
        .write.format("delta").mode("append").save(path)

      val df = spark.read.format("delta").option("versionAsOf", "0").load(path).as("a").join(
        spark.read.format("delta").option("versionAsOf", "1").load(path).as("b"),
        functions.expr("a.key == b.key"),
        "fullOuter"
      ).where("a.key IS NULL")  // keys 5 to 9 should be null
      assert(df.count() == 5)
    }
  }

  // TIMESTAMP AS OF accepts a full expression (not just a literal) for a db-qualified table.
  test("timestamp as of expression for table in database") {
    withDatabase("testDb") {
      sql("CREATE DATABASE testDb")
      withTable("tbl") {
        spark.range(10).write.format("delta").saveAsTable("testDb.tbl")
        val ts = sql("DESCRIBE HISTORY testDb.tbl").select("timestamp").head().getTimestamp(0)

        // Just asserting this resolves and runs; the coalesce wraps the cast in an expression.
        sql(s"SELECT * FROM testDb.tbl TIMESTAMP AS OF " +
          s"coalesce(CAST ('$ts' AS TIMESTAMP), current_date())")
      }
    }
  }

  // Reading version 0 must use version 0's schema (before the mergeSchema column was added).
  test("time travel with schema changes - should instantiate old schema") {
    withTempDir { dir =>
      val tblLoc = dir.getCanonicalPath
      spark.range(10).write.format("delta").mode("append").save(tblLoc)
      spark.range(10, 20).withColumn("part", 'id)
        .write.format("delta").mode("append").option("mergeSchema", true).save(tblLoc)

      checkAnswer(
        spark.read.option("versionAsOf", 0).format("delta").load(tblLoc),
        spark.range(10).toDF())

      checkAnswer(
        spark.read.format("delta").load(identifierWithVersion(tblLoc, 0)),
        spark.range(10).toDF())
    }
  }

  // Reading version 0 must use version 0's partitioning/schema even after an overwriteSchema
  // rewrite changed the partition column.
  test("time travel with partition changes - should instantiate old schema") {
    withTempDir { dir =>
      val tblLoc = dir.getCanonicalPath
      val v0 = spark.range(10).withColumn("part5", 'id % 5)

      v0.write.format("delta").partitionBy("part5").mode("append").save(tblLoc)
      spark.range(10, 20).withColumn("part2", 'id % 2)
        .write
        .format("delta")
        .partitionBy("part2")
        .mode("overwrite")
        .option("overwriteSchema", true)
        .save(tblLoc)

      checkAnswer(
        spark.read.option("versionAsOf", 0).format("delta").load(tblLoc),
        v0)

      checkAnswer(
        spark.read.format("delta").load(identifierWithVersion(tblLoc, 0)),
        v0)
    }
  }

  // End-to-end SQL syntax: (FOR) VERSION AS OF and (FOR) TIMESTAMP AS OF must match the
  // DataFrameReader options, and out-of-range inputs must raise the dedicated error classes.
  test("time travel support in SQL") {
    withTempDir { dir =>
      val tblLoc = dir.getCanonicalPath
      val start = System.currentTimeMillis() - 5.days.toMillis
      generateCommits(tblLoc, start, start + 20.minutes)
      val tableName = "testTable"

      withTable(tableName) {
        spark.sql(s"create table $tableName(id long) using delta location '$tblLoc'")

        checkAnswer(
          spark.sql(s"SELECT * from $tableName FOR VERSION AS OF 0"),
          spark.read.option("versionAsOf", 0).format("delta").load(tblLoc))

        checkAnswer(
          spark.sql(s"SELECT * from $tableName VERSION AS OF 1"),
          spark.read.option("versionAsOf", 1).format("delta").load(tblLoc))

        val ex = intercept[VersionNotFoundException] {
          spark.sql(s"SELECT * from $tableName FOR VERSION AS OF 2")
        }
        checkError(
          ex,
          "DELTA_VERSION_NOT_FOUND",
          sqlState = "22003",
          parameters = Map("userVersion" -> "2", "earliest" -> "0", "latest" -> "1"))

        val timeAtVersion0 = new Timestamp(start).toString
        val timeAtVersion1 = new Timestamp(start + 20.minutes).toString
        val timeAfterVersion2 = new Timestamp(start + 6.hours).toString

        checkAnswer(
          spark.sql(s"SELECT * from $tableName FOR TIMESTAMP AS OF '$timeAtVersion0'"),
          spark.read.option("versionAsOf", 0).format("delta").load(tblLoc))

        checkAnswer(
          spark.sql(s"SELECT * from $tableName TIMESTAMP AS OF '$timeAtVersion1'"),
          spark.read.option("versionAsOf", 1).format("delta").load(tblLoc))

        val ex2 = intercept[DeltaErrors.TemporallyUnstableInputException] {
          spark.sql(s"SELECT * from $tableName FOR TIMESTAMP AS OF '$timeAfterVersion2'")
        }

        checkError(
          ex2,
          "DELTA_TIMESTAMP_GREATER_THAN_COMMIT",
          sqlState = "42816",
          parameters = Map(
            "providedTimestamp" -> s"$timeAfterVersion2",
            "tableName" -> s"$timeAtVersion1",
            "maximumTimestamp" -> s"${timeAtVersion1.replaceFirst("\\.\\d+$", "")}") // exclude ms
        )
      }
    }
  }


  test("SPARK-41154: Correct relation caching for queries with time travel spec") {
    val tblName = "tab"
    withTable(tblName) {
      // Two versions: v0 contains {1}, v1 contains {1, 2}.
      sql(s"CREATE TABLE $tblName USING DELTA AS SELECT 1 as c")
      sql(s"INSERT INTO $tblName SELECT 2 as c")
      // The two branches reference the same table with different time travel
      // specs; relation caching must not collapse them into one version.
      val union = sql(
        s"""
           |SELECT * FROM $tblName VERSION AS OF '0'
           |UNION ALL
           |SELECT * FROM $tblName VERSION AS OF '1'
           |""".stripMargin)
      checkAnswer(union, Seq(1, 1, 2).map(Row(_)))
    }
  }

  test("Dataframe-based time travel works with different timestamp precisions") {
    val tblName = "test_tab"
    withTable(tblName) {
      sql(s"CREATE TABLE spark_catalog.default.$tblName (a int) USING DELTA")
      // Make sure the timestamps captured below are strictly after table creation.
      Thread.sleep(1000)
      // Microsecond precision: take the engine's current_timestamp as a string.
      val microsTs = spark.sql("SELECT current_timestamp() as ts")
        .select($"ts".cast("string"))
        .head().getString(0)
      // Millisecond precision: JVM clock rendered via java.sql.Timestamp.
      val millisTs = new Timestamp(System.currentTimeMillis()).toString
      // Second precision: JVM clock truncated by formatting.
      val secondsTs = new java.text.SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
        .format(new java.sql.Timestamp(System.currentTimeMillis()))

      // The row below is inserted after all three timestamps were taken, so a
      // time travel read at any of them must see an empty table.
      sql(s"INSERT INTO spark_catalog.default.$tblName VALUES (1)")
      Seq(microsTs, millisTs, secondsTs).foreach { ts =>
        checkAnswer(
          spark.read.option("timestampAsOf", ts).table(s"spark_catalog.default.$tblName"),
          Seq.empty)
      }
    }
  }

  // Helper to generate a unique test table, commits, and timestamps for time travel blocking tests.
  // Creates seven commits (v0..v6) spread over several days in the past, registers the table in
  // the catalog with CDF enabled, and passes (tableName, timestamp of v4, timestamp of v6) to
  // the test body.
  def withTestTable(testBody: (String, String, String) => Unit): Unit = {
    withTempDir { dir =>
      val location = dir.getCanonicalPath
      val tableName = "testTable"
      withTable(tableName) {
        // Base time: 10 days ago; commits are offset from it by fixed durations.
        val base = System.currentTimeMillis() - 10.days.toMillis
        generateCommits(
          location,
          base, // v0
          base + 0.5.days.toMillis, // v1
          base + 1.days.toMillis, // v2
          base + 2.days.toMillis, // v3
          base + 2.5.days.toMillis, // v4
          base + 4.days.toMillis, // v5
          base + 5.days.toMillis // v6
        )
        // Commit timestamps of v4 and v6, rendered the way SQL expects them.
        val t4 = new Timestamp(base + 2.5.days.toMillis).toString
        val t6 = new Timestamp(base + 5.days.toMillis).toString

        spark.sql(s"CREATE TABLE $tableName(id LONG) USING delta LOCATION '$location'")
        spark.sql(
          s"ALTER TABLE $tableName SET TBLPROPERTIES " +
            "('delta.enableChangeDataFeed' = 'true')")

        testBody(tableName, t4, t6)
      }
    }
  }

  // Helper to assert whether a given SQL should or should not throw the retention exception.
  // When `shouldThrow` is false the statement simply has to run without raising.
  def assertBlocked(sql: String, shouldThrow: Boolean): Unit = {
    if (!shouldThrow) {
      spark.sql(sql) // must succeed
    } else {
      val expectedMessage = "Cannot time travel beyond delta.deletedFileRetentionDuration"
      val ex = intercept[Exception] {
        spark.sql(sql)
      }
      assert(ex.getMessage.contains(expectedMessage))
    }
  }

  // 1) SELECT ... AS OF
  test("Block time travel beyond deletedFileRetention") {
    withTestTable { (tbl, t4, t6) =>
      // Old versions/timestamps (v2, v4) are blocked; recent ones (v5, v6) are not.
      val initialCases = Seq(
        s"SELECT * FROM $tbl VERSION AS OF 2" -> true,
        s"SELECT * FROM $tbl TIMESTAMP AS OF '$t4'" -> true,
        s"SELECT * FROM $tbl VERSION AS OF 5" -> false,
        s"SELECT * FROM $tbl TIMESTAMP AS OF '$t6'" -> false)
      initialCases.foreach { case (query, shouldFail) => assertBlocked(query, shouldFail) }

      spark.sql(
        s"ALTER TABLE $tbl " +
          "SET TBLPROPERTIES ('delta.deletedFileRetentionDuration' = 'interval 0 HOURS')")
      // Even after lowering retention to zero, a plain SELECT of the latest
      // version involves no time travel and must still work.
      assertBlocked(s"SELECT * FROM $tbl", false)
      // With zero retention, every time-travel query is rejected.
      Seq(
        s"SELECT * FROM $tbl VERSION AS OF 5" -> true,
        s"SELECT * FROM $tbl TIMESTAMP AS OF '$t6'" -> true)
        .foreach { case (query, shouldFail) => assertBlocked(query, shouldFail) }
    }
  }

  // 2) SELECT ... CHANGES AS OF
  test("Block CDC beyond deletedFileRetention") {
    withTestTable { (tbl, t4, t6) =>
      // table_changes starting at an old version/timestamp is blocked;
      // starting at a recent one is allowed.
      val initialCases = Seq(
        s"SELECT * FROM table_changes('$tbl', 2)" -> true,
        s"SELECT * FROM table_changes('$tbl', '$t4')" -> true,
        s"SELECT * FROM table_changes('$tbl', 5)" -> false,
        s"SELECT * FROM table_changes('$tbl', '$t6')" -> false)
      initialCases.foreach { case (query, shouldFail) => assertBlocked(query, shouldFail) }

      spark.sql(
        s"ALTER TABLE $tbl " +
          "SET TBLPROPERTIES ('delta.deletedFileRetentionDuration' = 'interval 0 HOURS')")
      // With zero retention, any starting point in the past is rejected.
      Seq(
        s"SELECT * FROM table_changes('$tbl', 5)" -> true,
        s"SELECT * FROM table_changes('$tbl', '$t6')" -> true)
        .foreach { case (query, shouldFail) => assertBlocked(query, shouldFail) }
    }
  }

  // 3) RESTORE ... AS OF
  test("Block restore table beyond deletedFileRetention") {
    withTestTable { (tbl, t4, t6) =>
      // Restoring to an old version/timestamp is blocked; restoring to a
      // recent one succeeds (and actually runs the restore).
      val initialCases = Seq(
        s"RESTORE TABLE $tbl TO VERSION AS OF 2" -> true,
        s"RESTORE TABLE $tbl TO TIMESTAMP AS OF '$t4'" -> true,
        s"RESTORE TABLE $tbl TO VERSION AS OF 5" -> false,
        s"RESTORE TABLE $tbl TO TIMESTAMP AS OF '$t6'" -> false)
      initialCases.foreach { case (query, shouldFail) => assertBlocked(query, shouldFail) }

      spark.sql(
        s"ALTER TABLE $tbl " +
          "SET TBLPROPERTIES ('delta.deletedFileRetentionDuration' = 'interval 0 HOURS')")
      // With zero retention, restoring to any previous point is rejected.
      Seq(
        s"RESTORE TABLE $tbl TO VERSION AS OF 5" -> true,
        s"RESTORE TABLE $tbl TO TIMESTAMP AS OF '$t6'" -> true)
        .foreach { case (query, shouldFail) => assertBlocked(query, shouldFail) }
    }
  }

  // 4) CLONE ... AS OF
  test("Block clone table beyond deletedFileRetention") {
    withTestTable { (tbl, t4, t6) =>
      val targets = Seq("targetTable1", "targetTable2", "targetTable3")
      // Wrap all clone targets in withTable so the tables created by the
      // succeeding CLONE statements below are dropped afterwards instead of
      // leaking into other tests.
      withTable(targets: _*) {
        Seq(
          s"CREATE TABLE ${targets(0)} SHALLOW CLONE $tbl VERSION AS OF 2" -> true,
          s"CREATE TABLE ${targets(0)} SHALLOW CLONE $tbl TIMESTAMP AS OF '$t4'" -> true,
          s"CREATE TABLE ${targets(0)} SHALLOW CLONE $tbl VERSION AS OF 5" -> false,
          s"CREATE TABLE ${targets(1)} SHALLOW CLONE $tbl TIMESTAMP AS OF '$t6'" -> false
        ).foreach { case (sql, fail) => assertBlocked(sql, fail) }

        spark.sql(s"ALTER TABLE $tbl" +
          s" SET TBLPROPERTIES ('delta.deletedFileRetentionDuration' = 'interval 0 HOURS')")
        // After setting it to zero, any previous version will fail. Use the
        // still-unused targets(2) here: targets(0) was already created by the
        // successful clone above, so re-using it could fail with a
        // "table already exists" error instead of the retention error.
        Seq(
          s"CREATE TABLE ${targets(2)} SHALLOW CLONE $tbl VERSION AS OF 5" -> true,
          s"CREATE TABLE ${targets(2)} SHALLOW CLONE $tbl TIMESTAMP AS OF '$t6'" -> true
        ).foreach { case (sql, fail) => assertBlocked(sql, fail) }
      }
    }
  }
}

// Re-runs the whole suite with a catalog-owned coordinator backfill batch size of 1.
class DeltaTimeTravelWithCatalogOwnedBatch1Suite extends DeltaTimeTravelSuite {
  override def catalogOwnedCoordinatorBackfillBatchSize: Option[Int] = Some(1)
}

// Re-runs the whole suite with a catalog-owned coordinator backfill batch size of 2.
class DeltaTimeTravelWithCatalogOwnedBatch2Suite extends DeltaTimeTravelSuite {
  override def catalogOwnedCoordinatorBackfillBatchSize: Option[Int] = Some(2)
}

// Re-runs the whole suite with a catalog-owned coordinator backfill batch size of 100
// (effectively batching all commits in these tests into a single backfill).
class DeltaTimeTravelWithCatalogOwnedBatch100Suite extends DeltaTimeTravelSuite {
  override def catalogOwnedCoordinatorBackfillBatchSize: Option[Int] = Some(100)
}
