#include "duckdb/catalog/catalog_entry/duck_index_entry.hpp"
#include "duckdb/catalog/catalog_entry/duck_table_entry.hpp"
#include "duckdb/catalog/catalog_entry/scalar_macro_catalog_entry.hpp"
#include "duckdb/catalog/catalog_entry/table_catalog_entry.hpp"
#include "duckdb/catalog/catalog_entry/type_catalog_entry.hpp"
#include "duckdb/catalog/catalog_entry/view_catalog_entry.hpp"
#include "duckdb/catalog/duck_catalog.hpp"
#include "duckdb/common/checksum.hpp"
#include "duckdb/common/encryption_functions.hpp"
#include "duckdb/common/encryption_key_manager.hpp"
#include "duckdb/common/serializer/binary_deserializer.hpp"
#include "duckdb/common/serializer/buffered_file_reader.hpp"
#include "duckdb/common/serializer/memory_stream.hpp"
#include "duckdb/common/string_util.hpp"
#include "duckdb/execution/index/art/art.hpp"
#include "duckdb/execution/index/index_type_set.hpp"
#include "duckdb/main/attached_database.hpp"
#include "duckdb/main/client_context.hpp"
#include "duckdb/main/config.hpp"
#include "duckdb/main/connection.hpp"
#include "duckdb/main/database.hpp"
#include "duckdb/parser/parsed_data/alter_table_info.hpp"
#include "duckdb/parser/parsed_data/create_schema_info.hpp"
#include "duckdb/parser/parsed_data/create_view_info.hpp"
#include "duckdb/parser/parsed_data/drop_info.hpp"
#include "duckdb/planner/binder.hpp"
#include "duckdb/planner/expression_binder/index_binder.hpp"
#include "duckdb/planner/parsed_data/bound_create_table_info.hpp"
#include "duckdb/storage/single_file_block_manager.hpp"
#include "duckdb/storage/storage_manager.hpp"
#include "duckdb/storage/table/column_data.hpp"
#include "duckdb/storage/table/delete_state.hpp"
#include "duckdb/storage/write_ahead_log.hpp"
#include "duckdb/transaction/meta_transaction.hpp"
#include "duckdb/main/client_data.hpp"
#include "duckdb/main/settings.hpp"

namespace duckdb {

//! Shared state for a single WAL replay pass. One instance is created per scan of the
//! WAL file (both for the deserialize-only checkpoint-detection pass and the actual
//! replay pass).
class ReplayState {
public:
	ReplayState(AttachedDatabase &db, ClientContext &context) : db(db), context(context), catalog(db.GetCatalog()) {
	}

	//! The database being recovered.
	AttachedDatabase &db;
	//! Client context used for catalog operations during replay.
	ClientContext &context;
	//! Catalog of the attached database (cached from db.GetCatalog()).
	Catalog &catalog;
	//! Table targeted by subsequent data entries (set by USE_TABLE; not visible in this chunk).
	optional_ptr<TableCatalogEntry> current_table;
	//! Checkpoint marker found in the WAL, if any (IsValid() when a CHECKPOINT entry was seen).
	MetaBlockPointer checkpoint_id;
	//! WAL format version; defaults to 1 (no checksums) until a WAL_VERSION entry updates it.
	idx_t wal_version = 1;
	//! Byte offset of the entry currently being read.
	optional_idx current_position;
	//! Byte offset of the checkpoint marker, if one was found.
	optional_idx checkpoint_position;
	//! Set when the WAL's checkpoint iteration is exactly one behind the database file,
	//! meaning a checkpoint marker MUST appear later in the WAL (see ReplayVersion).
	optional_idx expected_checkpoint_id;

	//! An index that was re-created during replay but not yet committed to its table's
	//! index list. Indexes are only added at WAL_FLUSH (commit) points.
	struct ReplayIndexInfo {
		ReplayIndexInfo(TableIndexList &index_list, unique_ptr<Index> index, const string &table_schema,
		                const string &table_name)
		    : index_list(index_list), index(std::move(index)), table_schema(table_schema), table_name(table_name) {};

		//! The index list of the owning table, to which the index is added on commit.
		reference<TableIndexList> index_list;
		//! The re-created (not yet attached) index.
		unique_ptr<Index> index;
		//! Schema of the owning table (used to drop pending indexes when the table is dropped).
		string table_schema;
		//! Name of the owning table.
		string table_name;
	};
	//! Indexes awaiting commit; cleared when committed or when their table is dropped.
	vector<ReplayIndexInfo> replay_index_infos;
};

//! Deserializes and replays a single WAL entry. Depending on the WAL version the entry is
//! read directly from the file (v1), checksum-verified (v2), or decrypted and then
//! checksum-verified (v3). In deserialize-only mode, entries are parsed but (mostly) not
//! applied to the catalog.
class WriteAheadLogDeserializer {
public:
	//! Deserialize directly from the file stream (WAL version 1: no checksum framing).
	WriteAheadLogDeserializer(ReplayState &state_p, BufferedFileReader &stream_p, bool deserialize_only = false)
	    : state(state_p), db(state.db), context(state.context), catalog(state.catalog), data(nullptr),
	      stream(nullptr, 0), deserializer(stream_p), deserialize_only(deserialize_only) {
		deserializer.Set<Catalog &>(catalog);
	}
	//! Deserialize from an in-memory buffer that holds the already-verified entry payload.
	WriteAheadLogDeserializer(ReplayState &state_p, unique_ptr<data_t[]> data_p, idx_t size,
	                          bool deserialize_only = false)
	    : state(state_p), db(state.db), context(state.context), catalog(state.catalog), data(std::move(data_p)),
	      stream(data.get(), size), deserializer(stream), deserialize_only(deserialize_only) {
		deserializer.Set<Catalog &>(catalog);
	}

	//! Construct a deserializer for the next entry in the stream, applying the framing
	//! (size/checksum/encryption) dictated by the WAL version seen so far.
	static WriteAheadLogDeserializer GetEntryDeserializer(ReplayState &state_p, BufferedFileReader &stream,
	                                                      bool deserialize_only = false) {
		if (state_p.wal_version == 1) {
			// old WAL versions do not have checksums
			return WriteAheadLogDeserializer(state_p, stream, deserialize_only);
		}

		if (state_p.wal_version == 2) {
			// read the size and checksum
			auto size = stream.Read<uint64_t>();
			auto stored_checksum = stream.Read<uint64_t>();
			auto offset = stream.CurrentOffset();
			auto file_size = stream.FileSize();

			// a size past end-of-file indicates a torn/corrupt entry
			if (offset + size > file_size) {
				throw SerializationException(
				    "Corrupt WAL file: entry size exceeded remaining data in file at byte position %llu "
				    "(found entry with size %llu bytes, file size %llu bytes)",
				    offset, size, file_size);
			}

			// allocate a buffer and read data into the buffer
			auto buffer = unique_ptr<data_t[]>(new data_t[size]);
			stream.ReadData(buffer.get(), size);

			// compute and verify the checksum
			auto computed_checksum = Checksum(buffer.get(), size);
			if (stored_checksum != computed_checksum) {
				throw IOException("Corrupt WAL file: entry at byte position %llu computed checksum %llu does not match "
				                  "stored checksum %llu",
				                  offset, computed_checksum, stored_checksum);
			}

			return WriteAheadLogDeserializer(state_p, std::move(buffer), size, deserialize_only);
		}

		if (state_p.wal_version == 3) {
			auto &database = state_p.db.GetDatabase();
			//! Version 3 means that the WAL is encrypted
			//! For encryption, the length field remains plaintext
			//! After the length field, we store a 12-byte nonce (for GCM)
			//! After the nonce, we store the checksum, followed by the actual entry
			//! After the stored entry, we store a 16-byte tag (for GCM).

			// read the size (this is excluding the nonce, checksum and tag)
			auto size = stream.Read<uint64_t>();
			// the ciphertext size is including the checksum
			const auto ciphertext_size = size + sizeof(uint64_t);

			auto offset = stream.CurrentOffset();
			auto file_size = stream.FileSize();

			EncryptionNonce nonce;
			EncryptionTag tag;

			// bounds check covers nonce + ciphertext (checksum + entry) + tag
			if (offset + nonce.size() + ciphertext_size + tag.size() > file_size) {
				throw SerializationException(
				    "Corrupt Encrypted WAL file: entry size exceeded remaining data in file at byte position %llu "
				    "(found entry with size %llu bytes, file size %llu bytes)",
				    offset, size, file_size);
			}

			stream.ReadData(nonce.data(), nonce.size());

			// look up the database's derived encryption key
			auto &keys = EncryptionKeyManager::Get(state_p.db.GetDatabase());
			auto &catalog = state_p.db.GetCatalog().Cast<DuckCatalog>();
			auto derived_key = keys.GetKey(catalog.GetEncryptionKeyId());

			//! initialize the decryption
			auto encryption_state = database.GetEncryptionUtil()->CreateEncryptionState(
			    state_p.db.GetStorageManager().GetCipher(), MainHeader::DEFAULT_ENCRYPTION_KEY_LENGTH);
			encryption_state->InitializeDecryption(nonce.data(), nonce.size(), derived_key,
			                                       MainHeader::DEFAULT_ENCRYPTION_KEY_LENGTH);

			//! Allocate a decryption buffer
			auto buffer = unique_ptr<data_t[]>(new data_t[ciphertext_size]);
			auto out_buffer = unique_ptr<data_t[]>(new data_t[size]);

			// decrypt in place: ciphertext buffer becomes plaintext (checksum + entry)
			stream.ReadData(buffer.get(), ciphertext_size);
			encryption_state->Process(buffer.get(), ciphertext_size, buffer.get(), ciphertext_size);

			//! read and verify the stored tag
			stream.ReadData(tag.data(), tag.size());

			encryption_state->Finalize(buffer.get(), ciphertext_size, tag.data(), tag.size());

			//! read the stored checksum
			auto stored_checksum = Load<uint64_t>(buffer.get());

			//! copy the decrypted data to the output buffer
			memcpy(out_buffer.get(), buffer.get() + sizeof(stored_checksum), size);

			// compute and verify the checksum
			auto computed_checksum = Checksum(out_buffer.get(), size);
			if (stored_checksum != computed_checksum) {
				throw IOException("Corrupt WAL file: entry at byte position %llu computed checksum %llu does not match "
				                  "stored checksum %llu",
				                  offset, computed_checksum, stored_checksum);
			}

			return WriteAheadLogDeserializer(state_p, std::move(out_buffer), size, deserialize_only);
		}

		throw IOException("Failed to read WAL of version %llu - can only read version 1, 2 and 3 (encrypted)",
		                  state_p.wal_version);
	}

	//! Replay a single entry. Returns true if the entry was a WAL_FLUSH marker (i.e. a
	//! commit point), false otherwise.
	bool ReplayEntry() {
		deserializer.Begin();
		auto wal_type = deserializer.ReadProperty<WALType>(100, "wal_type");
		if (wal_type == WALType::WAL_FLUSH) {
			deserializer.End();
			return true;
		}
		ReplayEntry(wal_type);
		deserializer.End();
		return false;
	}

	//! Whether entries are only parsed (checkpoint-detection pass) rather than applied.
	bool DeserializeOnly() const {
		return deserialize_only;
	}

	//! Throw the standard checkpoint-iteration mismatch error.
	static void ThrowVersionError(idx_t checkpoint_iteration, idx_t expected_checkpoint_iteration);

protected:
	//! Dispatch a non-flush entry to the matching Replay* handler.
	void ReplayEntry(WALType wal_type);

	void ReplayVersion();

	void ReplayCreateTable();
	void ReplayDropTable();
	void ReplayAlter();

	void ReplayCreateView();
	void ReplayDropView();

	void ReplayCreateSchema();
	void ReplayDropSchema();

	void ReplayCreateType();
	void ReplayDropType();

	void ReplayCreateSequence();
	void ReplayDropSequence();
	void ReplaySequenceValue();

	void ReplayCreateMacro();
	void ReplayDropMacro();

	void ReplayCreateTableMacro();
	void ReplayDropTableMacro();

	void ReplayCreateIndex();
	void ReplayDropIndex();

	void ReplayUseTable();
	void ReplayInsert();
	void ReplayRowGroupData();
	void ReplayDelete();
	void ReplayUpdate();
	void ReplayCheckpoint();

private:
	//! Materialize serialized index data into (persistent) blocks; see definition below.
	void ReplayIndexData(IndexStorageInfo &info);

private:
	ReplayState &state;
	AttachedDatabase &db;
	ClientContext &context;
	Catalog &catalog;
	//! Owned entry payload (v2/v3 path); null when reading the file stream directly (v1).
	unique_ptr<data_t[]> data;
	//! Stream over `data`; unused (nullptr, 0) in the direct-file-stream case.
	MemoryStream stream;
	BinaryDeserializer deserializer;
	bool deserialize_only;
	// NOTE(review): this member appears unused in this file - handlers use
	// state.expected_checkpoint_id instead; confirm before removing.
	optional_idx expected_checkpoint_id;
};

//===--------------------------------------------------------------------===//
// Replay
//===--------------------------------------------------------------------===//
//! Replay the WAL at wal_path (if any) into the attached database.
//! Always returns a usable WriteAheadLog object for the storage manager.
unique_ptr<WriteAheadLog> WriteAheadLog::Replay(QueryContext context, StorageManager &storage_manager,
                                                const string &wal_path) {
	auto &fs = FileSystem::Get(storage_manager.GetAttached());
	auto wal_file = fs.OpenFile(wal_path, FileFlags::FILE_FLAGS_READ | FileFlags::FILE_FLAGS_NULL_IF_NOT_EXISTS);
	if (wal_file) {
		// the WAL exists - replay it
		// context is passed for metric collection purposes only!!
		auto replayed_wal = ReplayInternal(context, storage_manager, std::move(wal_file));
		if (replayed_wal) {
			return replayed_wal;
		}
		// replay returning NULL indicates we can nuke the WAL entirely - but only if this is not a read-only connection
		if (!storage_manager.GetAttached().IsReadOnly()) {
			fs.TryRemoveFile(wal_path);
		}
	}
	// no WAL (or fully-applied WAL) - instantiate an empty WAL
	return make_uniq<WriteAheadLog>(storage_manager, wal_path);
}

//! Stream the reader's contents into the target file, buffer_size bytes at a time,
//! stopping at the byte offset copy_end.
static void CopyOverWAL(QueryContext context, BufferedFileReader &reader, FileHandle &target, data_ptr_t buffer,
                        idx_t buffer_size, idx_t copy_end) {
	while (!reader.Finished()) {
		auto remaining = copy_end - reader.CurrentOffset();
		auto chunk_size = MinValue<idx_t>(buffer_size, remaining);
		if (chunk_size == 0) {
			// reached copy_end
			break;
		}
		reader.ReadData(context, buffer, chunk_size);
		target.Write(buffer, chunk_size);
	}
}

//! Replay a WAL file. Performs two passes: a deserialize-only pass to detect a checkpoint
//! marker (and reconcile with any checkpoint WAL), then the actual replay pass that applies
//! entries to the catalog/storage. Returns nullptr when the WAL can be discarded entirely.
unique_ptr<WriteAheadLog> WriteAheadLog::ReplayInternal(QueryContext context, StorageManager &storage_manager,
                                                        unique_ptr<FileHandle> handle, WALReplayState replay_state) {
	auto &database = storage_manager.GetAttached();
	Connection con(database.GetDatabase());
	auto wal_path = handle->GetPath();
	BufferedFileReader reader(FileSystem::Get(database), std::move(handle));
	if (reader.Finished()) {
		// WAL file exists, but it is empty - we can delete the file
		return nullptr;
	}

	con.BeginTransaction();
	MetaTransaction::Get(*con.context).ModifyDatabase(database, DatabaseModificationType());

	auto &config = DBConfig::GetConfig(database.GetDatabase());
	// first deserialize the WAL to look for a checkpoint flag
	// if there is a checkpoint flag, we might have already flushed the contents of the WAL to disk
	ReplayState checkpoint_state(database, *con.context);
	try {
		idx_t replay_entry_count = 0;
		while (true) {
			replay_entry_count++;
			// read the current entry (deserialize only)
			checkpoint_state.current_position = reader.CurrentOffset();
			auto deserializer = WriteAheadLogDeserializer::GetEntryDeserializer(checkpoint_state, reader, true);
			if (deserializer.ReplayEntry()) {
				// check if the file is exhausted
				if (reader.Finished()) {
					// we finished reading the file: break
					break;
				}
			}
		}
		// record how many entries we scanned (metric collection only)
		auto client_context = context.GetClientContext();
		if (client_context) {
			auto &profiler = *client_context->client_data->profiler;
			profiler.AddToCounter(MetricType::WAL_REPLAY_ENTRY_COUNT, replay_entry_count);
		}
	} catch (std::exception &ex) { // LCOV_EXCL_START
		ErrorData error(ex);
		// ignore serialization exceptions - they signal a torn WAL
		if (error.Type() != ExceptionType::SERIALIZATION) {
			error.Throw("Failure while replaying WAL file \"" + wal_path + "\": ");
		}
	} // LCOV_EXCL_STOP
	unique_ptr<FileHandle> checkpoint_handle;
	if (checkpoint_state.checkpoint_id.IsValid()) {
		// a checkpoint WAL must never itself contain a checkpoint marker
		if (replay_state == WALReplayState::CHECKPOINT_WAL) {
			throw InvalidInputException(
			    "Failure while replaying checkpoint WAL file \"%s\": checkpoint WAL cannot contain a checkpoint marker",
			    wal_path);
		}
		// there is a checkpoint flag
		// this means a checkpoint was on-going when we crashed
		// we need to reconcile this with what is in the data file
		// first check if there is a checkpoint WAL
		auto &manager = database.GetStorageManager();
		auto &fs = FileSystem::Get(storage_manager.GetAttached());
		auto checkpoint_wal = manager.GetCheckpointWALPath();
		checkpoint_handle =
		    fs.OpenFile(checkpoint_wal, FileFlags::FILE_FLAGS_READ | FileFlags::FILE_FLAGS_NULL_IF_NOT_EXISTS);
		bool checkpoint_was_successful = manager.IsCheckpointClean(checkpoint_state.checkpoint_id);
		if (!checkpoint_handle) {
			// no checkpoint WAL - either we just need to replay this WAL, or we are done
			if (checkpoint_was_successful) {
				// the contents of the WAL have already been checkpointed and there is no checkpoint WAL - we are done
				return nullptr;
			}
		} else {
			// we have a checkpoint WAL
			if (checkpoint_was_successful) {
				// the checkpoint was successful
				// the main WAL is no longer needed, we only need to replay the checkpoint WAL
				// if this is a read-only connection then replay the checkpoint WAL directly
				if (storage_manager.GetAttached().IsReadOnly()) {
					return ReplayInternal(context, storage_manager, std::move(checkpoint_handle),
					                      WALReplayState::CHECKPOINT_WAL);
				}
				// if this is not a read-only connection we need to finish the checkpoint
				// overwrite the current WAL with the checkpoint WAL
				// (release the handle first so the file can be moved)
				checkpoint_handle.reset();

				fs.MoveFile(checkpoint_wal, wal_path);

				// now open the handle again and replay the checkpoint WAL
				checkpoint_handle =
				    fs.OpenFile(wal_path, FileFlags::FILE_FLAGS_READ | FileFlags::FILE_FLAGS_NULL_IF_NOT_EXISTS);
				return ReplayInternal(context, storage_manager, std::move(checkpoint_handle),
				                      WALReplayState::CHECKPOINT_WAL);
			}
			// the checkpoint was unsuccessful
			// this means we need to replay both this WAL and the checkpoint WAL
			// if this is a read-only connection - replay both WAL files
			if (!storage_manager.GetAttached().IsReadOnly()) {
				// if this is not a read-only connection, then merge the two WALs and replay the merged WAL
				// we merge into the recovery WAL path
				auto recovery_path = manager.GetRecoveryWALPath();
				auto recovery_handle =
				    fs.OpenFile(recovery_path, FileFlags::FILE_FLAGS_WRITE | FileFlags::FILE_FLAGS_FILE_CREATE_NEW);

				static constexpr idx_t BATCH_SIZE = Storage::DEFAULT_BLOCK_SIZE;
				auto buffer = make_uniq_array<data_t>(BATCH_SIZE);

				// first copy over the main WAL contents (up to the checkpoint marker)
				auto copy_end = checkpoint_state.checkpoint_position.GetIndex();
				reader.Reset();
				CopyOverWAL(context, reader, *recovery_handle, buffer.get(), BATCH_SIZE, copy_end);

				// now copy over the checkpoint WAL
				{
					BufferedFileReader checkpoint_reader(FileSystem::Get(database), std::move(checkpoint_handle));

					// skip over the version entry
					ReplayState checkpoint_replay_state(database, *con.context);
					auto deserializer = WriteAheadLogDeserializer::GetEntryDeserializer(checkpoint_replay_state,
					                                                                    checkpoint_reader, true);
					deserializer.ReplayEntry();

					// both WALs must use the same on-disk format, otherwise the merged file is unreadable
					if (checkpoint_replay_state.wal_version != checkpoint_state.wal_version) {
						throw InvalidInputException("Failure while replaying checkpoint WAL file \"%s\": checkpoint "
						                            "WAL version is different from main WAL version",
						                            wal_path);
					}

					CopyOverWAL(context, checkpoint_reader, *recovery_handle, buffer.get(), BATCH_SIZE,
					            checkpoint_reader.FileSize());
				}

				auto debug_checkpoint_abort =
				    DBConfig::GetSetting<DebugCheckpointAbortSetting>(storage_manager.GetDatabase());

				// move over the recovery WAL over the main WAL
				recovery_handle->Sync();
				recovery_handle.reset();

				if (debug_checkpoint_abort == CheckpointAbort::DEBUG_ABORT_BEFORE_MOVING_RECOVERY) {
					throw FatalException(
					    "Checkpoint aborted before moving recovery file because of PRAGMA checkpoint_abort flag");
				}

				fs.MoveFile(recovery_path, wal_path);

				if (debug_checkpoint_abort == CheckpointAbort::DEBUG_ABORT_BEFORE_DELETING_CHECKPOINT_WAL) {
					throw FatalException(
					    "Checkpoint aborted before deleting checkpoint file because of PRAGMA checkpoint_abort flag");
				}

				// delete the checkpoint WAL
				fs.RemoveFile(checkpoint_wal);

				// replay the (combined) recovery WAL
				auto main_handle = fs.OpenFile(wal_path, FileFlags::FILE_FLAGS_READ);
				return ReplayInternal(context, storage_manager, std::move(main_handle), WALReplayState::CHECKPOINT_WAL);
			}
		}
	}
	if (checkpoint_state.expected_checkpoint_id.IsValid()) {
		// we expected a checkpoint id - but no checkpoint has happened - abort!
		auto expected_id = checkpoint_state.expected_checkpoint_id.GetIndex();
		WriteAheadLogDeserializer::ThrowVersionError(expected_id - 1, expected_id);
	}

	// we need to recover from the WAL: actually set up the replay state
	ReplayState state(database, *con.context);

	// reset the reader - we are going to read the WAL from the beginning again
	reader.Reset();

	// replay the WAL
	// note that everything is wrapped inside a try/catch block here
	// there can be errors in WAL replay because of a corrupt WAL file
	idx_t successful_offset = 0;
	bool all_succeeded = false;
	try {
		while (true) {
			// read the current entry
			auto deserializer = WriteAheadLogDeserializer::GetEntryDeserializer(state, reader);
			if (deserializer.ReplayEntry()) {
				// WAL_FLUSH marker: commit everything replayed since the last flush
				con.Commit();

				// Commit any outstanding indexes.
				for (auto &info : state.replay_index_infos) {
					info.index_list.get().AddIndex(std::move(info.index));
				}
				state.replay_index_infos.clear();

				successful_offset = reader.CurrentOffset();
				// check if the file is exhausted
				if (reader.Finished()) {
					// we finished reading the file: break
					all_succeeded = true;
					break;
				}
				con.BeginTransaction();
				MetaTransaction::Get(*con.context).ModifyDatabase(database, DatabaseModificationType());
			}
		}
	} catch (std::exception &ex) { // LCOV_EXCL_START
		// exception thrown in WAL replay: rollback
		con.Query("ROLLBACK");
		ErrorData error(ex);
		// serialization failure means a truncated WAL
		// these failures are ignored unless abort_on_wal_failure is true
		// other failures always result in an error
		if (config.options.abort_on_wal_failure || error.Type() != ExceptionType::SERIALIZATION) {
			error.Throw("Failure while replaying WAL file \"" + wal_path + "\": ");
		}
	} catch (...) {
		// exception thrown in WAL replay: rollback
		con.Query("ROLLBACK");
		throw;
	} // LCOV_EXCL_STOP
	if (all_succeeded && checkpoint_handle) {
		// we have successfully replayed the main WAL - but there is still a checkpoint WAL remaining
		// this can only happen in read-only mode
		// replay the checkpoint WAL and return
		return ReplayInternal(context, storage_manager, std::move(checkpoint_handle), WALReplayState::CHECKPOINT_WAL);
	}
	// a partially-replayed WAL must be truncated back to the last successful flush point
	auto init_state = all_succeeded ? WALInitState::UNINITIALIZED : WALInitState::UNINITIALIZED_REQUIRES_TRUNCATE;
	return make_uniq<WriteAheadLog>(storage_manager, wal_path, successful_offset, init_state);
}

//===--------------------------------------------------------------------===//
// Replay Entries
//===--------------------------------------------------------------------===//
//! Dispatch a single (non-flush) WAL entry to its replay handler.
void WriteAheadLogDeserializer::ReplayEntry(WALType entry_type) {
	switch (entry_type) {
	// WAL metadata
	case WALType::WAL_VERSION:
		ReplayVersion();
		break;
	case WALType::CHECKPOINT:
		ReplayCheckpoint();
		break;
	// catalog: tables
	case WALType::CREATE_TABLE:
		ReplayCreateTable();
		break;
	case WALType::DROP_TABLE:
		ReplayDropTable();
		break;
	case WALType::ALTER_INFO:
		ReplayAlter();
		break;
	// catalog: views
	case WALType::CREATE_VIEW:
		ReplayCreateView();
		break;
	case WALType::DROP_VIEW:
		ReplayDropView();
		break;
	// catalog: schemas
	case WALType::CREATE_SCHEMA:
		ReplayCreateSchema();
		break;
	case WALType::DROP_SCHEMA:
		ReplayDropSchema();
		break;
	// catalog: custom types
	case WALType::CREATE_TYPE:
		ReplayCreateType();
		break;
	case WALType::DROP_TYPE:
		ReplayDropType();
		break;
	// catalog: sequences
	case WALType::CREATE_SEQUENCE:
		ReplayCreateSequence();
		break;
	case WALType::DROP_SEQUENCE:
		ReplayDropSequence();
		break;
	case WALType::SEQUENCE_VALUE:
		ReplaySequenceValue();
		break;
	// catalog: macros
	case WALType::CREATE_MACRO:
		ReplayCreateMacro();
		break;
	case WALType::DROP_MACRO:
		ReplayDropMacro();
		break;
	case WALType::CREATE_TABLE_MACRO:
		ReplayCreateTableMacro();
		break;
	case WALType::DROP_TABLE_MACRO:
		ReplayDropTableMacro();
		break;
	// catalog: indexes
	case WALType::CREATE_INDEX:
		ReplayCreateIndex();
		break;
	case WALType::DROP_INDEX:
		ReplayDropIndex();
		break;
	// table data
	case WALType::USE_TABLE:
		ReplayUseTable();
		break;
	case WALType::INSERT_TUPLE:
		ReplayInsert();
		break;
	case WALType::ROW_GROUP_DATA:
		ReplayRowGroupData();
		break;
	case WALType::DELETE_TUPLE:
		ReplayDelete();
		break;
	case WALType::UPDATE_TUPLE:
		ReplayUpdate();
		break;
	default:
		throw InternalException("Invalid WAL entry type!");
	}
}

//===--------------------------------------------------------------------===//
// Replay Version
//===--------------------------------------------------------------------===//
//! Throw the standard error for a WAL whose checkpoint iteration does not match the
//! database file's checkpoint iteration.
void WriteAheadLogDeserializer::ThrowVersionError(idx_t checkpoint_iteration, idx_t expected_checkpoint_iteration) {
	// describe whether the WAL lags behind or is ahead of the database file
	string relation;
	if (checkpoint_iteration < expected_checkpoint_iteration) {
		relation = "an older";
	} else {
		relation = "a newer";
	}
	throw IOException("This WAL was created for this database file, but the WAL checkpoint iteration does not "
	                  "match the database file. "
	                  "That means the WAL was created for %s version of this database. File checkpoint "
	                  "iteration: %d, WAL checkpoint iteration: %d",
	                  relation, expected_checkpoint_iteration, checkpoint_iteration);
}

//! Replay a WAL_VERSION entry: record the WAL format version and verify that the WAL
//! belongs to this database file (identifier + checkpoint iteration).
void WriteAheadLogDeserializer::ReplayVersion() {
	state.wal_version = deserializer.ReadProperty<idx_t>(101, "version");

	auto &single_file_block_manager = db.GetStorageManager().GetBlockManager().Cast<SingleFileBlockManager>();
	data_t db_identifier[MainHeader::DB_IDENTIFIER_LEN];
	bool is_set = false;
	// the identifier is optional - older WALs do not store it
	deserializer.ReadOptionalList(102, "db_identifier", [&](Deserializer::List &list, idx_t i) {
		db_identifier[i] = list.ReadElement<uint8_t>();
		is_set = true;
	});
	auto checkpoint_iteration = deserializer.ReadPropertyWithDefault<optional_idx>(103, "checkpoint_iteration");
	if (!is_set || !checkpoint_iteration.IsValid()) {
		// nothing to verify against - accept the WAL as-is
		return;
	}
	// the WAL must have been written for this exact database file
	auto expected_db_identifier = single_file_block_manager.GetDBIdentifier();
	if (!MainHeader::CompareDBIdentifiers(db_identifier, expected_db_identifier)) {
		throw IOException("WAL does not match database file.");
	}

	auto wal_checkpoint_iteration = checkpoint_iteration.GetIndex();
	auto expected_checkpoint_iteration = single_file_block_manager.GetCheckpointIteration();
	if (expected_checkpoint_iteration != wal_checkpoint_iteration) {
		if (wal_checkpoint_iteration + 1 == expected_checkpoint_iteration) {
			// this iteration is exactly one lower than the expected iteration
			// this can happen if we aborted AFTER checkpointing the file, but BEFORE truncating the WAL
			// expect this situation to occur - we will throw an error if it does not later on
			state.expected_checkpoint_id = expected_checkpoint_iteration;
			return;
		}
		ThrowVersionError(wal_checkpoint_iteration, expected_checkpoint_iteration);
	}
}

//===--------------------------------------------------------------------===//
// Replay Table
//===--------------------------------------------------------------------===//
//! Replay a CREATE_TABLE entry: re-bind the serialized table info and create the table.
void WriteAheadLogDeserializer::ReplayCreateTable() {
	auto info = deserializer.ReadProperty<unique_ptr<CreateInfo>>(101, "table");
	if (DeserializeOnly()) {
		return;
	}
	// bind the constraints to the table again
	// (BindCreateTableCheckpoint is invoked statically - the previously-created
	// unused local Binder has been removed)
	auto &schema = catalog.GetSchema(context, info->schema);
	auto bound_info = Binder::BindCreateTableCheckpoint(std::move(info), schema);

	catalog.CreateTable(context, *bound_info);
}

void WriteAheadLogDeserializer::ReplayDropTable() {
	DropInfo info;

	info.type = CatalogType::TABLE_ENTRY;
	info.schema = deserializer.ReadProperty<string>(101, "schema");
	info.name = deserializer.ReadProperty<string>(102, "name");
	if (DeserializeOnly()) {
		return;
	}

	// Remove any replay indexes of this table.
	state.replay_index_infos.erase(std::remove_if(state.replay_index_infos.begin(), state.replay_index_infos.end(),
	                                              [&info](const ReplayState::ReplayIndexInfo &replay_info) {
		                                              return replay_info.table_schema == info.schema &&
		                                                     replay_info.table_name == info.name;
	                                              }),
	                               state.replay_index_infos.end());

	catalog.DropEntry(context, info);
}

//! Apply an ALTER directly to the catalog, unless we are only scanning the WAL.
void ReplayWithoutIndex(ClientContext &context, Catalog &catalog, AlterInfo &info, const bool only_deserialize) {
	if (!only_deserialize) {
		catalog.Alter(context, info);
	}
}

//! Read serialized index storage data from the WAL into buffers and convert them to
//! persistent blocks, recording the resulting block ids in `info`.
//! In deserialize-only mode the data is read (to advance the stream) but not persisted.
void WriteAheadLogDeserializer::ReplayIndexData(IndexStorageInfo &info) {
	D_ASSERT(info.IsValid() && !info.name.empty());

	auto &single_file_sm = db.GetStorageManager().Cast<SingleFileStorageManager>();
	auto &block_manager = single_file_sm.block_manager;
	auto &buffer_manager = block_manager->buffer_manager;

	// one list element per allocator of the index
	deserializer.ReadList(103, "index_storage", [&](Deserializer::List &list, idx_t i) {
		auto &data_info = info.allocator_infos[i];

		// Read the data into buffer handles and convert them to blocks on disk.
		for (idx_t j = 0; j < data_info.allocation_sizes.size(); j++) {
			// Read the data into a buffer handle.
			auto buffer_handle = buffer_manager.Allocate(MemoryTag::ART_INDEX, block_manager.get(), false);
			auto block_handle = buffer_handle.GetBlockHandle();
			auto data_ptr = buffer_handle.Ptr();

			// reads allocation_sizes[j] bytes of raw data into the buffer
			list.ReadElement<bool>(data_ptr, data_info.allocation_sizes[j]);

			// Convert the buffer handle to a persistent block and store the block id.
			if (!deserialize_only) {
				auto block_id = block_manager->GetFreeBlockIdForCheckpoint();
				block_manager->ConvertToPersistent(context, block_id, std::move(block_handle),
				                                   std::move(buffer_handle));
				data_info.block_pointers[j].block_id = block_id;
			}
		}
	});
}

//! Replay an ALTER_INFO entry. Most alters are applied directly; ADD PRIMARY KEY
//! additionally carries serialized index storage, which is materialized and the index
//! re-created (committed to the table's index list at the next WAL_FLUSH).
void WriteAheadLogDeserializer::ReplayAlter() {
	auto info = deserializer.ReadProperty<unique_ptr<ParseInfo>>(101, "info");
	auto &alter_info = info->Cast<AlterInfo>();
	if (!alter_info.IsAddPrimaryKey()) {
		// no index payload follows - apply (or skip, in deserialize-only mode) directly
		return ReplayWithoutIndex(context, catalog, alter_info, DeserializeOnly());
	}

	// the index data must always be read to keep the stream position correct
	auto index_storage_info = deserializer.ReadProperty<IndexStorageInfo>(102, "index_storage_info");
	ReplayIndexData(index_storage_info);
	if (DeserializeOnly()) {
		return;
	}

	auto &table_info = alter_info.Cast<AlterTableInfo>();
	auto &constraint_info = table_info.Cast<AddConstraintInfo>();
	auto &unique_info = constraint_info.constraint->Cast<UniqueConstraint>();

	auto &table =
	    catalog.GetEntry<TableCatalogEntry>(context, table_info.schema, table_info.name).Cast<DuckTableEntry>();
	auto &column_list = table.GetColumns();

	// Add the table to the bind context to bind the parsed expressions.
	auto binder = Binder::CreateBinder(context);
	vector<LogicalType> column_types;
	vector<string> column_names;
	for (auto &col : column_list.Logical()) {
		column_types.push_back(col.Type());
		column_names.push_back(col.Name());
	}

	// Create a binder to bind the parsed expressions.
	vector<ColumnIndex> column_indexes;
	binder->bind_context.AddBaseTable(0, string(), column_names, column_types, column_indexes, table);
	IndexBinder idx_binder(*binder, context);

	// Bind the parsed expressions to create unbound expressions.
	vector<unique_ptr<Expression>> unbound_expressions;
	auto logical_indexes = unique_info.GetLogicalIndexes(column_list);
	for (const auto &logical_index : logical_indexes) {
		auto &col = column_list.GetColumn(logical_index);
		unique_ptr<ParsedExpression> parsed = make_uniq<ColumnRefExpression>(col.GetName(), table_info.name);
		unbound_expressions.push_back(idx_binder.Bind(parsed));
	}

	// collect the physical column ids the index covers
	vector<column_t> column_ids;
	for (auto &column_index : column_indexes) {
		column_ids.push_back(column_index.GetPrimaryIndex());
	}

	// re-create the primary-key (ART) index instance from the replayed storage info
	auto &storage = table.GetStorage();
	CreateIndexInput input(TableIOManager::Get(storage), storage.db, IndexConstraintType::PRIMARY,
	                       index_storage_info.name, column_ids, unbound_expressions, index_storage_info,
	                       index_storage_info.options);

	auto index_type = context.db->config.GetIndexTypes().FindByName(ART::TYPE_NAME);
	auto index_instance = index_type->create_instance(input);

	// defer adding the index to the table until the surrounding transaction commits
	auto &table_index_list = storage.GetDataTableInfo()->GetIndexes();
	state.replay_index_infos.emplace_back(table_index_list, std::move(index_instance), table_info.schema,
	                                      table_info.name);

	catalog.Alter(context, alter_info);
}

//===--------------------------------------------------------------------===//
// Replay View
//===--------------------------------------------------------------------===//
//! Replay a CREATE_VIEW entry.
void WriteAheadLogDeserializer::ReplayCreateView() {
	// always deserialize the payload; only apply it when actually replaying
	auto view_info = deserializer.ReadProperty<unique_ptr<CreateInfo>>(101, "view");
	if (!DeserializeOnly()) {
		catalog.CreateView(context, view_info->Cast<CreateViewInfo>());
	}
}

void WriteAheadLogDeserializer::ReplayDropView() {
	DropInfo info;
	info.type = CatalogType::VIEW_ENTRY;
	info.schema = deserializer.ReadProperty<string>(101, "schema");
	info.name = deserializer.ReadProperty<string>(102, "name");
	if (DeserializeOnly()) {
		return;
	}
	catalog.DropEntry(context, info);
}

//===--------------------------------------------------------------------===//
// Replay Schema
//===--------------------------------------------------------------------===//
void WriteAheadLogDeserializer::ReplayCreateSchema() {
	CreateSchemaInfo info;
	info.schema = deserializer.ReadProperty<string>(101, "schema");
	if (DeserializeOnly()) {
		return;
	}

	catalog.CreateSchema(context, info);
}

void WriteAheadLogDeserializer::ReplayDropSchema() {
	DropInfo info;

	info.type = CatalogType::SCHEMA_ENTRY;
	info.name = deserializer.ReadProperty<string>(101, "schema");
	if (DeserializeOnly()) {
		return;
	}

	catalog.DropEntry(context, info);
}

//===--------------------------------------------------------------------===//
// Replay Custom Type
//===--------------------------------------------------------------------===//
void WriteAheadLogDeserializer::ReplayCreateType() {
	// Deserialize the custom type's catalog information from the WAL entry.
	auto info = deserializer.ReadProperty<unique_ptr<CreateInfo>>(101, "type");
	// NOTE(review): unlike the other Replay* methods there is no DeserializeOnly()
	// early-out here, so the type is created in the catalog even during the
	// deserialize-only pass. The IGNORE_ON_CONFLICT below makes the second pass
	// a no-op for an already-created type - presumably later WAL entries (e.g.
	// tables using this type) need it present to deserialize; confirm before
	// adding an early-out.
	info->on_conflict = OnCreateConflict::IGNORE_ON_CONFLICT;
	catalog.CreateType(context, info->Cast<CreateTypeInfo>());
}

void WriteAheadLogDeserializer::ReplayDropType() {
	DropInfo info;

	info.type = CatalogType::TYPE_ENTRY;
	info.schema = deserializer.ReadProperty<string>(101, "schema");
	info.name = deserializer.ReadProperty<string>(102, "name");
	if (DeserializeOnly()) {
		return;
	}

	catalog.DropEntry(context, info);
}

//===--------------------------------------------------------------------===//
// Replay Sequence
//===--------------------------------------------------------------------===//
void WriteAheadLogDeserializer::ReplayCreateSequence() {
	// Deserialize the sequence's catalog information from the WAL entry.
	auto sequence_info = deserializer.ReadProperty<unique_ptr<CreateInfo>>(101, "sequence");
	if (DeserializeOnly()) {
		return;
	}
	catalog.CreateSequence(context, sequence_info->Cast<CreateSequenceInfo>());
}

void WriteAheadLogDeserializer::ReplayDropSequence() {
	DropInfo info;
	info.type = CatalogType::SEQUENCE_ENTRY;
	info.schema = deserializer.ReadProperty<string>(101, "schema");
	info.name = deserializer.ReadProperty<string>(102, "name");
	if (DeserializeOnly()) {
		return;
	}

	catalog.DropEntry(context, info);
}

void WriteAheadLogDeserializer::ReplaySequenceValue() {
	// A sequence-value entry records the sequence identity plus its counter state.
	auto schema_name = deserializer.ReadProperty<string>(101, "schema");
	auto sequence_name = deserializer.ReadProperty<string>(102, "name");
	auto usage_count = deserializer.ReadProperty<uint64_t>(103, "usage_count");
	auto counter = deserializer.ReadProperty<int64_t>(104, "counter");
	if (DeserializeOnly()) {
		return;
	}

	// Look up the sequence in the catalog and restore its counter state.
	auto &sequence = catalog.GetEntry<SequenceCatalogEntry>(context, schema_name, sequence_name);
	sequence.ReplayValue(usage_count, counter);
}

//===--------------------------------------------------------------------===//
// Replay Macro
//===--------------------------------------------------------------------===//
void WriteAheadLogDeserializer::ReplayCreateMacro() {
	// Deserialize the scalar macro's catalog information from the WAL entry.
	auto macro_info = deserializer.ReadProperty<unique_ptr<CreateInfo>>(101, "macro");
	if (DeserializeOnly()) {
		return;
	}
	catalog.CreateFunction(context, macro_info->Cast<CreateMacroInfo>());
}

void WriteAheadLogDeserializer::ReplayDropMacro() {
	DropInfo info;
	info.type = CatalogType::MACRO_ENTRY;
	info.schema = deserializer.ReadProperty<string>(101, "schema");
	info.name = deserializer.ReadProperty<string>(102, "name");
	if (DeserializeOnly()) {
		return;
	}

	catalog.DropEntry(context, info);
}

//===--------------------------------------------------------------------===//
// Replay Table Macro
//===--------------------------------------------------------------------===//
void WriteAheadLogDeserializer::ReplayCreateTableMacro() {
	// Deserialize the table macro's catalog information from the WAL entry.
	auto macro_info = deserializer.ReadProperty<unique_ptr<CreateInfo>>(101, "table_macro");
	if (DeserializeOnly()) {
		return;
	}
	catalog.CreateFunction(context, macro_info->Cast<CreateMacroInfo>());
}

void WriteAheadLogDeserializer::ReplayDropTableMacro() {
	DropInfo info;
	info.type = CatalogType::TABLE_MACRO_ENTRY;
	info.schema = deserializer.ReadProperty<string>(101, "schema");
	info.name = deserializer.ReadProperty<string>(102, "name");
	if (DeserializeOnly()) {
		return;
	}

	catalog.DropEntry(context, info);
}

//===--------------------------------------------------------------------===//
// Replay Index
//===--------------------------------------------------------------------===//
void WriteAheadLogDeserializer::ReplayCreateIndex() {
	// Read both the catalog-level index definition and its storage information.
	auto create_info = deserializer.ReadProperty<unique_ptr<CreateInfo>>(101, "index_catalog_entry");
	auto index_info = deserializer.ReadProperty<IndexStorageInfo>(102, "index_storage_info");

	// NOTE(review): ReplayIndexData runs BEFORE the DeserializeOnly() early-out,
	// i.e. in both replay passes - presumably it consumes WAL bytes / marks
	// blocks that must be handled either way; confirm before reordering.
	ReplayIndexData(index_info);
	if (DeserializeOnly()) {
		return;
	}
	auto &info = create_info->Cast<CreateIndexInfo>();

	// Ensure that the index type exists.
	if (info.index_type.empty()) {
		info.index_type = ART::TYPE_NAME;
	}

	// Copy the names now: create_info is moved into the UnboundIndex below.
	const auto schema_name = create_info->schema;
	const auto table_name = info.table;

	auto &entry = catalog.GetEntry<TableCatalogEntry>(context, schema_name, table_name);
	auto &table = entry.Cast<DuckTableEntry>();
	auto &storage = table.GetStorage();
	auto &io_manager = TableIOManager::Get(storage);

	// Create the index in the catalog.
	table.schema.CreateIndex(context, info, table);

	// add the index to the storage
	// The index is kept unbound here; it is recorded in state.replay_index_infos,
	// which appears to be resolved after replay finishes - confirm.
	auto unbound_index = make_uniq<UnboundIndex>(std::move(create_info), std::move(index_info), io_manager, db);

	auto &table_index_list = storage.GetDataTableInfo()->GetIndexes();
	state.replay_index_infos.emplace_back(table_index_list, std::move(unbound_index), schema_name, table_name);
}

void WriteAheadLogDeserializer::ReplayDropIndex() {
	DropInfo info;
	info.type = CatalogType::INDEX_ENTRY;
	info.schema = deserializer.ReadProperty<string>(101, "schema");
	info.name = deserializer.ReadProperty<string>(102, "name");
	if (DeserializeOnly()) {
		return;
	}

	// Remove the replay index, if any.
	state.replay_index_infos.erase(std::remove_if(state.replay_index_infos.begin(), state.replay_index_infos.end(),
	                                              [&info](const ReplayState::ReplayIndexInfo &replay_info) {
		                                              return replay_info.table_schema == info.schema &&
		                                                     replay_info.index->GetIndexName() == info.name;
	                                              }),
	                               state.replay_index_infos.end());

	catalog.DropEntry(context, info);
}

//===--------------------------------------------------------------------===//
// Replay Data
//===--------------------------------------------------------------------===//
void WriteAheadLogDeserializer::ReplayUseTable() {
	// Read the qualified table name this entry switches to.
	auto schema_name = deserializer.ReadProperty<string>(101, "schema");
	auto table_name = deserializer.ReadProperty<string>(102, "table");
	if (DeserializeOnly()) {
		return;
	}
	// Subsequent data entries (insert/delete/update) target this table.
	auto &table_entry = catalog.GetEntry<TableCatalogEntry>(context, schema_name, table_name);
	state.current_table = &table_entry;
}

void WriteAheadLogDeserializer::ReplayInsert() {
	DataChunk chunk;
	deserializer.ReadObject(101, "chunk", [&](Deserializer &object) { chunk.Deserialize(object); });
	if (DeserializeOnly()) {
		return;
	}
	if (!state.current_table) {
		throw InternalException("Corrupt WAL: insert without table");
	}

	// Append to the current table without constraint verification.
	vector<unique_ptr<BoundConstraint>> bound_constraints;
	auto &storage = state.current_table->GetStorage();
	storage.LocalWALAppend(*state.current_table, context, chunk, bound_constraints);
}

// Mark every block referenced by this column's data (and its child columns) as
// in-use in the block manager, so the blocks are not reused/overwritten.
static void MarkBlocksAsUsed(BlockManager &manager, const PersistentColumnData &col_data) {
	for (auto &data_pointer : col_data.pointers) {
		const auto block_id = data_pointer.block_pointer.block_id;
		if (block_id != INVALID_BLOCK) {
			manager.MarkBlockAsUsed(block_id);
		}
		if (!data_pointer.segment_state) {
			continue;
		}
		// Segment state may reference additional blocks.
		for (auto &state_block : data_pointer.segment_state->blocks) {
			manager.MarkBlockAsUsed(state_block);
		}
	}
	// Recurse into child columns.
	for (auto &child : col_data.child_columns) {
		MarkBlocksAsUsed(manager, child);
	}
}

void WriteAheadLogDeserializer::ReplayRowGroupData() {
	auto &block_manager = db.GetStorageManager().GetBlockManager();
	PersistentCollectionData data;
	// Row-group deserialization needs the database instance and compression
	// info; provide them via the deserializer's context and remove them again
	// once the property has been read (Unset in reverse order of Set).
	deserializer.Set<DatabaseInstance &>(db.GetDatabase());
	CompressionInfo compression_info(block_manager);
	deserializer.Set<const CompressionInfo &>(compression_info);
	deserializer.ReadProperty(101, "row_group_data", data);
	deserializer.Unset<const CompressionInfo>();
	deserializer.Unset<DatabaseInstance>();
	if (DeserializeOnly()) {
		// label blocks in data as used - they will be used after the WAL replay is finished
		// we need to do this during the deserialization phase to ensure the blocks will not be overwritten
		// by previous deserialization steps
		for (auto &group : data.row_group_data) {
			for (auto &col_data : group.column_data) {
				MarkBlocksAsUsed(block_manager, col_data);
			}
		}
		return;
	}
	if (!state.current_table) {
		throw InternalException("Corrupt WAL: insert without table");
	}
	// Materialize the deserialized row groups and merge them into the current
	// table's storage. An empty index list is passed here; NOTE(review) indexes
	// appear to be handled separately during replay - confirm.
	auto &storage = state.current_table->GetStorage();
	auto &table_info = storage.GetDataTableInfo();
	RowGroupCollection new_row_groups(table_info, table_info->GetIOManager(), storage.GetTypes(), 0);
	new_row_groups.Initialize(data);
	TableIndexList index_list;
	storage.MergeStorage(new_row_groups, index_list, nullptr);
}

void WriteAheadLogDeserializer::ReplayDelete() {
	DataChunk chunk;
	deserializer.ReadObject(101, "chunk", [&](Deserializer &object) { chunk.Deserialize(object); });
	if (DeserializeOnly()) {
		return;
	}
	if (!state.current_table) {
		throw SerializationException("delete without a table");
	}

	D_ASSERT(chunk.ColumnCount() == 1 && chunk.data[0].GetType() == LogicalType::ROW_TYPE);
	auto &row_identifiers = chunk.data[0];
	row_identifiers.Flatten(chunk.size());
	auto source_ids = FlatVector::GetData<row_t>(row_identifiers);

	// Delete the row IDs from the current table.
	auto &storage = state.current_table->GetStorage();
	auto total_rows = storage.GetTotalRows();
	for (idx_t i = 0; i < chunk.size(); i++) {
		if (source_ids[i] >= UnsafeNumericCast<row_t>(total_rows)) {
			throw SerializationException("invalid row ID delete in WAL");
		}
	}
	TableDeleteState delete_state;
	storage.Delete(delete_state, context, row_identifiers, chunk.size());
}

void WriteAheadLogDeserializer::ReplayUpdate() {
	// An update entry stores the column path of the updated column followed by
	// a chunk containing the new values plus a trailing row-ID vector.
	auto column_path = deserializer.ReadProperty<vector<column_t>>(101, "column_indexes");

	DataChunk chunk;
	deserializer.ReadObject(102, "chunk", [&](Deserializer &object) { chunk.Deserialize(object); });

	if (DeserializeOnly()) {
		return;
	}
	if (!state.current_table) {
		throw InternalException("Corrupt WAL: update without table");
	}

	// Guard against a corrupt/truncated entry before dereferencing:
	// column_path[0] and chunk.data.back() are undefined on empty containers.
	if (column_path.empty()) {
		throw InternalException("Corrupt WAL: update without a column index");
	}
	if (chunk.data.empty()) {
		throw InternalException("Corrupt WAL: update chunk without a row ID vector");
	}
	if (column_path[0] >= state.current_table->GetColumns().PhysicalColumnCount()) {
		throw InternalException("Corrupt WAL: column index for update out of bounds");
	}

	// remove the row id vector from the chunk
	auto row_ids = std::move(chunk.data.back());
	chunk.data.pop_back();

	// now perform the update
	state.current_table->GetStorage().UpdateColumn(*state.current_table, context, row_ids, column_path, chunk);
}

void WriteAheadLogDeserializer::ReplayCheckpoint() {
	// Record the checkpoint's meta block pointer and the WAL position at which
	// this checkpoint entry was encountered.
	state.checkpoint_id = deserializer.ReadProperty<MetaBlockPointer>(101, "meta_block");
	state.checkpoint_position = state.current_position;
}

} // namespace duckdb
