// Copyright(C) 2025 InfiniFlow, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

module infinity_core:catalog_cache.impl;

import :catalog_cache;
import :default_values;
import :infinity_exception;
import :logger;
import :index_base;
import :catalog_cache;

import std;
import third_party;

import internal_types;

namespace infinity {

TableCache::TableCache(u64 table_id, SegmentID unsealed_segment_id, SegmentOffset unsealed_segment_offset, SegmentID next_segment_id)
    : prepare_segment_id_(unsealed_segment_id), prepare_segment_offset_(unsealed_segment_offset), commit_segment_id_(unsealed_segment_id),
      commit_segment_offset_(unsealed_segment_offset), table_id_(table_id), next_segment_id_(next_segment_id) {
    // A zero committed offset means every segment was sealed at shutdown;
    // nothing to rebuild in that case.
    if (commit_segment_offset_ == 0) {
        return;
    }
    // Restart path: the last segment was still open, so recreate its
    // in-memory cache and mark it unsealed so appends can resume into it.
    unsealed_segment_cache_ = std::make_shared<SegmentCache>(prepare_segment_id_, commit_segment_offset_);
    unsealed_segment_cache_->sealed_ = false;
    LOG_INFO(fmt::format("TableCache initialized with unsealed_segment_cache_({}, {})", prepare_segment_id_, commit_segment_offset_));
}

std::shared_ptr<AppendPrepareInfo> TableCache::PrepareAppendNolock(size_t row_count, TransactionID txn_id) {
    if (row_count > MAX_BLOCK_CAPACITY or row_count == 0) {
        UnrecoverableError(fmt::format("Attempt to append row_count: {}", row_count));
    }

    std::shared_ptr<AppendPrepareInfo> append_info = std::make_shared<AppendPrepareInfo>();
    append_info->transaction_id_ = txn_id;
    if (unsealed_segment_cache_ == nullptr) {
        // Used when system is restarted and all segments are sealed.
        unsealed_segment_cache_ = std::make_shared<SegmentCache>(next_segment_id_, row_count);
        unsealed_segment_cache_->sealed_ = false;
        // Update prepare info
        prepare_segment_id_ = next_segment_id_;
        prepare_segment_offset_ = row_count;

        append_info->ranges_.emplace_back(RowID(next_segment_id_, 0), row_count);
        LOG_DEBUG(fmt::format("TableCache.PrepareAppendNolock allocated range({}.{}, {})", next_segment_id_, 0, row_count));
        ++next_segment_id_;
    } else {
        if (unsealed_segment_cache_->row_count_ + row_count < DEFAULT_SEGMENT_CAPACITY) {
            // Don't need to add a new segment
            append_info->ranges_.emplace_back(RowID(unsealed_segment_cache_->segment_id_, unsealed_segment_cache_->row_count_), row_count);
            LOG_DEBUG(fmt::format("TableCache.PrepareAppendNolock allocated range({}.{}, {})",
                                  unsealed_segment_cache_->segment_id_,
                                  unsealed_segment_cache_->row_count_,
                                  row_count));
            unsealed_segment_cache_->row_count_ += row_count;

            prepare_segment_offset_ += row_count;
        } else if (unsealed_segment_cache_->row_count_ + row_count == DEFAULT_SEGMENT_CAPACITY) {
            // Need to add a new segment
            append_info->ranges_.emplace_back(RowID(unsealed_segment_cache_->segment_id_, unsealed_segment_cache_->row_count_), row_count);
            unsealed_segment_cache_->row_count_ += row_count;
            unsealed_segment_cache_->sealed_ = true;
            segment_cache_map_.emplace(unsealed_segment_cache_->segment_id_, unsealed_segment_cache_);
            LOG_DEBUG(fmt::format("TableCache.PrepareAppendNolock allocated range({}.{}, {})",
                                  unsealed_segment_cache_->segment_id_,
                                  unsealed_segment_cache_->row_count_,
                                  row_count));

            prepare_segment_offset_ += row_count;
        } else {
            size_t first_row_count = DEFAULT_SEGMENT_CAPACITY - unsealed_segment_cache_->row_count_;
            append_info->ranges_.emplace_back(RowID(unsealed_segment_cache_->segment_id_, unsealed_segment_cache_->row_count_), first_row_count);
            LOG_DEBUG(fmt::format("TableCache.PrepareAppendNolock allocated first range({}.{}, {})",
                                  unsealed_segment_cache_->segment_id_,
                                  unsealed_segment_cache_->row_count_,
                                  first_row_count));
            unsealed_segment_cache_->row_count_ += first_row_count;
            unsealed_segment_cache_->sealed_ = true;
            segment_cache_map_.emplace(unsealed_segment_cache_->segment_id_, unsealed_segment_cache_);

            // Update prepare info
            size_t second_row_count = row_count - first_row_count;
            prepare_segment_id_ = next_segment_id_;
            prepare_segment_offset_ = second_row_count;

            // create a new segment
            unsealed_segment_cache_ = std::make_shared<SegmentCache>(next_segment_id_, second_row_count);
            unsealed_segment_cache_->sealed_ = false;
            append_info->ranges_.emplace_back(RowID(next_segment_id_, 0), second_row_count);
            LOG_DEBUG(fmt::format("TableCache.PrepareAppendNolock allocated second range({}.{}, {})", next_segment_id_, 0, first_row_count));
            ++next_segment_id_;
        }
    }

    uncommitted_append_infos_.emplace_back(append_info);
    return append_info;
}

// Commits the oldest prepared append, which must belong to txn_id — appends are
// expected to commit in the same order they were prepared (FIFO). For each
// reserved range, advances the commit watermark (commit_segment_id_/offset_)
// and updates the per-segment caches. Caller must hold the cache lock.
void TableCache::CommitAppendNolock(const std::shared_ptr<AppendPrepareInfo> &append_info, TransactionID txn_id) {
    // Enforce FIFO commit order: the front of the queue must be this txn.
    std::shared_ptr<AppendPrepareInfo> saved_append_info = uncommitted_append_infos_.front();
    if (saved_append_info->transaction_id_ != txn_id) {
        UnrecoverableError(fmt::format("Attempt to commit append prepare transaction id: {} != {}", saved_append_info->transaction_id_, txn_id));
    }
    uncommitted_append_infos_.pop_front();
    const std::vector<std::pair<RowID, u64>> &ranges = append_info->ranges_;
    for (const auto &range : ranges) {
        commit_segment_id_ = range.first.segment_id_;
        commit_segment_offset_ += range.second;
        // Wrap the offset when a range spilled into a new segment.
        // NOTE(review): the wrap only fires on strict '>', so an offset landing
        // exactly on DEFAULT_SEGMENT_CAPACITY stays at capacity — confirm intended.
        if (commit_segment_offset_ > DEFAULT_SEGMENT_CAPACITY) {
            commit_segment_offset_ -= DEFAULT_SEGMENT_CAPACITY;
        }

        auto segment_iter = segment_cache_map_.find(commit_segment_id_);
        if (segment_iter == segment_cache_map_.end()) {
            // First committed rows of a new segment: create its cache entry...
            std::shared_ptr<SegmentCache> segment_cache = std::make_shared<SegmentCache>(commit_segment_id_, range.second);
            segment_cache->sealed_ = false;
            segment_cache_map_.emplace(commit_segment_id_, segment_cache);

            // ...and seed an empty segment-index cache for every table index.
            for (const auto &index_pair : index_cache_map_) {
                auto index_cache = index_pair.second;

                auto segment_index_cache = std::make_shared<SegmentIndexCache>(commit_segment_id_);
                segment_index_cache->next_chunk_id_ = 0;
                index_cache->segment_index_cache_map_.emplace(commit_segment_id_, segment_index_cache);
            }
        } else {
            // Existing segment: accumulate rows; seal at exactly full capacity,
            // and treat overflow past capacity as a logic error.
            SegmentCache *segment_cache = segment_iter->second.get();
            segment_cache->row_count_ += range.second;
            if (segment_cache->row_count_ > DEFAULT_SEGMENT_CAPACITY) {
                UnrecoverableError(
                    fmt::format("Segment id: {} row count: {} exceed default capacity", commit_segment_id_, segment_cache->row_count_));
            } else if (segment_cache->row_count_ == DEFAULT_SEGMENT_CAPACITY) {
                segment_cache->sealed_ = true;
            } else {
                ;
            }
        }
    }
    return;
}

std::vector<SegmentID> TableCache::ApplySegmentIDsNolock(u64 segment_count) {
    // Hand out `segment_count` consecutive fresh segment ids, advancing the
    // table's id counter. Caller must hold the cache lock.
    std::vector<SegmentID> allocated;
    allocated.reserve(segment_count);
    for (u64 idx = 0; idx < segment_count; ++idx) {
        allocated.push_back(next_segment_id_++);
    }
    return allocated;
}

void TableCache::AddTableIndexCacheNolock(const std::shared_ptr<TableIndexCache> &table_index_cache) {
    // Register the index cache under its id; a duplicate id is a fatal logic error.
    const bool inserted = index_cache_map_.emplace(table_index_cache->index_id_, table_index_cache).second;
    if (!inserted) {
        UnrecoverableError(fmt::format("Table index cache with id: {} already exists", table_index_cache->index_id_));
    }
}

void TableCache::DropTableIndexCacheNolock(u64 index_id) {
    // Remove the index cache entry; a missing id is logged but not fatal.
    if (index_cache_map_.erase(index_id) == 0) {
        LOG_ERROR(fmt::format("Table index cache with id: {} not found", index_id));
    }
}

void DbCache::AddNewTableCacheNolock(u64 table_id, const std::string &table_name) {
    // Construct a fresh TableCache and register it. Delegates to
    // AddTableCacheNolock so the duplicate-id check lives in one place
    // instead of being duplicated here.
    std::shared_ptr<TableCache> table_cache = std::make_shared<TableCache>(table_id, table_name);
    this->AddTableCacheNolock(table_cache);
}

void DbCache::AddTableCacheNolock(const std::shared_ptr<TableCache> &table_cache) {
    // Insert under the table's id; an existing entry with the same id is fatal.
    const auto id = table_cache->table_id();
    const bool inserted = table_cache_map_.emplace(id, table_cache).second;
    if (!inserted) {
        UnrecoverableError(fmt::format("Table cache with id: {} already exists", id));
    }
}

void DbCache::DropTableCacheNolock(u64 table_id) {
    // Remove the table cache entry; a missing id is logged but not fatal.
    if (table_cache_map_.erase(table_id) == 0) {
        LOG_ERROR(fmt::format("Table cache with id: {} not found", table_id));
    }
}

void SystemCache::AddNewDbCache(const std::string &db_name, u64 db_id) {
    // Create and register a DbCache under the global cache lock; a failed
    // insert (duplicate id) is unrecoverable.
    std::unique_lock lock(cache_mtx_);
    Status status = this->AddDbCacheNolock(std::make_shared<DbCache>(db_id, db_name, 0));
    if (!status.ok()) {
        UnrecoverableError(status.message());
    }
}

void SystemCache::DropDbCache(u64 db_id) {
    // Remove the DbCache entry for db_id; logs an error and returns if absent.
    std::unique_lock lock(cache_mtx_);
    auto cache_iter = db_cache_map_.find(db_id);
    if (cache_iter == db_cache_map_.end()) {
        LOG_ERROR(fmt::format("Db cache with id: {} not found", db_id));
        return;
    }
    // Removed: an unused local copy of the db name that only served a
    // commented-out trace log.
    db_cache_map_.erase(cache_iter);
}

void SystemCache::AddNewTableCache(u64 db_id, u64 table_id, const std::string &table_name) {
    // Create a new TableCache in the named database; a missing database is fatal.
    std::unique_lock lock(cache_mtx_);
    DbCache *db_cache = this->GetDbCacheNolock(db_id);
    if (db_cache != nullptr) {
        db_cache->AddNewTableCacheNolock(table_id, table_name);
        return;
    }
    UnrecoverableError(fmt::format("Can't find database {} in cache", db_id));
}

void SystemCache::DropTableCache(u64 db_id, u64 table_id) {
    // Drop the table's cache; a missing database is only a warning.
    std::unique_lock lock(cache_mtx_);
    if (DbCache *db_cache = this->GetDbCacheNolock(db_id); db_cache != nullptr) {
        db_cache->DropTableCacheNolock(table_id);
    } else {
        LOG_WARN(fmt::format("Can't find database: {}", db_id));
    }
}

void SystemCache::AddNewIndexCache(u64 db_id, u64 table_id, const std::string &index_name) {
    // Allocate the next index id for the table and register a new index cache.
    std::unique_lock lock(cache_mtx_);
    TableCache *table_cache = this->GetTableCacheNolock(db_id, table_id);
    if (table_cache == nullptr) {
        // GetTableCacheNolock can return nullptr (see DropIndexCache); without
        // this check the dereference below would crash.
        UnrecoverableError(fmt::format("Can't find database: {}, table: {} in cache", db_id, table_id));
    }
    u64 index_id = table_cache->next_index_id_;
    std::shared_ptr<TableIndexCache> table_index_cache = std::make_shared<TableIndexCache>(db_id, table_id, index_id, index_name);
    table_cache->AddTableIndexCacheNolock(table_index_cache);
    ++table_cache->next_index_id_;
}

void SystemCache::DropIndexCache(u64 db_id, u64 table_id, u64 index_id) {
    // Drop an index cache; missing db/table/index are warnings, not errors,
    // because the drop may race with a concurrent table/index drop.
    std::unique_lock lock(cache_mtx_);
    TableCache *table_cache = this->GetTableCacheNolock(db_id, table_id);
    if (table_cache == nullptr) {
        LOG_WARN(fmt::format("Database: {}, table: {} is already dropped", db_id, table_id));
        return;
    }
    auto index_iter = table_cache->index_cache_map_.find(index_id);
    if (index_iter == table_cache->index_cache_map_.end()) {
        LOG_WARN(fmt::format("Database: {}, table: {}, index: {} is already dropped", db_id, table_id, index_id));
        return;
    }
    // Erase via the iterator we already hold instead of delegating to
    // DropTableIndexCacheNolock, which would repeat the map lookup.
    table_cache->index_cache_map_.erase(index_iter);
}

std::vector<SegmentID> SystemCache::ApplySegmentIDs(u64 db_id, u64 table_id, u64 segment_count) {
    // Allocate `segment_count` fresh segment ids for the table.
    std::unique_lock lock(cache_mtx_);
    TableCache *table_cache = this->GetTableCacheNolock(db_id, table_id);
    if (table_cache == nullptr) {
        // GetTableCacheNolock can return nullptr; guard the dereference below.
        UnrecoverableError(fmt::format("Can't find database: {}, table: {} in cache", db_id, table_id));
    }
    return table_cache->ApplySegmentIDsNolock(segment_count);
}

std::shared_ptr<AppendPrepareInfo> SystemCache::PrepareAppend(u64 db_id, u64 table_id, size_t row_count, TransactionID txn_id) {
    // Reserve row ranges for an append under the global cache lock.
    std::unique_lock lock(cache_mtx_);
    TableCache *table_cache = this->GetTableCacheNolock(db_id, table_id);
    if (table_cache == nullptr) {
        // GetTableCacheNolock can return nullptr; guard the dereference below.
        UnrecoverableError(fmt::format("Can't find database: {}, table: {} in cache", db_id, table_id));
    }
    return table_cache->PrepareAppendNolock(row_count, txn_id);
}

Status SystemCache::AddDbCacheNolock(const std::shared_ptr<DbCache> &db_cache) {
    // Register the db cache by id; a duplicate id is a fatal logic error.
    // Caller must hold cache_mtx_.
    const auto db_id = db_cache->db_id();
    if (!db_cache_map_.emplace(db_id, db_cache).second) {
        UnrecoverableError(fmt::format("Db cache with id: {} already exists", db_id));
    }
    return Status::OK();
}

std::shared_ptr<DbCache> SystemCache::GetDbCache(u64 db_id) const {
    // Thread-safe lookup; returns nullptr when the database is not cached.
    std::unique_lock lock(cache_mtx_);
    if (auto iter = db_cache_map_.find(db_id); iter != db_cache_map_.end()) {
        return iter->second;
    }
    return nullptr;
}

DbCache *SystemCache::GetDbCacheNolock(u64 db_id) {
    // Non-owning lookup; caller must hold cache_mtx_.
    // Returns nullptr (with a warning) when the database is not cached.
    auto db_iter = db_cache_map_.find(db_id);
    if (db_iter != db_cache_map_.end()) {
        return db_iter->second.get();
    }
    LOG_WARN(fmt::format("Db cache with id: {} not found", db_id));
    return nullptr;
}

TableCache *SystemCache::GetTableCacheNolock(u64 db_id, u64 table_id) {
    // Non-owning two-level lookup (db, then table); caller must hold cache_mtx_.
    // Returns nullptr when either level is missing.
    DbCache *db_cache = this->GetDbCacheNolock(db_id);
    if (db_cache == nullptr) {
        return nullptr;
    }

    auto table_iter = db_cache->table_cache_map_.find(table_id);
    if (table_iter != db_cache->table_cache_map_.end()) {
        return table_iter->second.get();
    }
    LOG_WARN(fmt::format("Table cache with id: {} not found", table_id));
    return nullptr;
}

nlohmann::json SystemCache::ToJson() const {
    // Placeholder serialization: takes the lock for consistency with the other
    // accessors but currently emits an empty JSON object.
    std::unique_lock lock(cache_mtx_);
    nlohmann::json result;
    return result;
}

} // namespace infinity