// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//


#include <pollux/dwio/parquet/reader/parquet_data.h>

#include <pollux/dwio/common/buffered_input.h>
#include <pollux/dwio/parquet/reader/parquet_stats_context.h>

namespace kumo::pollux::parquet {
    // Builds the Parquet-specific FormatData for the given schema node.
    // The scan spec is unused at construction time; row-group filtering
    // consults it later via ParquetData::filterRowGroups.
    std::unique_ptr<dwio::common::FormatData> ParquetParams::toFormatData(
        const std::shared_ptr<const dwio::common::TypeWithId> &type,
        const common::ScanSpec & /*scanSpec*/) {
        auto formatData = std::make_unique<ParquetData>(
            type, metaData_, pool(), sessionTimezone_);
        return formatData;
    }

    // Marks row groups that can be pruned using column-chunk statistics.
    //
    // For every row group in the file, the scan spec's value filter and each
    // metadata filter are evaluated against this column's statistics.  A set
    // bit in result.filterResult (resp. the per-filter words appended to
    // result.metadataFilterResults) means the row group is excluded for that
    // filter.  Statistics are skipped entirely for physical types the stats
    // context declares unreliable.
    void ParquetData::filterRowGroups(
        const common::ScanSpec &scanSpec,
        uint64_t /*rowsPerRowGroup*/,
        const dwio::common::StatsContext &writerContext,
        FilterRowGroupsResult &result) {
        // Downcast to the Parquet-specific stats context.  static_cast is the
        // correct named cast for a known derived type; reinterpret_cast would
        // silently break if the hierarchy ever gained multiple/virtual bases.
        auto parquetStatsContext =
                static_cast<const ParquetStatsContext *>(&writerContext);
        if (type_->parquetType_.has_value() &&
            parquetStatsContext->shouldIgnoreStatistics(
                type_->parquetType_.value())) {
            // Statistics for this physical type are untrustworthy: keep every
            // row group rather than risk wrongly pruning one.
            return;
        }
        // Ensure the shared result bitmaps cover every row group in this file.
        result.totalCount =
                std::max<int>(result.totalCount, fileMetaDataPtr_.numRowGroups());
        auto nwords = bits::nwords(result.totalCount);
        if (result.filterResult.size() < nwords) {
            result.filterResult.resize(nwords);
        }
        // Append one (node, bitmap) entry per metadata filter; remember where
        // this column's entries start so they can be indexed below.
        auto metadataFiltersStartIndex = result.metadataFilterResults.size();
        for (int i = 0; i < scanSpec.numMetadataFilters(); ++i) {
            result.metadataFilterResults.emplace_back(
                scanSpec.metadataFilterNodeAt(i), std::vector<uint64_t>(nwords));
        }
        if (scanSpec.filter() || scanSpec.numMetadataFilters() > 0) {
            for (auto i = 0; i < fileMetaDataPtr_.numRowGroups(); ++i) {
                if (scanSpec.filter() && !rowGroupMatches(i, scanSpec.filter())) {
                    // Excluded by the value filter; metadata filters need not
                    // be evaluated for this row group.
                    bits::setBit(result.filterResult.data(), i);
                    continue;
                }
                for (int j = 0; j < scanSpec.numMetadataFilters(); ++j) {
                    auto *metadataFilter = scanSpec.metadataFilterAt(j);
                    if (!rowGroupMatches(i, metadataFilter)) {
                        bits::setBit(
                            result.metadataFilterResults[metadataFiltersStartIndex + j]
                            .second.data(),
                            i);
                    }
                }
            }
        }
    }

    // Returns true if row group 'rowGroupId' may contain rows passing 'filter'.
    //
    // A null filter trivially matches.  When the column chunk carries
    // statistics they are tested against the filter; without statistics we
    // conservatively keep the row group.
    bool ParquetData::rowGroupMatches(uint32_t rowGroupId, common::Filter *filter) {
        // Guard clause first: with no filter nothing can exclude the row
        // group, so skip the metadata lookups entirely.
        if (!filter) {
            return true;
        }

        auto column = type_->column();
        auto type = type_->type();
        auto rowGroup = fileMetaDataPtr_.rowGroup(rowGroupId);
        assert(rowGroup.numColumns() != 0);

        auto columnChunk = rowGroup.columnChunk(column);
        if (columnChunk.hasStatistics()) {
            auto columnStats =
                    columnChunk.getColumnStatistics(type, rowGroup.numRows());
            return testFilter(filter, columnStats.get(), rowGroup.numRows(), type);
        }
        // No statistics: cannot prove exclusion, so keep the row group.
        return true;
    }

    // Schedules the read of this column's chunk of row group 'index' on
    // 'input'.  The resulting stream is stashed in streams_[index] for the
    // later seekToRowGroup call.
    void ParquetData::enqueueRowGroup(
        uint32_t index,
        dwio::common::BufferedInput &input) {
        auto chunk = fileMetaDataPtr_.rowGroup(index).columnChunk(type_->column());
        streams_.resize(fileMetaDataPtr_.numRowGroups());
        POLLUX_CHECK(
            chunk.hasMetadata(),
            "ColumnMetaData does not exist for schema Id ",
            type_->column());

        uint64_t chunkReadOffset = chunk.dataPageOffset();
        if (chunk.hasDictionaryPageOffset() && chunk.dictionaryPageOffset() >= 4) {
            // this assumes the data pages follow the dict pages directly.
            // Offsets below 4 are rejected: presumably they cannot be valid
            // because the file starts with the 4-byte "PAR1" magic — confirm.
            chunkReadOffset = chunk.dictionaryPageOffset();
        }

        // Uncompressed chunks report their size via totalUncompressedSize;
        // everything else via totalCompressedSize.
        uint64_t readSize =
                (chunk.compression() == common::CompressionKind::CompressionKind_NONE)
                    ? chunk.totalUncompressedSize()
                    : chunk.totalCompressedSize();

        // NOTE(review): 'id' is a stack local passed by address; this assumes
        // BufferedInput::enqueue copies the identifier rather than retaining
        // the pointer — confirm against BufferedInput's contract.
        auto id = dwio::common::StreamIdentifier(type_->column());
        streams_[index] = input.enqueue({chunkReadOffset, readSize}, &id);
    }

    // Positions this column at the start of row group 'index'.  The stream
    // for that row group must already have been enqueued; a fresh PageReader
    // is created over it, consuming the stream.
    dwio::common::PositionProvider ParquetData::seekToRowGroup(int64_t index) {
        POLLUX_CHECK_LT(index, streams_.size());
        POLLUX_CHECK(streams_[index], "Stream not enqueued for column");

        auto chunkMeta =
                fileMetaDataPtr_.rowGroup(index).columnChunk(type_->column());
        reader_ = std::make_unique<PageReader>(
            std::move(streams_[index]),
            pool_,
            type_,
            chunkMeta.compression(),
            chunkMeta.totalCompressedSize(),
            sessionTimezone_);

        // Parquet carries no intra-stream seek positions to restore.
        static std::vector<uint64_t> empty;
        return dwio::common::PositionProvider(empty);
    }

    // Returns the {fileOffset, length} byte region covered by row group
    // 'index' within the file.
    std::pair<int64_t, int64_t> ParquetData::getRowGroupRegion(
        uint32_t index) const {
        const auto rowGroup = fileMetaDataPtr_.rowGroup(index);
        POLLUX_CHECK_GT(rowGroup.numColumns(), 0);

        // Prefer the explicit row-group file offset when present; otherwise
        // fall back to the first column chunk's dictionary page offset (if it
        // has one) or its first data page offset.
        int64_t fileOffset;
        if (rowGroup.hasFileOffset()) {
            fileOffset = rowGroup.fileOffset();
        } else {
            const auto firstChunk = rowGroup.columnChunk(0);
            fileOffset = firstChunk.hasDictionaryPageOffset()
                             ? firstChunk.dictionaryPageOffset()
                             : firstChunk.dataPageOffset();
        }
        POLLUX_CHECK_GT(fileOffset, 0);

        // Compressed size is exact when recorded; totalByteSize is the
        // fallback estimate.
        const int64_t length = rowGroup.hasTotalCompressedSize()
                                   ? rowGroup.totalCompressedSize()
                                   : rowGroup.totalByteSize();

        return {fileOffset, length};
    }
} // namespace kumo::pollux::parquet
