//                           _       _
// __      _____  __ ___   ___  __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
//  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
//   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
//  Copyright © 2016 - 2025 Weaviate B.V. All rights reserved.
//
//  CONTACT: hello@weaviate.io
//

package lsmkv

import (
	"context"
	"errors"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"slices"
	"strings"
	"sync"
	"time"

	"github.com/sirupsen/logrus"

	"github.com/weaviate/weaviate/adapters/repos/db/lsmkv/segmentindex"
	"github.com/weaviate/weaviate/adapters/repos/db/roaringset"
	"github.com/weaviate/weaviate/adapters/repos/db/roaringsetrange"
	"github.com/weaviate/weaviate/entities/cyclemanager"
	"github.com/weaviate/weaviate/entities/diskio"
	"github.com/weaviate/weaviate/entities/lsmkv"
	"github.com/weaviate/weaviate/entities/models"
	"github.com/weaviate/weaviate/entities/schema"
	"github.com/weaviate/weaviate/entities/storagestate"
	"github.com/weaviate/weaviate/usecases/memwatch"
)

// SegmentGroup manages the on-disk segments of a single bucket: it owns the
// ordered segment list, coordinates compaction and cleanup, and hands out
// reference-counted consistent views for readers.
type SegmentGroup struct {
	// segments, ordered oldest to newest (initialized in timestamp order)
	segments []Segment
	// Holds map of all segments currently in use (based on consistentView requests).
	// Segments are added to the map when consistentView is acquired and removed from map
	// when they are released and number of refs is 0.
	// It may contain segments that are no longer present in sg.segments, but still being read from
	// (segments that were cleaned or compacted and replaced by new ones)
	segmentsWithRefs      map[string]Segment // segment.path => segment
	segmentRefCounterLock sync.Mutex         // guards segmentsWithRefs and the per-segment ref counters

	// Lock() for changing the currently active segments, RLock() for normal
	// operation
	maintenanceLock sync.RWMutex
	dir             string // directory containing this group's segment files

	// bucket strategy as seen by this group; may be adjusted at init time to
	// match the strategy actually stored in the segment files
	strategy string

	compactionCallbackCtrl cyclemanager.CycleCallbackCtrl

	logger logrus.FieldLogger

	// for backward-compatibility with states where the disk state for maps was
	// not guaranteed to be sorted yet
	mapRequiresSorting bool

	status     storagestate.Status
	statusLock sync.Mutex // guards status
	metrics    *Metrics

	// all "replace" buckets support counting through net additions, but not all
	// produce a meaningful count. Typically, the only count we're interested in
	// is that of the bucket that holds objects
	monitorCount bool

	mmapContents             bool
	keepTombstones           bool // see bucket for more details
	useBloomFilter           bool // see bucket for more details
	calcCountNetAdditions    bool // see bucket for more details
	compactLeftOverSegments  bool // see bucket for more details
	enableChecksumValidation bool
	MinMMapSize              int64 // passed through to each segment's config
	keepLevelCompaction      bool  // see bucket for more details

	allocChecker   memwatch.AllocChecker
	maxSegmentSize int64

	segmentCleaner  segmentCleaner
	cleanupInterval time.Duration
	// timestamps of the most recent cleanup/compaction attempts, used by
	// compactOrCleanup to alternate between the two tasks
	lastCleanupCall    time.Time
	lastCompactionCall time.Time

	// only populated for RoaringSetRange buckets with keepSegmentsInMemory
	roaringSetRangeSegmentInMemory *roaringsetrange.SegmentInMemory
	bitmapBufPool                  roaringset.BitmapBufPool
	bm25config                     *schema.BM25Config
	writeSegmentInfoIntoFileName   bool
	writeMetadata                  bool
}

// sgConfig groups the construction parameters for a SegmentGroup; most fields
// are copied verbatim onto the group in newSegmentGroup.
type sgConfig struct {
	dir                string // directory containing the segment files
	strategy           string
	mapRequiresSorting bool
	monitorCount       bool
	mmapContents       bool
	keepTombstones     bool
	useBloomFilter     bool
	calcCountNetAdditions        bool
	forceCompaction              bool // mapped to SegmentGroup.compactLeftOverSegments
	keepLevelCompaction          bool
	maxSegmentSize               int64
	cleanupInterval              time.Duration
	enableChecksumValidation     bool
	keepSegmentsInMemory         bool // RoaringSetRange only: build the in-memory segment at init
	MinMMapSize                  int64
	bm25config                   *models.BM25Config
	writeSegmentInfoIntoFileName bool
	writeMetadata                bool
}

// newSegmentGroup initializes a SegmentGroup from the segment files found on
// disk (files maps file name => size). It proceeds in phases:
//
//  1. recover from partially finished compactions (".db.tmp" files), either
//     discarding them or promoting them to regular segments
//  2. initialize all remaining ".db" segments in timestamp order, deleting
//     files marked for deletion and segments whose WAL still exists
//  3. reconcile the bucket/group strategy with the strategy actually stored
//     in the segment files (backward compatibility)
//  4. recover from commit logs, set up the segment cleaner, build optional
//     in-memory structures, and register the compaction cycle callback
func newSegmentGroup(ctx context.Context, logger logrus.FieldLogger, metrics *Metrics, cfg sgConfig,
	compactionCallbacks cyclemanager.CycleCallbackGroup, b *Bucket, files map[string]int64,
) (*SegmentGroup, error) {
	now := time.Now()
	sg := &SegmentGroup{
		segments:                     make([]Segment, len(files)),
		segmentsWithRefs:             map[string]Segment{},
		dir:                          cfg.dir,
		logger:                       logger,
		metrics:                      metrics,
		monitorCount:                 cfg.monitorCount,
		mapRequiresSorting:           cfg.mapRequiresSorting,
		strategy:                     cfg.strategy,
		mmapContents:                 cfg.mmapContents,
		keepTombstones:               cfg.keepTombstones,
		useBloomFilter:               cfg.useBloomFilter,
		calcCountNetAdditions:        cfg.calcCountNetAdditions,
		compactLeftOverSegments:      cfg.forceCompaction,
		maxSegmentSize:               cfg.maxSegmentSize,
		cleanupInterval:              cfg.cleanupInterval,
		enableChecksumValidation:     cfg.enableChecksumValidation,
		allocChecker:                 b.allocChecker,
		lastCompactionCall:           now,
		lastCleanupCall:              now,
		MinMMapSize:                  cfg.MinMMapSize,
		writeSegmentInfoIntoFileName: cfg.writeSegmentInfoIntoFileName,
		writeMetadata:                cfg.writeMetadata,
		bitmapBufPool:                b.bitmapBufPool,
	}

	segmentIndex := 0

	// Note: it's important to process first the compacted segments
	// TODO: a single iteration may be possible

	// Phase 1: handle leftover ".db.tmp" files from interrupted compactions
	// or cleanups.
	for entry := range files {
		if filepath.Ext(entry) != ".tmp" {
			continue
		}

		potentialCompactedSegmentFileName := strings.TrimSuffix(entry, ".tmp")

		if filepath.Ext(potentialCompactedSegmentFileName) != ".db" {
			// another kind of temporary file, ignore at this point but it may need to be deleted...
			continue
		}

		jointSegments := segmentID(potentialCompactedSegmentFileName)
		jointSegmentsIDs := strings.Split(jointSegments, "_")

		if len(jointSegmentsIDs) == 1 {
			// a single ID means this was a cleanup (not a compaction) leftover,
			// it can simply be removed
			if err := os.Remove(filepath.Join(sg.dir, entry)); err != nil {
				return nil, fmt.Errorf("delete partially cleaned segment %q: %w", entry, err)
			}
			continue
		}

		if len(jointSegmentsIDs) != 2 {
			logger.WithField("action", "lsm_segment_init").
				WithField("path", filepath.Join(sg.dir, entry)).
				Warn("ignored (partially written) LSM compacted segment generated with a version older than v1.24.0")

			continue
		}

		// jointSegmentsIDs[0] is the left segment, jointSegmentsIDs[1] is the right segment
		leftSegmentFound, _ := segmentExistsWithID(jointSegmentsIDs[0], files)
		rightSegmentFound, rightSegmentFilename := segmentExistsWithID(jointSegmentsIDs[1], files)

		rightSegmentPath := filepath.Join(sg.dir, rightSegmentFilename)

		// both sources still present => the compaction never finished, the
		// .tmp result can be discarded and compaction will run again later
		if leftSegmentFound && rightSegmentFound {
			delete(files, entry)
			if err := os.Remove(filepath.Join(sg.dir, entry)); err != nil {
				return nil, fmt.Errorf("delete partially compacted segment %q: %w", entry, err)
			}
			continue
		}

		if leftSegmentFound && !rightSegmentFound {
			return nil, fmt.Errorf("missing right segment %q", rightSegmentFilename)
		}

		var rightSegmentMetadata *struct {
			Level    uint16
			Strategy segmentindex.Strategy
		}
		// left source already gone => the compaction result is complete; the
		// remaining right source must be dropped and the .tmp file promoted
		if !leftSegmentFound && rightSegmentFound {
			// segment is initialized just to be erased
			// there is no need of bloom filters nor net addition counter re-calculation
			rightSegment, err := newSegment(rightSegmentPath, logger,
				metrics, sg.makeExistsOn(nil),
				segmentConfig{
					mmapContents:             sg.mmapContents,
					useBloomFilter:           sg.useBloomFilter,
					calcCountNetAdditions:    sg.calcCountNetAdditions,
					overwriteDerived:         false,
					enableChecksumValidation: sg.enableChecksumValidation,
					MinMMapSize:              sg.MinMMapSize,
					allocChecker:             sg.allocChecker,
					fileList:                 make(map[string]int64), // empty to not check if bloom/cna files already exist
					writeMetadata:            sg.writeMetadata,
				})
			if err != nil {
				return nil, fmt.Errorf("init already compacted right segment %s: %w", rightSegmentFilename, err)
			}

			// remember level/strategy so the promoted file name can carry them
			rightSegmentMetadata = &struct {
				Level    uint16
				Strategy segmentindex.Strategy
			}{
				Level:    rightSegment.getLevel(),
				Strategy: rightSegment.getStrategy(),
			}

			err = rightSegment.close()
			if err != nil {
				return nil, fmt.Errorf("close already compacted right segment %s: %w", rightSegmentFilename, err)
			}

			// https://github.com/weaviate/weaviate/pull/6128 introduces the ability
			// to drop segments delayed by renaming them first and then dropping them
			// later.
			//
			// The existing functionality (previously .drop) was renamed to
			// .dropImmediately. We are keeping the old behavior in this mainly for
			// backward compatibility, but also because the motivation behind the
			// delayed deletion does not apply here:
			//
			// The new behavior is meant to split the deletion into two steps, to
			// reduce the time that an expensive lock – which could block readers -
			// is held. In this scenario, the segment has not been initialized yet,
			// so there is no one we could be blocking.
			//
			// The total time is the same, so we can also just drop it immediately.
			err = rightSegment.dropImmediately()
			if err != nil {
				return nil, fmt.Errorf("delete already compacted right segment %s: %w", rightSegmentFilename, err)
			}
			delete(files, rightSegmentFilename)

			err = diskio.Fsync(sg.dir)
			if err != nil {
				return nil, fmt.Errorf("fsync segment directory %s: %w", sg.dir, err)
			}
		}

		// promote the .tmp result to a regular segment file, carrying the
		// right segment's ID (and optionally its level/strategy in the name)
		var newRightSegmentFileName string
		if cfg.writeSegmentInfoIntoFileName && rightSegmentMetadata != nil {
			newRightSegmentFileName = fmt.Sprintf("segment-%s%s.db", jointSegmentsIDs[1], segmentExtraInfo(rightSegmentMetadata.Level, rightSegmentMetadata.Strategy))
		} else {
			newRightSegmentFileName = fmt.Sprintf("segment-%s.db", jointSegmentsIDs[1])
		}
		newRightSegmentPath := filepath.Join(sg.dir, newRightSegmentFileName)

		if err := os.Rename(filepath.Join(sg.dir, entry), newRightSegmentPath); err != nil {
			return nil, fmt.Errorf("rename compacted segment file %q as %q: %w", entry, newRightSegmentFileName, err)
		}

		// initialize in correct order in the next iteration
		files[newRightSegmentFileName] = files[entry]
		delete(files, entry)
	}

	// segments need to be initialised in order of their timestamp to ensure that various computations are correct (CNA etc)
	fileList := make([]string, 0, len(files))
	for entry := range files {
		fileList = append(fileList, entry)
	}
	slices.Sort(fileList)

	// Phase 2: initialize the remaining ".db" segments in sorted order.
	for _, entry := range fileList {
		if filepath.Ext(entry) == DeleteMarkerSuffix {
			// marked for deletion, but never actually deleted. Delete now.
			if err := os.Remove(filepath.Join(sg.dir, entry)); err != nil {
				// don't abort if the delete fails, we can still continue (albeit
				// without freeing disk space that should have been freed)
				sg.logger.WithError(err).WithFields(logrus.Fields{
					"action": "lsm_segment_init_deleted_previously_marked_files",
					"file":   entry,
				}).Error("failed to delete file already marked for deletion")
			}
			continue

		}

		if filepath.Ext(entry) != ".db" {
			// skip, this could be commit log, etc.
			continue
		}

		// before we can mount this file, we need to check if a WAL exists for it.
		// If yes, we must assume that the flush never finished, as otherwise the
		// WAL would have been deleted. Thus we must remove it.
		walFileName, _, _ := strings.Cut(entry, ".")
		walFileName += ".wal"
		_, ok := files[walFileName]
		if ok {
			// the segment will be recovered from the WAL
			err := os.Remove(filepath.Join(sg.dir, entry))
			if err != nil {
				return nil, fmt.Errorf("delete partially written segment %s: %w", entry, err)
			}

			logger.WithField("action", "lsm_segment_init").
				WithField("path", filepath.Join(sg.dir, entry)).
				WithField("wal_path", walFileName).
				Info("discarded (partially written) LSM segment, because an active WAL for " +
					"the same segment was found. A recovery from the WAL will follow.")

			continue
		}

		var segment Segment
		segConf := segmentConfig{
			mmapContents:             sg.mmapContents,
			useBloomFilter:           sg.useBloomFilter,
			calcCountNetAdditions:    sg.calcCountNetAdditions,
			overwriteDerived:         false,
			enableChecksumValidation: sg.enableChecksumValidation,
			MinMMapSize:              sg.MinMMapSize,
			allocChecker:             sg.allocChecker,
			fileList:                 files,
			writeMetadata:            sg.writeMetadata,
		}
		var err error
		if b.lazySegmentLoading {
			segment, err = newLazySegment(filepath.Join(sg.dir, entry), logger,
				metrics, sg.makeExistsOn(sg.segments[:segmentIndex]), segConf,
			)
			if err != nil {
				return nil, fmt.Errorf("init lazy segment %s: %w", filepath.Join(sg.dir, entry), err)
			}
		} else {
			segment, err = newSegment(filepath.Join(sg.dir, entry), logger,
				metrics, sg.makeExistsOn(sg.segments[:segmentIndex]), segConf,
			)
			if err != nil {
				return nil, fmt.Errorf("init segment %s: %w", filepath.Join(sg.dir, entry), err)
			}
		}
		sg.segments[segmentIndex] = segment
		segmentIndex++

		sg.metrics.IncSegmentTotalByStrategy(sg.strategy)
		sg.metrics.ObserveSegmentSize(sg.strategy, segment.Size())
	}

	// trim the pre-allocated slice down to the segments actually initialized
	sg.segments = sg.segments[:segmentIndex]

	// Phase 3: reconcile configured strategy with on-disk strategy.
	//
	// Actual strategy is stored in segment files. In case it is SetCollection,
	// while new implementation uses bitmaps and supposed to be RoaringSet,
	// bucket and segmentgroup strategy is changed back to SetCollection
	// (memtables will be created later on, with already modified strategy)
	// TODO what if only WAL files exists, and there is no segment to get actual strategy?
	if b.strategy == StrategyRoaringSet && len(sg.segments) > 0 &&
		sg.segments[0].getStrategy() == segmentindex.StrategySetCollection {
		b.strategy = StrategySetCollection
		b.desiredStrategy = StrategyRoaringSet
		sg.strategy = StrategySetCollection
	}
	// As of v1.19 property's IndexInterval setting is replaced with
	// IndexFilterable (roaring set) + IndexSearchable (map) and enabled by default.
	// Buckets for text/text[] inverted indexes created before 1.19 have strategy
	// map and name that since 1.19 is used by filterable inverted index.
	// Those buckets (roaring set by configuration, but in fact map) have to be
	// renamed on startup by migrator. Here actual strategy is set based on
	// data found in segment files
	if b.strategy == StrategyRoaringSet && len(sg.segments) > 0 &&
		sg.segments[0].getStrategy() == segmentindex.StrategyMapCollection {
		b.strategy = StrategyMapCollection
		b.desiredStrategy = StrategyRoaringSet
		sg.strategy = StrategyMapCollection
	}

	// Inverted segments share a lot of their logic as the MapCollection,
	// and the main difference is in the way they store their data.
	// Setting the desired strategy to Inverted will make sure that we can
	// distinguish between the two strategies for search.
	// The changes only apply when we have segments on disk,
	// as the memtables will always be created with the MapCollection strategy.
	if b.strategy == StrategyInverted && len(sg.segments) > 0 &&
		sg.segments[0].getStrategy() == segmentindex.StrategyMapCollection {
		b.strategy = StrategyMapCollection
		b.desiredStrategy = StrategyInverted
		sg.strategy = StrategyMapCollection
	} else if b.strategy == StrategyMapCollection && len(sg.segments) > 0 &&
		sg.segments[0].getStrategy() == segmentindex.StrategyInverted {
		// TODO amourao: blockmax "else" to be removed before final release
		// in case bucket was created as inverted and default strategy was reverted to map
		// by unsetting corresponding env variable
		b.strategy = StrategyInverted
		b.desiredStrategy = StrategyMapCollection
		sg.strategy = StrategyInverted
	}

	// Phase 4: WAL recovery, cleaner setup, strategy-specific init,
	// compaction registration.
	if err := b.mayRecoverFromCommitLogs(ctx, sg, files); err != nil {
		return nil, err
	}

	if sg.monitorCount {
		sg.metrics.ObjectCount(sg.count())
	}

	sc, err := newSegmentCleaner(sg)
	if err != nil {
		return nil, err
	}
	sg.segmentCleaner = sc

	// if a segment exists of the map collection strategy, we need to
	// convert the inverted strategy to a map collection strategy
	// as it is done on the bucket level
	if sg.strategy == StrategyInverted && len(sg.segments) > 0 &&
		sg.segments[0].getStrategy() == segmentindex.StrategyMapCollection {
		sg.strategy = StrategyMapCollection
	}

	switch sg.strategy {
	case StrategyInverted:
		// start with last but one segment, as the last one doesn't need tombstones for now
		for i := len(sg.segments) - 2; i >= 0; i-- {
			// avoid crashing if segment has no tombstones
			tombstonesNext, err := sg.segments[i+1].ReadOnlyTombstones()
			if err != nil {
				return nil, fmt.Errorf("init segment %s: load tombstones %w", sg.segments[i+1].getPath(), err)
			}
			if _, err := sg.segments[i].MergeTombstones(tombstonesNext); err != nil {
				return nil, fmt.Errorf("init segment %s: merge tombstones %w", sg.segments[i].getPath(), err)
			}
		}

	case StrategyRoaringSetRange:
		if cfg.keepSegmentsInMemory {
			t := time.Now()
			sg.roaringSetRangeSegmentInMemory = roaringsetrange.NewSegmentInMemory(sg.logger)
			for _, seg := range sg.segments {
				cursor := seg.newRoaringSetRangeCursor()
				if err := sg.roaringSetRangeSegmentInMemory.MergeSegmentByCursor(cursor); err != nil {
					return nil, fmt.Errorf("build segment-in-memory of strategy '%s': %w", sg.strategy, err)
				}
			}
			logger.WithFields(logrus.Fields{
				"took":    time.Since(t).String(),
				"bucket":  filepath.Base(cfg.dir),
				"size_mb": fmt.Sprintf("%.3f", float64(sg.roaringSetRangeSegmentInMemory.Size())/1024/1024),
			}).Debug("rangeable segment-in-memory built")
		}
	}

	id := "segmentgroup/compaction/" + sg.dir
	sg.compactionCallbackCtrl = compactionCallbacks.Register(id, sg.compactOrCleanup)

	return sg, nil
}

// pauseCompaction deactivates this group's compaction cycle callback,
// delegating to the cycle-callback controller.
func (sg *SegmentGroup) pauseCompaction(ctx context.Context) error {
	err := sg.compactionCallbackCtrl.Deactivate(ctx)
	return err
}

// resumeCompaction re-activates this group's compaction cycle callback.
// The context parameter is unused but kept for interface symmetry with
// pauseCompaction.
func (sg *SegmentGroup) resumeCompaction(_ context.Context) error {
	err := sg.compactionCallbackCtrl.Activate()
	return err
}

// makeExistsOn builds a predicate that reports whether a key is already
// present in any of the given (lower) segments. lsmkv.Deleted and
// lsmkv.NotFound both count as "does not exist"; any other lookup error is
// propagated.
func (sg *SegmentGroup) makeExistsOn(segments []Segment) existsOnLowerSegmentsFn {
	return func(key []byte) (bool, error) {
		if len(segments) == 0 {
			// this is already the lowest possible segment, we can guarantee that
			// any key in this segment is previously unseen.
			return false, nil
		}

		_, err := sg.getWithSegmentList(key, segments)
		if err == nil {
			return true, nil
		}
		if errors.Is(err, lsmkv.Deleted) || errors.Is(err, lsmkv.NotFound) {
			return false, nil
		}
		return false, fmt.Errorf("check exists on segments: %w", err)
	}
}

// add initializes a new segment from the file at path and appends it to the
// group, updating the per-strategy metrics. It takes the maintenance write
// lock for the duration of the call.
func (sg *SegmentGroup) add(path string) error {
	sg.maintenanceLock.Lock()
	defer sg.maintenanceLock.Unlock()

	conf := segmentConfig{
		mmapContents:             sg.mmapContents,
		useBloomFilter:           sg.useBloomFilter,
		calcCountNetAdditions:    sg.calcCountNetAdditions,
		overwriteDerived:         true,
		enableChecksumValidation: sg.enableChecksumValidation,
		MinMMapSize:              sg.MinMMapSize,
		allocChecker:             sg.allocChecker,
		writeMetadata:            sg.writeMetadata,
	}
	seg, err := newSegment(path, sg.logger, sg.metrics, sg.makeExistsOn(sg.segments), conf)
	if err != nil {
		return fmt.Errorf("init segment %s: %w", path, err)
	}

	sg.segments = append(sg.segments, seg)
	sg.metrics.IncSegmentTotalByStrategy(sg.strategy)
	sg.metrics.ObserveSegmentSize(sg.strategy, seg.Size())

	return nil
}

// getConsistentViewOfSegments returns a snapshot of the current segment list
// plus a release function the caller MUST invoke when done reading.
//
// Each segment in the snapshot gets its reference counter incremented and is
// tracked in sg.segmentsWithRefs, so that segments replaced by compaction or
// cleanup stay readable until the last reader releases them. Note the lock
// ordering: the ref-counter lock is taken while still holding the
// maintenance read lock, so the snapshot and the ref counts stay consistent.
func (sg *SegmentGroup) getConsistentViewOfSegments() (segments []Segment, release func()) {
	sg.maintenanceLock.RLock()
	segments = make([]Segment, len(sg.segments))
	copy(segments, sg.segments)

	sg.segmentRefCounterLock.Lock()
	for _, seg := range segments {
		seg.incRef()
		sg.segmentsWithRefs[seg.getPath()] = seg
	}
	sg.segmentRefCounterLock.Unlock()
	sg.maintenanceLock.RUnlock()

	return segments, func() {
		// drop one reference per segment; forget segments nobody reads anymore
		sg.segmentRefCounterLock.Lock()
		for _, seg := range segments {
			seg.decRef()
			if seg.getRefs() == 0 {
				delete(sg.segmentsWithRefs, seg.getPath())
			}
		}
		sg.segmentRefCounterLock.Unlock()
	}
}

// addInitializedSegment appends an already-initialized segment to the group
// and updates the per-strategy metrics (outside the maintenance lock, as
// before).
//
// The error return is kept for caller compatibility; the current
// implementation cannot fail. (Previously a named return + deferred
// err-check existed, but err was never set, making that branch dead code.)
func (sg *SegmentGroup) addInitializedSegment(segment Segment) error {
	sg.maintenanceLock.Lock()
	sg.segments = append(sg.segments, segment)
	sg.maintenanceLock.Unlock()

	sg.metrics.IncSegmentTotalByStrategy(sg.strategy)
	sg.metrics.ObserveSegmentSize(sg.strategy, segment.Size())
	return nil
}

// getWithSegmentList looks up key in the provided segment list (Replace
// strategy only), newest segment first, returning the first hit.
//
// Not thread-safe on its own, as the assumption is that this is called from a
// lockholder, e.g. within .get(). Returns lsmkv.Deleted if the newest entry
// for the key is a tombstone, lsmkv.NotFound if no segment contains the key.
func (sg *SegmentGroup) getWithSegmentList(key []byte, segments []Segment) ([]byte, error) {
	if err := CheckExpectedStrategy(sg.strategy, StrategyReplace); err != nil {
		return nil, fmt.Errorf("SegmentGroup::getWithSegmentList(): %w", err)
	}

	// start with latest and exit as soon as something is found, thus making sure
	// the latest takes precedence
	for i := len(segments) - 1; i >= 0; i-- {
		beforeSegment := time.Now()
		v, err := segments[i].get(key)
		// surface unusually slow individual segment reads for debugging
		if duration := time.Since(beforeSegment); duration > 100*time.Millisecond {
			sg.logger.WithError(err).
				WithFields(logrus.Fields{
					"duration":    duration,
					"action":      "lsm_segment_group_get_individual_segment",
					"segment_pos": i,
				}).Debug("waited over 100ms to get result from individual segment")
		}
		if err == nil {
			return v, nil
		}
		if errors.Is(err, lsmkv.Deleted) {
			// a tombstone in a newer segment shadows older values
			return nil, err
		}
		if !errors.Is(err, lsmkv.NotFound) {
			return nil, fmt.Errorf("SegmentGroup::getWithSegmentList() %q: %w", segments[i].getPath(), err)
		}
	}

	return nil, lsmkv.NotFound
}

// getBySecondaryWithSegmentList looks up a value by one of its secondary keys
// (identified by pos) in the provided segment list (Replace strategy only),
// newest segment first. It returns the primary key, the value, and the
// (possibly reallocated) buffer. Like getWithSegmentList, it is not
// thread-safe on its own and expects the caller to hold a consistent view.
func (sg *SegmentGroup) getBySecondaryWithSegmentList(pos int, key []byte, buffer []byte,
	segments []Segment,
) ([]byte, []byte, []byte, error) {
	if err := CheckExpectedStrategy(sg.strategy, StrategyReplace); err != nil {
		// fixed: this message previously referenced getWithSegmentList,
		// pointing error reports at the wrong method
		return nil, nil, nil, fmt.Errorf("SegmentGroup::getBySecondaryWithSegmentList(): %w", err)
	}

	// start with latest and exit as soon as something is found, thus making sure
	// the latest takes precedence
	for i := len(segments) - 1; i >= 0; i-- {
		beforeSegment := time.Now()
		k, v, allocBuf, err := segments[i].getBySecondary(pos, key, buffer)
		// surface unusually slow individual segment reads for debugging
		if duration := time.Since(beforeSegment); duration > 100*time.Millisecond {
			sg.logger.WithError(err).
				WithFields(logrus.Fields{
					"duration":    duration,
					"action":      "lsm_segment_group_getbysecondary_individual_segment",
					"segment_pos": i,
				}).Debug("waited over 100ms to get result from individual segment")
		}
		if err == nil {
			return k, v, allocBuf, nil
		}
		if errors.Is(err, lsmkv.Deleted) {
			// a tombstone in a newer segment shadows older values
			return nil, nil, nil, err
		}
		if !errors.Is(err, lsmkv.NotFound) {
			return nil, nil, nil, fmt.Errorf("SegmentGroup::getBySecondaryWithSegmentList() %q: %w", segments[i].getPath(), err)
		}
	}
	return nil, nil, nil, lsmkv.NotFound
}

// getCollection collects the values stored under key across all provided
// segments, oldest first, concatenating every segment's contribution.
// Segments that don't contain the key are skipped.
func (sg *SegmentGroup) getCollection(key []byte, segments []Segment) ([]value, error) {
	var merged []value

	// iterate oldest-to-newest and never exit early: every segment may
	// contribute values for the key
	for _, seg := range segments {
		vals, err := seg.getCollection(key)
		if err != nil {
			if errors.Is(err, lsmkv.NotFound) {
				continue
			}
			return nil, err
		}

		// NOTE(review): the first non-empty result is adopted as-is (aliasing
		// the segment's slice) rather than copied — preserved from the
		// original implementation
		if len(merged) == 0 {
			merged = vals
		} else {
			merged = append(merged, vals...)
		}
	}

	return merged, nil
}

// getCollectionAndSegments collects, per segment, the values stored under key
// and returns them alongside the segment each batch came from. Segments that
// don't contain the key are skipped — except inverted segments, which are
// kept (with a nil value slice) because callers still need their tombstones.
// Honors ctx cancellation between segments.
func (sg *SegmentGroup) getCollectionAndSegments(ctx context.Context, key []byte, segments []Segment) ([][]value, []Segment, error) {
	out := make([][]value, len(segments))
	outSegments := make([]Segment, len(segments))

	i := 0
	// start with first and do not exit
	for _, segment := range segments {
		if ctx.Err() != nil {
			return nil, nil, ctx.Err()
		}
		v, err := segment.getCollection(key)
		if err != nil {
			if !errors.Is(err, lsmkv.NotFound) {
				return nil, nil, err
			}
			// inverted segments need to be loaded anyway, even if they don't have
			// the key, as we need to know if they have tombstones
			if segment.getStrategy() != segmentindex.StrategyInverted {
				continue
			}
		}

		out[i] = v
		outSegments[i] = segment
		i++
	}

	// trim both slices to the segments that actually contributed
	return out[:i], outSegments[:i], nil
}

// roaringSetGet reads the roaring-set layers for key across the given
// segments. The first segment that contains the key provides the base layer
// (allocated from a pooled buffer with 25% headroom); all later segments are
// merged into that base layer in place. The returned release function must be
// called by the caller to return the pooled buffer; on error it is invoked
// here and a no-op release is returned instead.
func (sg *SegmentGroup) roaringSetGet(key []byte, segments []Segment) (out roaringset.BitmapLayers, release func(), err error) {
	ln := len(segments)
	if ln == 0 {
		return nil, noopRelease, nil
	}

	release = noopRelease
	// use bigger buffer for first layer, to make space for further merges
	// with following layers
	bitmapBufPool := roaringset.NewBitmapBufPoolFactorWrapper(sg.bitmapBufPool, 1.25)

	// find the first segment that has the key; it becomes layer 0
	i := 0
	for ; i < ln; i++ {
		layer, layerRelease, err := segments[i].roaringSetGet(key, bitmapBufPool)
		if err == nil {
			out = append(out, layer)
			release = layerRelease
			i++
			break
		}
		if !errors.Is(err, lsmkv.NotFound) {
			return nil, noopRelease, err
		}
	}
	// from here on, a failure must free the already-acquired buffer
	defer func() {
		if err != nil {
			release()
		}
	}()

	// merge all remaining segments into the base layer in place
	for ; i < ln; i++ {
		if err := segments[i].roaringSetMergeWith(key, out[0], sg.bitmapBufPool); err != nil {
			return nil, noopRelease, err
		}
	}

	return out, release, nil
}

// count returns the total net-addition count over a consistent view of the
// current segments.
func (sg *SegmentGroup) count() int {
	view, done := sg.getConsistentViewOfSegments()
	defer done()

	return sg.countWithSegmentList(view)
}

// countWithSegmentList sums the net additions of the given segments.
func (sg *SegmentGroup) countWithSegmentList(segments []Segment) int {
	total := 0
	for _, segment := range segments {
		total += segment.getCountNetAdditions()
	}
	return total
}

// shutdown stops compaction and cleanup, waits for all outstanding consistent
// views to release their segment references, then closes all segments and
// clears the segment list.
func (sg *SegmentGroup) shutdown(ctx context.Context) error {
	if err := sg.compactionCallbackCtrl.Unregister(ctx); err != nil {
		// fixed: previously this wrapped ctx.Err() instead of err, which
		// dropped the real cause and could wrap a nil error when Unregister
		// failed for a non-context reason
		return fmt.Errorf("long-running compaction in progress: %w", err)
	}
	if err := sg.segmentCleaner.close(); err != nil {
		return err
	}

	// TODO aliszka:copy-on-read forbid consistent view to be created from that point
	sg.segmentRefCounterLock.Lock()
	segmentsWithRefs := make([]Segment, 0, len(sg.segmentsWithRefs))
	for _, seg := range sg.segmentsWithRefs {
		segmentsWithRefs = append(segmentsWithRefs, seg)
	}
	sg.segmentRefCounterLock.Unlock()
	sg.waitForReferenceCountToReachZero(segmentsWithRefs...)

	// Lock acquirement placed after compaction cycle stop request, due to occasional deadlock,
	// because compaction logic used in cycle also requires maintenance lock.
	//
	// If lock is grabbed by shutdown method and compaction in cycle loop starts right after,
	// it is blocked waiting for the same lock, eventually blocking entire cycle loop and preventing to read stop signal.
	// If stop signal can not be read, shutdown will not receive stop result and will not proceed with further execution.
	// Maintenance lock will then never be released.
	sg.maintenanceLock.Lock()
	defer sg.maintenanceLock.Unlock()

	for _, seg := range sg.segments {
		if err := seg.close(); err != nil {
			return err
		}
	}

	// make sure the segment list itself is set to nil. In case a memtable will
	// still flush after closing, it might try to read from a disk segment list
	// otherwise and run into nil-pointer problems.
	sg.segments = nil

	return nil
}

// UpdateStatus sets the group's storage status under the status lock.
func (sg *SegmentGroup) UpdateStatus(status storagestate.Status) {
	sg.statusLock.Lock()
	sg.status = status
	sg.statusLock.Unlock()
}

// isReadyOnly reports whether the group is currently in read-only status.
// (The name is kept as-is for compatibility with existing callers, even
// though "isReadOnly" would be the conventional spelling.)
func (sg *SegmentGroup) isReadyOnly() bool {
	sg.statusLock.Lock()
	readOnly := sg.status == storagestate.StatusReadOnly
	sg.statusLock.Unlock()
	return readOnly
}

// fileExists reports whether a file exists at path. A non-existence result
// is not an error; any other stat failure is returned to the caller.
func fileExists(path string) (bool, error) {
	switch _, err := os.Stat(path); {
	case err == nil:
		return true, nil
	case errors.Is(err, fs.ErrNotExist):
		return false, nil
	default:
		return false, err
	}
}

// segmentExistsWithID reports whether files contains a segment file for the
// given ID and, if so, returns its file name.
//
// segment file format is "segment-{segmentID}.EXT" where EXT is either
// - ".db" if extra infos in filename are not used
// - ".{extra_infos}.db" if extra infos in filename are used
func segmentExistsWithID(segmentID string, files map[string]int64) (bool, string) {
	prefix := "segment-" + segmentID + "."
	for name := range files {
		if strings.HasPrefix(name, prefix) && strings.HasSuffix(name, ".db") {
			return true, name
		}
	}
	return false, ""
}

// compactOrCleanup is the cycle callback that runs either one compaction or
// one cleanup pass. It returns whether any work was performed.
func (sg *SegmentGroup) compactOrCleanup(shouldAbort cyclemanager.ShouldAbortCallback) bool {
	sg.monitorSegments()

	// compact runs a single compaction attempt and reports whether anything
	// was compacted; errors are logged, not propagated
	compact := func() bool {
		sg.lastCompactionCall = time.Now()
		compacted, err := sg.compactOnce()
		if err != nil {
			sg.logger.WithField("action", "lsm_compaction").
				WithField("path", sg.dir).
				WithError(err).
				Errorf("compaction failed")
		} else if !compacted {
			sg.logger.WithField("action", "lsm_compaction").
				WithField("path", sg.dir).
				Trace("no segments eligible for compaction")
		}
		return compacted
	}
	// cleanup runs a single cleanup attempt and reports whether anything was
	// cleaned; errors are logged, not propagated
	cleanup := func() bool {
		sg.lastCleanupCall = time.Now()
		cleaned, err := sg.segmentCleaner.cleanupOnce(shouldAbort)
		if err != nil {
			sg.logger.WithField("action", "lsm_cleanup").
				WithField("path", sg.dir).
				WithError(err).
				Errorf("cleanup failed")
		}
		return cleaned
	}

	// alternatively run compaction or cleanup first
	// if 1st one called succeeds, 2nd one is skipped, otherwise 2nd one is called as well
	//
	// compaction has the precedence over cleanup, however if cleanup
	// was not called for over [forceCleanupInterval], force at least one execution
	// in between compactions.
	// (ignore if compaction was not called within that time either)
	forceCleanupInterval := time.Hour * 12

	if time.Since(sg.lastCleanupCall) > forceCleanupInterval && sg.lastCleanupCall.Before(sg.lastCompactionCall) {
		return cleanup() || compact()
	}
	return compact() || cleanup()
}

// Len returns the number of segments in a consistent view of the group.
func (sg *SegmentGroup) Len() int {
	view, done := sg.getConsistentViewOfSegments()
	defer done()

	return len(view)
}

// GetAveragePropertyLength returns the document-count-weighted average
// property length across all segments, together with the total document
// count. With no segments it returns (0, 0); with segments but a zero total
// count it returns (defaultAveragePropLength, 0).
func (sg *SegmentGroup) GetAveragePropertyLength() (float64, uint64) {
	view, done := sg.getConsistentViewOfSegments()
	defer done()

	if len(view) == 0 {
		return 0, 0
	}

	// first pass: total document count across all segments
	var docCount uint64
	for _, seg := range view {
		docCount += seg.getInvertedData().avgPropertyLengthsCount
	}

	if docCount == 0 {
		return defaultAveragePropLength, 0
	}

	// second pass: weight each segment's average by its share of documents
	weighted := 0.0
	for _, seg := range view {
		data := seg.getInvertedData()
		weighted += float64(data.avgPropertyLengthsCount) / float64(docCount) * data.avgPropertyLengthsAvg
	}

	return weighted, docCount
}
