// SPDX-License-Identifier: AGPL-3.0-only
// Provenance-includes-location: https://github.com/grafana/mimir/blob/main/pkg/compactor/bucket_compactor.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: The Cortex Authors.

package compactor

import (
	"context"
	"fmt"
	"os"
	"path"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/grafana/dskit/concurrency"
	"github.com/grafana/dskit/multierror"
	"github.com/grafana/dskit/runutil"
	"github.com/oklog/ulid/v2"
	"github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/prometheus/model/labels"
	"go.uber.org/atomic"

	"github.com/grafana/pyroscope/pkg/objstore"
	"github.com/grafana/pyroscope/pkg/objstore/client"
	"github.com/grafana/pyroscope/pkg/objstore/providers/filesystem"
	"github.com/grafana/pyroscope/pkg/phlaredb"
	"github.com/grafana/pyroscope/pkg/phlaredb/block"
)

// DeduplicateFilter is a block.MetadataFilter which additionally remembers
// which blocks it filtered out as duplicates, so that garbage collection can
// later mark them for deletion.
type DeduplicateFilter interface {
	block.MetadataFilter

	// DuplicateIDs returns IDs of duplicate blocks generated by last call to Filter method.
	DuplicateIDs() []ulid.ULID
}

// Syncer synchronizes block metas from a bucket into a local directory.
// It sorts them into compaction groups based on equal label sets.
type Syncer struct {
	logger                  log.Logger
	bkt                     objstore.Bucket
	fetcher                 *block.MetaFetcher
	mtx                     sync.Mutex                // guards blocks
	blocks                  map[ulid.ULID]*block.Meta // metas loaded by the last SyncMetas call
	metrics                 *syncerMetrics
	deduplicateBlocksFilter DeduplicateFilter // source of duplicate block IDs for GarbageCollect
}

// syncerMetrics holds the metrics tracked by Syncer.
type syncerMetrics struct {
	garbageCollections        prometheus.Counter   // completed GC runs
	garbageCollectionFailures prometheus.Counter   // GC runs that failed to mark a block
	garbageCollectionDuration prometheus.Histogram // wall-clock duration of a GC run
	blocksMarkedForDeletion   prometheus.Counter   // shared counter, provided by the caller
}

// newSyncerMetrics creates and registers the Syncer metrics with reg.
// The blocksMarkedForDeletion counter is shared with other components, so it
// is created (and registered) by the caller and only referenced here.
func newSyncerMetrics(reg prometheus.Registerer, blocksMarkedForDeletion prometheus.Counter) *syncerMetrics {
	return &syncerMetrics{
		garbageCollections: promauto.With(reg).NewCounter(prometheus.CounterOpts{
			Name: "thanos_compact_garbage_collection_total",
			Help: "Total number of garbage collection operations.",
		}),
		garbageCollectionFailures: promauto.With(reg).NewCounter(prometheus.CounterOpts{
			Name: "thanos_compact_garbage_collection_failures_total",
			Help: "Total number of failed garbage collection operations.",
		}),
		garbageCollectionDuration: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
			Name:    "thanos_compact_garbage_collection_duration_seconds",
			Help:    "Time it took to perform garbage collection iteration.",
			Buckets: []float64{0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120, 240, 360, 720},
		}),
		blocksMarkedForDeletion: blocksMarkedForDeletion,
	}
}

// NewMetaSyncer returns a new Syncer for the given Bucket and directory.
// Blocks must be at least as old as the sync delay for being considered.
func NewMetaSyncer(logger log.Logger, reg prometheus.Registerer, bkt objstore.Bucket, fetcher *block.MetaFetcher, deduplicateBlocksFilter DeduplicateFilter, blocksMarkedForDeletion prometheus.Counter) (*Syncer, error) {
	if logger == nil {
		logger = log.NewNopLogger()
	}
	s := &Syncer{
		logger:                  logger,
		bkt:                     bkt,
		fetcher:                 fetcher,
		blocks:                  make(map[ulid.ULID]*block.Meta),
		metrics:                 newSyncerMetrics(reg, blocksMarkedForDeletion),
		deduplicateBlocksFilter: deduplicateBlocksFilter,
	}
	return s, nil
}

// SyncMetas synchronizes local state of block metas with what we have in the bucket.
func (s *Syncer) SyncMetas(ctx context.Context) error {
	sp, ctx := opentracing.StartSpanFromContext(ctx, "SyncMetas")
	defer sp.Finish()

	s.mtx.Lock()
	defer s.mtx.Unlock()

	// The fetcher drops blocks carrying a deletion marker with no deletion
	// delay, so they never become compaction candidates.
	metas, _, err := s.fetcher.FetchWithoutMarkedForDeletion(ctx)
	if err != nil {
		return err
	}

	s.blocks = metas
	return nil
}

// Metas returns loaded metadata blocks since last sync.
// Note that the internal map is returned directly, not a copy.
func (s *Syncer) Metas() map[ulid.ULID]*block.Meta {
	s.mtx.Lock()
	blocks := s.blocks
	s.mtx.Unlock()

	return blocks
}

// GarbageCollect marks blocks for deletion from bucket if their data is available as part of a
// block with a higher compaction level.
// Call to SyncMetas function is required to populate duplicateIDs in duplicateBlocksFilter.
func (s *Syncer) GarbageCollect(ctx context.Context) error {
	sp, ctx := opentracing.StartSpanFromContext(ctx, "GarbageCollect")
	defer sp.Finish()
	// Hold the lock for the whole run: we read the dedup filter state and
	// mutate s.blocks below.
	s.mtx.Lock()
	defer s.mtx.Unlock()

	begin := time.Now()

	// The deduplication filter is applied after all blocks marked for deletion have been excluded
	// (with no deletion delay), so we expect that all duplicated blocks have not been marked for
	// deletion yet. Even in the remote case these blocks have already been marked for deletion,
	// the block.MarkForDeletion() call will correctly handle it.
	duplicateIDs := s.deduplicateBlocksFilter.DuplicateIDs()

	for _, id := range duplicateIDs {
		// Honour cancellation between blocks, but not in the middle of marking one.
		if ctx.Err() != nil {
			return ctx.Err()
		}

		// Spawn a new context so we always mark a block for deletion in full on shutdown.
		delCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)

		level.Info(s.logger).Log("msg", "marking outdated block for deletion", "block", id)
		err := block.MarkForDeletion(delCtx, s.logger, s.bkt, id, "outdated block", false, s.metrics.blocksMarkedForDeletion)
		cancel()
		if err != nil {
			s.metrics.garbageCollectionFailures.Inc()
			return errors.Wrapf(err, "mark block %s for deletion", id)
		}

		// Immediately update our in-memory state so no further call to SyncMetas is needed
		// after running garbage collection.
		delete(s.blocks, id)
	}
	s.metrics.garbageCollections.Inc()
	s.metrics.garbageCollectionDuration.Observe(time.Since(begin).Seconds())
	return nil
}

// Grouper is responsible to group all known blocks into compaction Job which are safe to be
// compacted concurrently.
type Grouper interface {
	// Groups returns the compaction jobs for all blocks currently known to the syncer.
	// It creates all jobs from scratch on every call.
	Groups(blocks map[ulid.ULID]*block.Meta) (res []*Job, err error)
}

// DefaultGroupKey returns a unique identifier for the group the block belongs to, based on
// the DefaultGrouper logic. It considers the downsampling resolution and the block's labels.
func DefaultGroupKey(meta block.Meta) string {
	lbls := labels.FromMap(meta.Labels)
	return defaultGroupKey(meta.Resolution, lbls)
}

// defaultGroupKey builds the "<resolution>@<stable label hash>" group key.
func defaultGroupKey(res int64, lbls labels.Labels) string {
	hash := labels.StableHash(lbls)
	return fmt.Sprintf("%d@%v", res, hash)
}

// minTime returns the smallest MinTime across metas, converted from
// milliseconds to a UTC time.Time. It returns the zero time.Time when metas
// is empty.
func minTime(metas []*block.Meta) time.Time {
	if len(metas) == 0 {
		return time.Time{}
	}

	lowest := metas[0].MinTime
	for _, m := range metas[1:] {
		if m.MinTime < lowest {
			lowest = m.MinTime
		}
	}

	return time.Unix(0, int64(lowest)*int64(time.Millisecond)).UTC()
}

// maxTime returns the largest MaxTime across metas, converted from
// milliseconds to a UTC time.Time. It returns the zero time.Time when metas
// is empty.
func maxTime(metas []*block.Meta) time.Time {
	if len(metas) == 0 {
		return time.Time{}
	}

	highest := metas[0].MaxTime
	for _, m := range metas[1:] {
		if m.MaxTime > highest {
			highest = m.MaxTime
		}
	}

	return time.Unix(0, int64(highest)*int64(time.Millisecond)).UTC()
}

// Planner returns blocks to compact.
type Planner interface {
	// Plan returns a list of blocks that should be compacted into single one.
	// The blocks can be overlapping. The provided metadata has to be ordered by minTime.
	Plan(ctx context.Context, metasByMinTime []*block.Meta) ([]*block.Meta, error)
}

// Compactor provides compaction against an underlying storage of profiling data.
type Compactor interface {
	// CompactWithSplitting merges and splits the source blocks into shardCount number of compacted blocks,
	// and returns slice of block IDs.
	// If given compacted block has no series, corresponding block ID will not be returned.
	CompactWithSplitting(ctx context.Context, dst string, dirs []string, shardCount, stageSize uint64) (result []ulid.ULID, _ error)
}

// Names of the supported strategies for splitting output blocks during compaction.
const (
	CompactionSplitByFingerprint         = "fingerprint"
	CompactionSplitByStacktracePartition = "stacktracePartition"
)

// CompactionSplitBys lists all valid compaction split-by strategy names.
var CompactionSplitBys = []string{CompactionSplitByFingerprint, CompactionSplitByStacktracePartition}

// getCompactionSplitBy resolves a split-by strategy name to its phlaredb
// implementation. It returns nil for unknown names.
func getCompactionSplitBy(name string) phlaredb.SplitByFunc {
	if name == CompactionSplitByFingerprint {
		return phlaredb.SplitByFingerprint
	}
	if name == CompactionSplitByStacktracePartition {
		return phlaredb.SplitByStacktracePartition
	}
	return nil
}

// BlockCompactor implements Compactor on top of phlaredb's block compaction.
type BlockCompactor struct {
	blockOpenConcurrency int  // max number of source blocks opened concurrently
	downsamplerEnabled   bool // whether downsampling runs as part of compaction
	splitBy              phlaredb.SplitByFunc
	logger               log.Logger
	metrics              *CompactorMetrics
}

// CompactorMetrics holds the per-compaction-level metrics tracked by BlockCompactor.
type CompactorMetrics struct {
	Ran               *prometheus.CounterVec   // compactions executed, by level
	InProgress        *prometheus.GaugeVec     // compactions currently running, by level
	OverlappingBlocks prometheus.Counter       // compactions over overlapping blocks
	Duration          *prometheus.HistogramVec // compaction run duration, by level
	Size              *prometheus.HistogramVec // output block size in bytes, by level
	Samples           *prometheus.HistogramVec // output sample count, by level
	Range             *prometheus.HistogramVec // output time range in seconds, by level
	Split             *prometheus.HistogramVec // split factor used, by level
}

// newCompactorMetrics creates the BlockCompactor metrics and, when r is
// non-nil, registers every collector with it.
func newCompactorMetrics(r prometheus.Registerer) *CompactorMetrics {
	m := &CompactorMetrics{}

	m.Ran = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "pyroscope_compactions_total",
		Help: "Total number of compactions that were executed per level.",
	}, []string{"level"})
	m.InProgress = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "pyroscope_compactions_current",
		Help: "The amount of compaction in progress per level",
	}, []string{"level"})
	m.OverlappingBlocks = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "pyroscope_vertical_compactions_total",
		Help: "Total number of compactions done on overlapping blocks.",
	})
	m.Duration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "pyroscope_compaction_duration_seconds",
		Help:    "Duration of compaction runs",
		Buckets: prometheus.ExponentialBuckets(1, 2, 14),
	}, []string{"level"})
	m.Size = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "pyroscope_compaction_size_bytes",
		Help:    "Final block size after compaction by level",
		Buckets: prometheus.ExponentialBuckets(32, 1.5, 12),
	}, []string{"level"})
	m.Samples = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "pyroscope_compaction_samples",
		Help:    "Final number of samples after compaction by level",
		Buckets: prometheus.ExponentialBuckets(4, 1.5, 12),
	}, []string{"level"})
	m.Range = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "pyroscope_compaction_range_seconds",
		Help:    "Final time range after compaction by level.",
		Buckets: prometheus.ExponentialBuckets(100, 4, 10),
	}, []string{"level"})
	m.Split = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "pyroscope_compaction_splits",
		Help:    "Compaction split factor by level.",
		Buckets: []float64{1, 2, 4, 8, 16, 32, 64},
	}, []string{"level"})

	if r != nil {
		// Register every collector created above. m.Split was previously
		// created but never registered, which silently hid the
		// pyroscope_compaction_splits histogram.
		r.MustRegister(
			m.Ran,
			m.InProgress,
			m.OverlappingBlocks,
			m.Duration,
			m.Range,
			m.Samples,
			m.Size,
			m.Split,
		)
	}
	return m
}

// CompactWithSplitting opens the source blocks found in dirs (which must live
// under dest on local disk), merges them and splits the result into shardCount
// blocks written under dest, returning the ULIDs of the blocks produced.
//
// A panic during compaction is logged together with the offending input dirs
// and then re-raised, so the caller still observes it.
func (c *BlockCompactor) CompactWithSplitting(ctx context.Context, dest string, dirs []string, shardCount, stageSize uint64) ([]ulid.ULID, error) {
	defer func() {
		if err := recover(); err != nil {
			level.Error(c.logger).Log("msg", "panic during compaction", "err", err, "dirs", strings.Join(dirs, ","))
			panic(err)
		}
	}()
	// Read the already-downloaded source blocks through a filesystem-backed
	// bucket rooted at dest.
	localBucket, err := client.NewBucket(ctx, client.Config{
		StorageBackendConfig: client.StorageBackendConfig{
			Backend:    client.Filesystem,
			Filesystem: filesystem.Config{Directory: dest},
		},
	}, "local-compactor")
	if err != nil {
		return nil, errors.Wrap(err, "create local bucket")
	}
	defer localBucket.Close()

	readers := make([]phlaredb.BlockReader, len(dirs))
	defer func() {
		// Close every block that was successfully opened, even on error paths.
		for _, b := range readers {
			if b != nil {
				if err := b.Close(); err != nil {
					level.Warn(c.logger).Log("msg", "failed to close block", "err", err)
				}
			}
		}
	}()

	err = func() error {
		sp, ctx := opentracing.StartSpanFromContext(ctx, "OpenBlocks", opentracing.Tag{Key: "concurrency", Value: c.blockOpenConcurrency})
		defer sp.Finish()
		// Open all blocks, bounded by blockOpenConcurrency.
		return concurrency.ForEachJob(ctx, len(readers), c.blockOpenConcurrency, func(ctx context.Context, idx int) error {
			dir := dirs[idx]
			meta, err := block.ReadMetaFromDir(dir)
			if err != nil {
				return errors.Wrapf(err, "failed to read meta the block dir %s", dir)
			}
			b := phlaredb.NewSingleBlockQuerierFromMeta(ctx, localBucket, meta)
			if err := b.Open(ctx); err != nil {
				return errors.Wrapf(err, "open block %s", meta.ULID)
			}
			readers[idx] = b
			return nil
		})
	}()
	if err != nil {
		return nil, err
	}

	// The output compaction level is one above the highest input level.
	currentLevel := 0
	for _, r := range readers {
		if lvl := r.Meta().Compaction.Level; lvl > currentLevel {
			currentLevel = lvl
		}
	}
	currentLevel++
	if sp := opentracing.SpanFromContext(ctx); sp != nil {
		sp.SetTag("compaction_level", currentLevel)
	}

	// Format the metric label value once instead of at every metric update.
	levelLabel := fmt.Sprintf("%d", currentLevel)

	start := time.Now()
	defer func() {
		c.metrics.Duration.WithLabelValues(levelLabel).Observe(time.Since(start).Seconds())
		c.metrics.InProgress.WithLabelValues(levelLabel).Dec()
	}()
	c.metrics.InProgress.WithLabelValues(levelLabel).Inc()
	c.metrics.Ran.WithLabelValues(levelLabel).Inc()
	c.metrics.Split.WithLabelValues(levelLabel).Observe(float64(shardCount))

	metas, err := phlaredb.CompactWithSplitting(ctx, phlaredb.CompactWithSplittingOpts{
		Src:                readers,
		Dst:                dest,
		SplitCount:         shardCount,
		StageSize:          stageSize,
		SplitBy:            c.splitBy,
		DownsamplerEnabled: c.downsamplerEnabled,
		Logger:             c.logger,
	})
	if err != nil {
		return nil, errors.Wrapf(err, "compact blocks %v", dirs)
	}

	// Record per-level stats for each output block.
	for _, m := range metas {
		c.metrics.Range.WithLabelValues(levelLabel).Observe(float64(m.MaxTime-m.MinTime) / 1000)
		c.metrics.Samples.WithLabelValues(levelLabel).Observe(float64(m.Stats.NumSamples))
		size := float64(0)
		for _, f := range m.Files {
			size += float64(f.SizeBytes)
		}
		c.metrics.Size.WithLabelValues(levelLabel).Observe(size)
	}

	result := make([]ulid.ULID, len(metas))
	for i := range metas {
		result[i] = metas[i].ULID
	}
	return result, nil
}

// runCompactionJob plans and runs a single compaction against the provided job. The compacted result
// is uploaded into the bucket the blocks were retrieved from.
//
// It returns shouldRerun=true when the job produced new blocks and may have
// more work to do in a subsequent pass, together with the ULIDs of the
// compacted blocks. The job's working directory is always removed on return.
func (c *BucketCompactor) runCompactionJob(ctx context.Context, job *Job) (shouldRerun bool, compIDs []ulid.ULID, rerr error) {
	jobBeginTime := time.Now()

	jobLogger := log.With(c.logger, "groupKey", job.Key())
	subDir := filepath.Join(c.compactDir, job.Key())

	defer func() {
		elapsed := time.Since(jobBeginTime)

		if rerr == nil {
			level.Info(jobLogger).Log("msg", "compaction job succeeded", "duration", elapsed, "duration_ms", elapsed.Milliseconds())
		} else {
			level.Error(jobLogger).Log("msg", "compaction job failed", "duration", elapsed, "duration_ms", elapsed.Milliseconds(), "err", rerr)
		}

		// Always clean up the job's working directory, success or failure.
		if err := os.RemoveAll(subDir); err != nil {
			level.Error(jobLogger).Log("msg", "failed to remove compaction group work directory", "path", subDir, "err", err)
		}
	}()

	if err := os.MkdirAll(subDir, 0o750); err != nil {
		return false, nil, errors.Wrap(err, "create compaction job dir")
	}

	toCompact, err := c.planner.Plan(ctx, job.metasByMinTime)
	if err != nil {
		return false, nil, errors.Wrap(err, "plan compaction")
	}
	if len(toCompact) == 0 {
		// Nothing to do.
		return false, nil, nil
	}

	// The planner returned some blocks to compact, so we can enrich the logger
	// with the min/max time between all blocks to compact.
	toCompactMinTime := minTime(toCompact)
	toCompactMaxTime := maxTime(toCompact)
	jobLogger = log.With(jobLogger, "minTime", toCompactMinTime.String(), "maxTime", toCompactMaxTime.String())

	level.Info(jobLogger).Log("msg", "compaction available and planned; downloading blocks", "blocks", len(toCompact), "plan", fmt.Sprintf("%v", toCompact))

	sp, ctx := opentracing.StartSpanFromContext(ctx, "CompactJob",
		opentracing.Tag{Key: "GroupKey", Value: job.Key()},
		opentracing.Tag{Key: "Job", Value: job.String()},
		opentracing.Tag{Key: "Labels", Value: job.Labels().String()},
		opentracing.Tag{Key: "MinCompactionLevel", Value: job.MinCompactionLevel()},
		opentracing.Tag{Key: "Resolution", Value: job.Resolution()},
		opentracing.Tag{Key: "ShardKey", Value: job.ShardingKey()},
		opentracing.Tag{Key: "SplitStageSize", Value: job.SplitStageSize()},
		opentracing.Tag{Key: "UseSplitting", Value: job.UseSplitting()},
		opentracing.Tag{Key: "SplittingShards", Value: job.SplittingShards()},
		opentracing.Tag{Key: "BlockCount", Value: len(toCompact)},
	)
	defer sp.Finish()

	blocksToCompactDirs := make([]string, len(toCompact))
	// Once we have a plan we need to download the actual data.
	downloadBegin := time.Now()

	err = func() error {
		sp, ctx := opentracing.StartSpanFromContext(ctx, "DownloadBlocks")
		defer func() {
			elapsed := time.Since(downloadBegin)
			level.Info(jobLogger).Log("msg", "downloaded and verified blocks; compacting blocks", "blocks", len(blocksToCompactDirs), "plan", fmt.Sprintf("%v", blocksToCompactDirs), "duration", elapsed, "duration_ms", elapsed.Milliseconds())
			sp.Finish()
		}()

		// Download the planned blocks concurrently, bounded by blockSyncConcurrency.
		if err := concurrency.ForEachJob(ctx, len(toCompact), c.blockSyncConcurrency, func(ctx context.Context, idx int) error {
			meta := toCompact[idx]
			// Must be the same as in blocksToCompactDirs.
			bdir := filepath.Join(subDir, meta.ULID.String())
			if err := block.Download(ctx, jobLogger, c.bkt, meta.ULID, bdir); err != nil {
				return errors.Wrapf(err, "download block %s", meta.ULID)
			}

			return nil
		}); err != nil {
			return err
		}

		for ix, meta := range toCompact {
			blocksToCompactDirs[ix] = filepath.Join(subDir, meta.ULID.String())
		}
		return nil
	}()
	if err != nil {
		ext.LogError(sp, err)
		return false, nil, err
	}

	err = func() error {
		sp, ctx := opentracing.StartSpanFromContext(ctx, "CompactBlocks")
		compactionBegin := time.Now()
		defer func() {
			sp.Finish()
			elapsed := time.Since(compactionBegin)
			level.Info(jobLogger).Log("msg", "compacted blocks", "new", fmt.Sprintf("%v", compIDs), "blocks", fmt.Sprintf("%v", blocksToCompactDirs), "duration", elapsed, "duration_ms", elapsed.Milliseconds())
		}()
		// A non-splitting job is executed as a single-shard split with no staging.
		if job.UseSplitting() {
			compIDs, err = c.comp.CompactWithSplitting(ctx, subDir, blocksToCompactDirs, uint64(job.SplittingShards()), uint64(job.SplitStageSize()))
		} else {
			compIDs, err = c.comp.CompactWithSplitting(ctx, subDir, blocksToCompactDirs, 1, 0)
		}
		outputDirs := make([]string, len(compIDs))
		for i, id := range compIDs {
			outputDirs[i] = filepath.Join(subDir, id.String())
		}
		sp.SetTag("input_dirs", blocksToCompactDirs)
		sp.SetTag("output_dirs", outputDirs)
		return err
	}()
	if err != nil {
		ext.LogError(sp, err)
		return false, nil, errors.Wrapf(err, "compact blocks %v", blocksToCompactDirs)
	}

	// Sanity-check that the output blocks' time ranges stay within the inputs'.
	if err = verifyCompactedBlocksTimeRanges(compIDs, toCompactMinTime.UnixMilli(), toCompactMaxTime.UnixMilli(), subDir); err != nil {
		level.Error(jobLogger).Log("msg", "compacted blocks verification failed", "err", err)
		c.metrics.compactionBlocksVerificationFailed.Inc()
		return false, nil, err
	}

	// Spawn a new context so we always finish uploading and marking a block for deletion in full on shutdown.
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Minute)
	ctx = opentracing.ContextWithSpan(ctx, sp)
	defer cancel()

	err = func() error {
		sp, ctx := opentracing.StartSpanFromContext(ctx, "Uploading blocks", opentracing.Tag{Key: "count", Value: len(compIDs)})
		uploadBegin := time.Now()
		uploadedBlocks := atomic.NewInt64(0)
		defer func() {
			elapsed := time.Since(uploadBegin)
			level.Info(jobLogger).Log("msg", "uploaded all blocks", "blocks", uploadedBlocks, "duration", elapsed, "duration_ms", elapsed.Milliseconds())
			sp.Finish()
		}()
		return concurrency.ForEachJob(ctx, len(compIDs), c.blockSyncConcurrency, func(ctx context.Context, idx int) error {
			ulidToUpload := compIDs[idx]

			uploadedBlocks.Inc()

			bdir := filepath.Join(subDir, ulidToUpload.String())

			newMeta, err := block.ReadMetaFromDir(bdir)
			if err != nil {
				return errors.Wrapf(err, "failed to read meta the block dir %s", bdir)
			}

			// Ensure the compacted block is valid.
			if err := phlaredb.ValidateLocalBlock(ctx, bdir); err != nil {
				return errors.Wrapf(err, "invalid result block %s", bdir)
			}

			begin := time.Now()
			if err := block.Upload(ctx, jobLogger, c.bkt, bdir); err != nil {
				return errors.Wrapf(err, "upload of %s failed", ulidToUpload)
			}

			elapsed := time.Since(begin)
			level.Info(jobLogger).Log("msg", "uploaded block", "result_block", ulidToUpload, "duration", elapsed, "duration_ms", elapsed.Milliseconds(), "labels", labels.FromMap(newMeta.Labels))
			return nil
		})
	}()

	if err != nil {
		ext.LogError(sp, err)
		return false, nil, err
	}

	sp, ctx = opentracing.StartSpanFromContext(ctx, "Deleting blocks", opentracing.Tag{Key: "count", Value: len(compIDs)})
	defer sp.Finish()
	// Mark for deletion the blocks we just compacted from the job and bucket so they do not get included
	// into the next planning cycle.
	// Eventually the block we just uploaded should get synced into the job again (including sync-delay).
	for _, meta := range toCompact {
		if err := deleteBlock(ctx, c.bkt, meta.ULID, filepath.Join(subDir, meta.ULID.String()), jobLogger, c.metrics.blocksMarkedForDeletion); err != nil {
			return false, nil, errors.Wrapf(err, "mark old block for deletion from bucket")
		}
	}

	return true, compIDs, nil
}

// verifyCompactedBlocksTimeRanges does a full run over the compacted blocks
// and verifies that they satisfy the min/maxTime from the source blocks:
// every output block must fit within [sourceBlocksMinTime, sourceBlocksMaxTime],
// and the two endpoints must each appear in at least one output block.
func verifyCompactedBlocksTimeRanges(compIDs []ulid.ULID, sourceBlocksMinTime, sourceBlocksMaxTime int64, subDir string) error {
	var minFound, maxFound bool

	for _, id := range compIDs {
		// A zero ULID denotes an empty output block; skip it.
		if id == (ulid.ULID{}) {
			continue
		}

		bdir := filepath.Join(subDir, id.String())
		meta, err := block.ReadMetaFromDir(bdir)
		if err != nil {
			return errors.Wrapf(err, "failed to read meta.json from %s during block time range verification", bdir)
		}

		minT, maxT := int64(meta.MinTime), int64(meta.MaxTime)
		switch {
		case minT < sourceBlocksMinTime:
			return fmt.Errorf("invalid minTime for block %s, compacted block minTime %d is before source minTime %d", id.String(), meta.MinTime, sourceBlocksMinTime)
		case maxT > sourceBlocksMaxTime:
			return fmt.Errorf("invalid maxTime for block %s, compacted block maxTime %d is after source maxTime %d", id.String(), meta.MaxTime, sourceBlocksMaxTime)
		}

		if minT == sourceBlocksMinTime {
			minFound = true
		}
		if maxT == sourceBlocksMaxTime {
			maxFound = true
		}
	}

	// Check that the minTime and maxTime from the source blocks
	// are found at least once in the compacted blocks.
	if !minFound || !maxFound {
		return fmt.Errorf("compacted block(s) do not contain minTime %d and maxTime %d from the source blocks", sourceBlocksMinTime, sourceBlocksMaxTime)
	}

	return nil
}

// deleteBlock removes the local working copy of the block at bdir and then
// marks the remote copy in the bucket for deletion.
func deleteBlock(ctx context.Context, bkt objstore.Bucket, id ulid.ULID, bdir string, logger log.Logger, blocksMarkedForDeletion prometheus.Counter) error {
	if err := os.RemoveAll(bdir); err != nil {
		return errors.Wrapf(err, "remove old block dir %s", id)
	}

	level.Info(logger).Log("msg", "marking compacted block for deletion", "old_block", id)
	err := block.MarkForDeletion(ctx, logger, bkt, id, "source of compacted block", true, blocksMarkedForDeletion)
	if err != nil {
		return errors.Wrapf(err, "mark block %s for deletion from bucket", id)
	}
	return nil
}

// BucketCompactorMetrics holds the metrics tracked by BucketCompactor.
type BucketCompactorMetrics struct {
	groupCompactionRunsStarted         prometheus.Counter   // job runs started
	groupCompactionRunsCompleted       prometheus.Counter   // job runs completed without error (incl. no-op runs)
	groupCompactionRunsFailed          prometheus.Counter   // job runs that returned an error
	groupCompactions                   prometheus.Counter   // job runs that produced new block(s)
	compactionBlocksVerificationFailed prometheus.Counter   // time-range verification failures
	blocksMarkedForDeletion            prometheus.Counter   // shared counter, provided by the caller
	blocksMarkedForNoCompact           prometheus.Counter   // blocks marked as not compactable
	blocksMaxTimeDelta                 prometheus.Histogram // now minus block maxTime, in seconds
}

// NewBucketCompactorMetrics makes a new BucketCompactorMetrics, registering
// all collectors with reg. The blocksMarkedForDeletion counter is shared with
// other components and therefore created (and registered) by the caller.
func NewBucketCompactorMetrics(blocksMarkedForDeletion prometheus.Counter, reg prometheus.Registerer) *BucketCompactorMetrics {
	m := &BucketCompactorMetrics{}

	m.groupCompactionRunsStarted = promauto.With(reg).NewCounter(prometheus.CounterOpts{
		Name: "pyroscope_compactor_group_compaction_runs_started_total",
		Help: "Total number of group compaction attempts.",
	})
	m.groupCompactionRunsCompleted = promauto.With(reg).NewCounter(prometheus.CounterOpts{
		Name: "pyroscope_compactor_group_compaction_runs_completed_total",
		Help: "Total number of group completed compaction runs. This also includes compactor group runs that resulted with no compaction.",
	})
	m.groupCompactionRunsFailed = promauto.With(reg).NewCounter(prometheus.CounterOpts{
		Name: "pyroscope_compactor_group_compactions_failures_total",
		Help: "Total number of failed group compactions.",
	})
	m.groupCompactions = promauto.With(reg).NewCounter(prometheus.CounterOpts{
		Name: "pyroscope_compactor_group_compactions_total",
		Help: "Total number of group compaction attempts that resulted in new block(s).",
	})
	m.compactionBlocksVerificationFailed = promauto.With(reg).NewCounter(prometheus.CounterOpts{
		Name: "pyroscope_compactor_blocks_verification_failures_total",
		Help: "Total number of failures when verifying min/max time ranges of compacted blocks.",
	})
	m.blocksMarkedForDeletion = blocksMarkedForDeletion
	m.blocksMarkedForNoCompact = promauto.With(reg).NewCounter(prometheus.CounterOpts{
		Name:        "pyroscope_compactor_blocks_marked_for_no_compaction_total",
		Help:        "Total number of blocks that were marked for no-compaction.",
		ConstLabels: prometheus.Labels{"reason": block.OutOfOrderChunksNoCompactReason},
	})
	m.blocksMaxTimeDelta = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
		Name:    "pyroscope_compactor_block_max_time_delta_seconds",
		Help:    "Difference between now and the max time of a block being compacted in seconds.",
		Buckets: prometheus.LinearBuckets(86400, 43200, 8), // 1 to 5 days, in 12 hour intervals
	})

	return m
}

// ownCompactionJobFunc reports whether this compactor instance owns the given
// job (e.g. based on sharding); jobs not owned are skipped.
type ownCompactionJobFunc func(job *Job) (bool, error)

// ownAllJobs is a ownCompactionJobFunc that always return true.
var ownAllJobs = func(job *Job) (bool, error) {
	return true, nil
}

// BucketCompactor compacts blocks in a bucket.
type BucketCompactor struct {
	logger               log.Logger
	sy                   *Syncer // source of block metas and garbage collection
	grouper              Grouper
	planner              Planner
	comp                 Compactor
	compactDir           string // local working directory for downloads and outputs
	bkt                  objstore.Bucket
	concurrency          int // number of concurrent compaction workers
	ownJob               ownCompactionJobFunc
	sortJobs             JobsOrderFunc
	waitPeriod           time.Duration // minimum block age before it may be compacted
	blockSyncConcurrency int           // concurrency for block download/upload
	metrics              *BucketCompactorMetrics
}

// NewBucketCompactor creates a new bucket compactor.
// concurrency must be greater than zero.
func NewBucketCompactor(
	logger log.Logger,
	sy *Syncer,
	grouper Grouper,
	planner Planner,
	comp Compactor,
	compactDir string,
	bkt objstore.Bucket,
	concurrency int,
	ownJob ownCompactionJobFunc,
	sortJobs JobsOrderFunc,
	waitPeriod time.Duration,
	blockSyncConcurrency int,
	metrics *BucketCompactorMetrics,
) (*BucketCompactor, error) {
	if concurrency <= 0 {
		return nil, errors.Errorf("invalid concurrency level (%d), concurrency level must be > 0", concurrency)
	}

	c := &BucketCompactor{
		logger:               logger,
		sy:                   sy,
		grouper:              grouper,
		planner:              planner,
		comp:                 comp,
		compactDir:           compactDir,
		bkt:                  bkt,
		concurrency:          concurrency,
		ownJob:               ownJob,
		sortJobs:             sortJobs,
		waitPeriod:           waitPeriod,
		blockSyncConcurrency: blockSyncConcurrency,
		metrics:              metrics,
	}
	return c, nil
}

// Compact runs compaction over bucket.
// If maxCompactionTime is positive then after this time no more new compactions are started.
//
// Each iteration of the outer loop syncs metas, garbage collects, builds and
// filters jobs, then feeds them to a pool of worker goroutines. The loop
// repeats until no job reports that it should be re-run, an error occurs, or
// the max compaction time is reached.
func (c *BucketCompactor) Compact(ctx context.Context, maxCompactionTime time.Duration) (rerr error) {
	sp := opentracing.SpanFromContext(ctx)
	if sp == nil {
		sp, ctx = opentracing.StartSpanFromContext(ctx, "Compact")
	}
	sp.SetTag("max_compaction_time", maxCompactionTime)
	sp.SetTag("concurrency", c.concurrency)
	defer func() {
		// Do not remove the compactDir if an error has occurred
		// because potentially on the next run we would not have to download
		// everything again.
		if rerr != nil {
			return
		}
		if err := os.RemoveAll(c.compactDir); err != nil {
			level.Error(c.logger).Log("msg", "failed to remove compaction work directory", "path", c.compactDir, "err", err)
		}
	}()

	var maxCompactionTimeChan <-chan time.Time
	if maxCompactionTime > 0 {
		maxCompactionTimeChan = time.After(maxCompactionTime)
	}

	// Loop over bucket and compact until there's no work left.
	for {
		var (
			wg                     sync.WaitGroup
			workCtx, workCtxCancel = context.WithCancel(ctx)
			jobChan                = make(chan *Job)
			errChan                = make(chan error, c.concurrency)
			finishedAllJobs        = true
			mtx                    sync.Mutex
		)
		// NOTE(review): this defer runs at function return, not per iteration,
		// so cancel funcs accumulate across iterations. workCtxCancel is also
		// called explicitly at the end of each iteration below, so this defer
		// is only a safety net for early returns.
		defer workCtxCancel()

		// Set up workers who will compact the jobs when the jobs are ready.
		// They will compact available jobs until they encounter an error, after which they will stop.
		for i := 0; i < c.concurrency; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for g := range jobChan {
					// Ensure the job is still owned by the current compactor instance.
					// If not, we shouldn't run it because another compactor instance may already
					// process it (or will do it soon).
					if ok, err := c.ownJob(g); err != nil {
						level.Info(c.logger).Log("msg", "skipped compaction because unable to check whether the job is owned by the compactor instance", "groupKey", g.Key(), "err", err)
						continue
					} else if !ok {
						level.Info(c.logger).Log("msg", "skipped compaction because job is not owned by the compactor instance anymore", "groupKey", g.Key())
						continue
					}

					c.metrics.groupCompactionRunsStarted.Inc()

					shouldRerunJob, compactedBlockIDs, err := c.runCompactionJob(workCtx, g)
					if err == nil {
						c.metrics.groupCompactionRunsCompleted.Inc()
						if hasNonZeroULIDs(compactedBlockIDs) {
							c.metrics.groupCompactions.Inc()
						}

						if shouldRerunJob {
							// At least one job wants another pass: keep looping.
							mtx.Lock()
							finishedAllJobs = false
							mtx.Unlock()
						}
						continue
					}

					// At this point the compaction has failed.
					c.metrics.groupCompactionRunsFailed.Inc()

					// errChan is buffered with c.concurrency slots, so every
					// worker can report one error without blocking.
					errChan <- errors.Wrapf(err, "group %s", g.Key())
					return
				}
			}()
		}

		level.Info(c.logger).Log("msg", "start sync of metas")
		if err := c.sy.SyncMetas(ctx); err != nil {
			ext.LogError(sp, err)
			return errors.Wrap(err, "sync")
		}

		level.Info(c.logger).Log("msg", "start of GC")
		// Blocks that were compacted are garbage collected after each Compaction.
		// However if compactor crashes we need to resolve those on startup.
		if err := c.sy.GarbageCollect(ctx); err != nil {
			ext.LogError(sp, err)
			return errors.Wrap(err, "blocks garbage collect")
		}

		jobs, err := c.grouper.Groups(c.sy.Metas())
		if err != nil {
			ext.LogError(sp, err)
			return errors.Wrap(err, "build compaction jobs")
		}
		sp.LogKV("discovered_jobs", len(jobs))

		// There is another check just before we start processing the job, but we can avoid sending it
		// to the goroutine in the first place.
		jobs, err = c.filterOwnJobs(jobs)
		if err != nil {
			return err
		}
		sp.LogKV("own_jobs", len(jobs))

		// Record the difference between now and the max time for a block being compacted. This
		// is used to detect compactors not being able to keep up with the rate of blocks being
		// created. The idea is that most blocks should be for within 24h or 48h.
		now := time.Now()
		for _, delta := range c.blockMaxTimeDeltas(now, jobs) {
			c.metrics.blocksMaxTimeDelta.Observe(delta)
		}

		// Skip jobs for which the wait period hasn't been honored yet.
		jobs = c.filterJobsByWaitPeriod(ctx, jobs)
		sp.LogKV("filtered_jobs", len(jobs))

		// Sort jobs based on the configured ordering algorithm.
		jobs = c.sortJobs(jobs)

		// Delete leftover working directories that don't belong to any current job.
		ignoreDirs := []string{}
		for _, gr := range jobs {
			for _, grID := range gr.IDs() {
				ignoreDirs = append(ignoreDirs, filepath.Join(gr.Key(), grID.String()))
			}
		}

		if err := runutil.DeleteAll(c.compactDir, ignoreDirs...); err != nil {
			level.Warn(c.logger).Log("msg", "failed deleting non-compaction job directories/files, some disk space usage might have leaked. Continuing", "err", err, "dir", c.compactDir)
		}

		level.Info(c.logger).Log("msg", "start of compactions")

		maxCompactionTimeReached := false
		// Send all jobs found during this pass to the compaction workers.
		var jobErrs multierror.MultiError
	jobLoop:
		for _, g := range jobs {
			select {
			case jobErr := <-errChan:
				// A worker failed: stop feeding jobs and drain below.
				ext.LogError(sp, jobErr)
				jobErrs.Add(jobErr)
				break jobLoop
			case jobChan <- g:
			case <-maxCompactionTimeChan:
				maxCompactionTimeReached = true
				level.Info(c.logger).Log("msg", "max compaction time reached, no more compactions will be started")
				sp.LogKV("msg", "max compaction time reached, no more compactions will be started")
				break jobLoop
			}
		}
		// Closing jobChan lets the workers finish their range loops.
		close(jobChan)
		wg.Wait()

		// Collect any other error reported by the workers, or any error reported
		// while we were waiting for the last batch of jobs to run the compaction.
		close(errChan)
		for jobErr := range errChan {
			jobErrs.Add(jobErr)
		}

		workCtxCancel()
		if len(jobErrs) > 0 {
			return jobErrs.Err()
		}

		if maxCompactionTimeReached || finishedAllJobs {
			break
		}
	}
	level.Info(c.logger).Log("msg", "compaction iterations done")
	return nil
}

// blockMaxTimeDeltas returns a slice of the difference between now and the MaxTime of each
// block that will be compacted as part of the provided jobs, in seconds.
func (c *BucketCompactor) blockMaxTimeDeltas(now time.Time, jobs []*Job) []float64 {
	var deltas []float64

	for _, job := range jobs {
		for _, meta := range job.Metas() {
			// Meta.MaxTime is a millisecond timestamp; convert it before diffing.
			maxTime := time.UnixMilli(int64(meta.MaxTime))
			deltas = append(deltas, now.Sub(maxTime).Seconds())
		}
	}

	return deltas
}

// filterOwnJobs returns only the jobs owned by this compactor instance, preserving
// their relative order. It fails if the ownership check errors for any job.
func (c *BucketCompactor) filterOwnJobs(jobs []*Job) ([]*Job, error) {
	// Filter in place, reusing the backing array of the input slice.
	owned := jobs[:0]

	for _, job := range jobs {
		ok, err := c.ownJob(job)
		if err != nil {
			return nil, errors.Wrap(err, "ownJob")
		}
		if ok {
			owned = append(owned, job)
		}
	}

	return owned, nil
}

// filterJobsByWaitPeriod filters out jobs for which the configured wait period hasn't been honored yet.
// Jobs whose wait-period check fails are kept: enforcing the wait period is best-effort.
func (c *BucketCompactor) filterJobsByWaitPeriod(ctx context.Context, jobs []*Job) []*Job {
	// Filter in place, reusing the backing array of the input slice.
	kept := jobs[:0]

	for _, job := range jobs {
		elapsed, notElapsedBlock, err := jobWaitPeriodElapsed(ctx, job, c.waitPeriod, c.bkt)
		switch {
		case err != nil:
			// Keep the job when the check itself failed.
			level.Warn(c.logger).Log("msg", "not enforcing compaction wait period because the check if compaction job contains recently uploaded blocks has failed", "groupKey", job.Key(), "err", err)
			kept = append(kept, job)
		case !elapsed:
			level.Info(c.logger).Log("msg", "skipping compaction job because blocks in this job were uploaded too recently (within wait period)", "groupKey", job.Key(), "waitPeriodNotElapsedFor", notElapsedBlock.String())
		default:
			kept = append(kept, job)
		}
	}

	return kept
}

// Compile-time check that NoCompactionMarkFilter satisfies block.MetadataFilter.
var _ block.MetadataFilter = &NoCompactionMarkFilter{}

// NoCompactionMarkFilter is a block.Fetcher filter that finds all blocks with no-compact marker files, and optionally
// removes them from synced metas.
type NoCompactionMarkFilter struct {
	// bkt is the bucket scanned for no-compact marker files.
	bkt                   objstore.BucketReader
	// noCompactMarkedMap holds the IDs of marked blocks found by the last call to Filter.
	noCompactMarkedMap    map[ulid.ULID]struct{}
	// removeNoCompactBlocks makes Filter also delete marked blocks from the metas map.
	removeNoCompactBlocks bool
}

// NewNoCompactionMarkFilter creates NoCompactionMarkFilter.
func NewNoCompactionMarkFilter(bkt objstore.BucketReader, removeNoCompactBlocks bool) *NoCompactionMarkFilter {
	f := &NoCompactionMarkFilter{}
	f.bkt = bkt
	f.removeNoCompactBlocks = removeNoCompactBlocks
	// noCompactMarkedMap stays nil until Filter is called.
	return f
}

// NoCompactMarkedBlocks returns block ids that were marked for no compaction.
// It is safe to call this method only after Filter has finished, and it is also safe to manipulate the map between calls to Filter.
// Note: the returned map is the filter's internal state, not a copy; it may be nil before the first Filter call.
func (f *NoCompactionMarkFilter) NoCompactMarkedBlocks() map[ulid.ULID]struct{} {
	return f.noCompactMarkedMap
}

// Filter finds blocks that should not be compacted, and fills f.noCompactMarkedMap. If f.removeNoCompactBlocks is true,
// blocks are also removed from metas. (Thanos version of the filter doesn't do removal).
func (f *NoCompactionMarkFilter) Filter(ctx context.Context, metas map[ulid.ULID]*block.Meta, synced block.GaugeVec) error {
	marked := make(map[ulid.ULID]struct{})

	// Scan the markers directory in the bucket for no-compact marks.
	err := f.bkt.Iter(ctx, block.MarkersPathname+"/", func(name string) error {
		if ctx.Err() != nil {
			return ctx.Err()
		}

		blockID, ok := block.IsNoCompactMarkFilename(path.Base(name))
		if !ok {
			return nil
		}

		// Only track markers for blocks that were actually synced.
		if _, known := metas[blockID]; !known {
			return nil
		}

		marked[blockID] = struct{}{}
		synced.WithLabelValues(block.MarkedForNoCompactionMeta).Inc()

		if f.removeNoCompactBlocks {
			delete(metas, blockID)
		}
		return nil
	})
	if err != nil {
		return errors.Wrap(err, "list block no-compact marks")
	}

	f.noCompactMarkedMap = marked
	return nil
}

// hasNonZeroULIDs reports whether ids contains at least one ULID different from the zero value.
func hasNonZeroULIDs(ids []ulid.ULID) bool {
	var zero ulid.ULID

	for _, id := range ids {
		if id != zero {
			return true
		}
	}

	return false
}
