// ============================================================================
// File: pkg/prefetch/engine.go - Prefetch Engine
// ============================================================================
package prefetch

import (
	"context"
	"io"
	"os"
	"path/filepath"
	"sync"

	"gitee.com/wangtsingx/onyx-snapshotter/pkg/content"
	"gitee.com/wangtsingx/onyx-snapshotter/pkg/metrics"
	"gitee.com/wangtsingx/onyx-snapshotter/pkg/puller"
	"github.com/sirupsen/logrus"
)

// Config holds the tunables for the prefetch engine.
type Config struct {
	// Paths are glob patterns, resolved relative to each snapshot's
	// mounted "fs" root, selecting which files to prefetch.
	Paths          []string
	// MaxSize caps the total number of bytes prefetched per job;
	// a value <= 0 disables the limit.
	MaxSize        int64
	// ConcurrentJobs is the number of worker goroutines started by
	// NewEngine to process queued prefetch jobs.
	ConcurrentJobs int
}

// Engine prefetches files for snapshots using a fixed pool of worker
// goroutines fed through a buffered job channel.
type Engine struct {
	cfg    *Config
	cs     *content.Store  // NOTE(review): currently unused by the visible methods — confirm intended
	puller *puller.Puller  // NOTE(review): currently unused by the visible methods — confirm intended
	jobs   chan prefetchJob // buffered queue of pending prefetch jobs
	stop   chan struct{}    // closed by Stop to signal shutdown
	wg     sync.WaitGroup   // tracks worker goroutines for Stop to wait on
}

// prefetchJob is a unit of work queued by Prefetch and consumed by a worker.
type prefetchJob struct {
	layerDigest string // NOTE(review): carried but not read by processPrefetch — confirm intended
	snapshotID  string // snapshot whose mounted fs is walked for prefetching
}

// NewEngine builds a prefetch engine and immediately starts its worker
// pool. Stop must be called to shut the workers down.
//
// Fix: a non-positive cfg.ConcurrentJobs previously started zero
// workers, so queued jobs were never processed and Prefetch would
// block indefinitely once the channel buffer filled. We now fall back
// to a single worker in that case.
func NewEngine(cfg *Config, cs *content.Store, p *puller.Puller) *Engine {
	e := &Engine{
		cfg:    cfg,
		cs:     cs,
		puller: p,
		jobs:   make(chan prefetchJob, 100), // buffered so Prefetch rarely blocks
		stop:   make(chan struct{}),
	}

	// Start the worker pool, guaranteeing at least one consumer.
	workers := cfg.ConcurrentJobs
	if workers <= 0 {
		workers = 1
	}
	for i := 0; i < workers; i++ {
		e.wg.Add(1)
		go e.worker()
	}

	return e
}

// Prefetch enqueues a prefetch job for the given layer digest and
// snapshot. It blocks until the job is queued, the caller's context is
// canceled, or the engine is stopped; in the latter two cases the job
// is silently dropped.
//
// Fix: ctx was previously accepted but never consulted, so a caller
// could block forever on a full queue even after cancellation.
func (e *Engine) Prefetch(ctx context.Context, layerDigest, snapshotID string) {
	select {
	case e.jobs <- prefetchJob{layerDigest: layerDigest, snapshotID: snapshotID}:
		metrics.PrefetchJobs.Inc()
	case <-ctx.Done():
		// Caller gave up waiting; drop the job.
	case <-e.stop:
		// Engine is shutting down; drop the job.
	}
}

// worker consumes jobs from the queue until the engine is stopped.
// It runs on its own goroutine; the WaitGroup lets Stop wait for exit.
func (e *Engine) worker() {
	defer e.wg.Done()

	for {
		select {
		case <-e.stop:
			return
		case j := <-e.jobs:
			e.processPrefetch(j)
		}
	}
}

// processPrefetch walks the configured glob patterns under the
// snapshot's mounted filesystem and reads every matching regular file
// to warm it, stopping once the configured size budget is exhausted.
//
// Fix: files were previously loaded wholesale with os.ReadFile, which
// buffers each file entirely in memory just to trigger loading; we now
// stream to io.Discard instead, keeping memory usage constant. The
// goto is also replaced with an idiomatic labeled break.
func (e *Engine) processPrefetch(job prefetchJob) {
	timer := metrics.NewTimer()
	defer func() {
		metrics.PrefetchDuration.Observe(timer.ObserveDuration().Seconds())
	}()

	logrus.Debugf("prefetching for snapshot %s", job.snapshotID)

	// NOTE(review): snapshot root is hard-coded here — confirm it
	// matches the snapshotter's configured data directory.
	snapDir := filepath.Join("/var/lib/onyx/snapshots", job.snapshotID, "fs")

	prefetchedCount := 0
	prefetchedSize := int64(0)

patterns:
	for _, pathPattern := range e.cfg.Paths {
		matches, err := filepath.Glob(filepath.Join(snapDir, pathPattern))
		if err != nil {
			// Glob only errors on a malformed pattern; skip it.
			logrus.WithError(err).Warnf("failed to glob %s", pathPattern)
			continue
		}

		for _, path := range matches {
			info, err := os.Stat(path)
			if err != nil || info.IsDir() {
				continue
			}

			// Stop entirely once this file would exceed the budget.
			if e.cfg.MaxSize > 0 && prefetchedSize+info.Size() > e.cfg.MaxSize {
				logrus.Debugf("prefetch size limit reached")
				break patterns
			}

			// Read the file to trigger loading; the data itself is
			// discarded, so no per-file buffer is held.
			n, err := prefetchFile(path)
			if err != nil {
				logrus.WithError(err).Debugf("failed to prefetch %s", path)
				continue
			}

			prefetchedCount++
			prefetchedSize += n
		}
	}

	metrics.PrefetchedFiles.Add(float64(prefetchedCount))
	metrics.PrefetchedBytes.Add(float64(prefetchedSize))
	logrus.Infof("prefetched %d files (%d bytes) for snapshot %s",
		prefetchedCount, prefetchedSize, job.snapshotID)
}

// prefetchFile reads path to EOF, discarding the contents, and returns
// the number of bytes read. Reading is enough to fault the data in.
func prefetchFile(path string) (int64, error) {
	f, err := os.Open(path)
	if err != nil {
		return 0, err
	}
	defer f.Close()
	return io.Copy(io.Discard, f)
}

// Stop signals all workers to exit and blocks until they have
// finished. Jobs still buffered in e.jobs are dropped, not drained.
// NOTE(review): calling Stop more than once panics on the double
// close of e.stop — confirm callers invoke it exactly once.
func (e *Engine) Stop() {
	close(e.stop)
	e.wg.Wait()
}