package snapshotter

import (
	"context"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/mount"
	"github.com/containerd/containerd/snapshots"
	"github.com/containerd/containerd/snapshots/storage"
	bolt "go.etcd.io/bbolt"

	"onyx-snapshotter/pkg/config"
	"onyx-snapshotter/pkg/dedup"
	"onyx-snapshotter/pkg/estargz"
	"onyx-snapshotter/pkg/lazy"
	"onyx-snapshotter/pkg/metrics"
	"onyx-snapshotter/pkg/prefetch"
	"onyx-snapshotter/pkg/ublk"
)

// Names of the bolt buckets created in New's initialization transaction.
const (
	bucketSnapshots = "snapshots"
	bucketLayers    = "layers"
)

// Snapshotter implements a containerd snapshotter backed by a bolt metadata
// database with lazy loading (V1.0) and optional deduplication, prefetch and
// ublk support (V2.0). All public snapshot operations serialize on mu.
type Snapshotter struct {
	root   string         // base directory; snapshots live under root/snapshots/<id>
	config *config.Config // full configuration, read for Prefetch/Metrics flags
	db     *bolt.DB       // metadata database (root/metadata.db)
	ms     *storage.MetaStore

	// V1.0 components
	loader *lazy.Loader    // lazy file fetching + cache (see collectMetrics)
	parser *estargz.Parser // NOTE(review): stored but not used in this file — confirm usage elsewhere

	// V2.0 components
	dedupStore  *dedup.Store     // nil when cfg.Dedup.Enabled is false
	prefetchEng *prefetch.Engine // created in New with downloadFile as callback
	ublkDriver  *ublk.Driver     // nil when disabled or when initialization failed

	// Metrics collection
	metricsTimer *time.Ticker  // nil unless startMetricsCollection ran
	stopMetrics  chan struct{} // closed exactly once, in Close

	mu sync.Mutex
}

// New creates a Snapshotter rooted at cfg.Root. It opens (and, if needed,
// initializes) the bolt metadata database, builds the V1.0 lazy loader and
// estargz parser, and wires up the optional V2.0 components.
//
// On a fatal initialization error, every already-opened resource is closed
// before returning. A ublk initialization failure is deliberately non-fatal:
// it is logged and the snapshotter continues without ublk support.
func New(ctx context.Context, cfg *config.Config) (*Snapshotter, error) {
	// Create root directory
	if err := os.MkdirAll(cfg.Root, 0o700); err != nil {
		return nil, fmt.Errorf("failed to create root: %w", err)
	}

	// Open metadata database; Timeout bounds the wait on the bolt file lock
	// if another process currently holds it.
	dbPath := filepath.Join(cfg.Root, "metadata.db")
	db, err := bolt.Open(dbPath, 0o600, &bolt.Options{Timeout: 5 * time.Second})
	if err != nil {
		return nil, fmt.Errorf("failed to open database: %w", err)
	}

	// Ensure the buckets exist so later transactions can assume them.
	if err := db.Update(func(tx *bolt.Tx) error {
		if _, err := tx.CreateBucketIfNotExists([]byte(bucketSnapshots)); err != nil {
			return err
		}
		if _, err := tx.CreateBucketIfNotExists([]byte(bucketLayers)); err != nil {
			return err
		}
		return nil
	}); err != nil {
		db.Close()
		return nil, fmt.Errorf("failed to initialize database: %w", err)
	}

	// Create metadata store
	ms, err := storage.NewMetaStore(db)
	if err != nil {
		db.Close()
		return nil, fmt.Errorf("failed to create metastore: %w", err)
	}

	// Create lazy loader
	loader, err := lazy.NewLoader(ctx, cfg)
	if err != nil {
		db.Close()
		return nil, fmt.Errorf("failed to create loader: %w", err)
	}

	// Create estargz parser
	parser := estargz.NewParser()

	// Create deduplication store (V2.0, optional)
	var dedupStore *dedup.Store
	if cfg.Dedup.Enabled {
		dedupStore, err = dedup.NewStore(ctx, &cfg.Dedup)
		if err != nil {
			loader.Close()
			db.Close()
			return nil, fmt.Errorf("failed to create dedup store: %w", err)
		}
	}

	// Create ublk driver (V2.0, optional and best-effort)
	var ublkDriver *ublk.Driver
	if cfg.Ublk.Enabled {
		ublkDriver, err = ublk.NewDriver(&cfg.Ublk)
		if err != nil {
			log.G(ctx).WithError(err).Warn("failed to initialize ublk, continuing without it")
			// Discard any partially-initialized driver: the rest of the
			// code (Close, commit paths) treats a non-nil ublkDriver as
			// fully functional.
			ublkDriver = nil
		}
	}

	s := &Snapshotter{
		root:        cfg.Root,
		config:      cfg,
		db:          db,
		ms:          ms,
		loader:      loader,
		parser:      parser,
		dedupStore:  dedupStore,
		ublkDriver:  ublkDriver,
		stopMetrics: make(chan struct{}),
	}

	// Create prefetch engine with download callback (V2.0). Built after s
	// because the callback is a method on s.
	s.prefetchEng = prefetch.NewEngine(&cfg.Prefetch, s.downloadFile)

	// Start metrics collection
	if cfg.Metrics.Enabled {
		s.startMetricsCollection(ctx)
	}

	log.G(ctx).WithFields(map[string]interface{}{
		"root":     cfg.Root,
		"dedup":    cfg.Dedup.Enabled,
		"ublk":     cfg.Ublk.Enabled,
		"prefetch": cfg.Prefetch.Enabled,
	}).Info("snapshotter initialized")

	return s, nil
}

// Stat returns the metadata of the snapshot identified by key.
func (s *Snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, error) {
	start := time.Now()
	defer func() {
		metrics.SnapshotOperationDuration.WithLabelValues("stat").Observe(time.Since(start).Seconds())
	}()

	s.mu.Lock()
	defer s.mu.Unlock()

	_, info, _, err := storage.GetInfo(ctx, key, s.db)
	metrics.RecordSnapshotOperation("stat", err == nil)
	if err != nil {
		return snapshots.Info{}, err
	}
	return info, nil
}

// Update applies the given fieldpaths of info to the stored snapshot record
// and returns the updated record.
func (s *Snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) {
	start := time.Now()
	defer func() {
		metrics.SnapshotOperationDuration.WithLabelValues("update").Observe(time.Since(start).Seconds())
	}()

	s.mu.Lock()
	defer s.mu.Unlock()

	updated, err := storage.UpdateInfo(ctx, info, fieldpaths, s.db)
	if err != nil {
		metrics.RecordSnapshotOperation("update", false)
		return updated, err
	}
	metrics.RecordSnapshotOperation("update", true)
	return updated, nil
}

// Usage reports the disk usage of the snapshot identified by key. For active
// snapshots the usage is computed live from the snapshot directory; committed
// snapshots return the usage recorded in the metadata store.
func (s *Snapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) {
	start := time.Now()
	defer func() {
		metrics.SnapshotOperationDuration.WithLabelValues("usage").Observe(time.Since(start).Seconds())
	}()

	s.mu.Lock()
	defer s.mu.Unlock()

	id, info, usage, err := storage.GetInfo(ctx, key, s.db)
	if err != nil {
		metrics.RecordSnapshotOperation("usage", false)
		return snapshots.Usage{}, err
	}

	// Active snapshots change on disk, so measure them instead of trusting
	// the stored usage.
	if info.Kind == snapshots.KindActive {
		du, duErr := s.diskUsage(s.snapshotDir(id))
		if duErr != nil {
			metrics.RecordSnapshotOperation("usage", false)
			return snapshots.Usage{}, duErr
		}
		usage = du
	}

	metrics.RecordSnapshotOperation("usage", true)
	return usage, nil
}

// Mounts returns the mounts needed to access the snapshot identified by key.
func (s *Snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) {
	start := time.Now()
	defer func() {
		metrics.SnapshotOperationDuration.WithLabelValues("mounts").Observe(time.Since(start).Seconds())
	}()

	s.mu.Lock()
	defer s.mu.Unlock()

	sn, err := storage.GetSnapshot(ctx, key, s.db)
	metrics.RecordSnapshotOperation("mounts", err == nil)
	if err != nil {
		return nil, err
	}
	return s.mounts(sn), nil
}

// Prepare creates a new active snapshot on top of parent and returns its
// mounts.
func (s *Snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
	start := time.Now()
	defer func() {
		metrics.SnapshotOperationDuration.WithLabelValues("prepare").Observe(time.Since(start).Seconds())
	}()

	mounts, err := s.createSnapshot(ctx, snapshots.KindActive, key, parent, opts)
	ok := err == nil
	metrics.RecordSnapshotOperation("prepare", ok)
	if ok {
		metrics.ActiveSnapshots.Inc()
	}
	return mounts, err
}

// View creates a read-only snapshot of parent and returns its mounts.
func (s *Snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
	start := time.Now()
	defer func() {
		metrics.SnapshotOperationDuration.WithLabelValues("view").Observe(time.Since(start).Seconds())
	}()

	mounts, err := s.createSnapshot(ctx, snapshots.KindView, key, parent, opts)
	metrics.RecordSnapshotOperation("view", err == nil)
	return mounts, err
}

// Commit converts the active snapshot identified by key into a committed
// snapshot published under name. Deduplication (V2.0) is best-effort: a
// dedup failure is logged but does not fail the commit.
func (s *Snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error {
	start := time.Now()
	defer func() {
		metrics.SnapshotOperationDuration.WithLabelValues("commit").Observe(time.Since(start).Seconds())
	}()

	s.mu.Lock()
	defer s.mu.Unlock()

	onCommit := func(ctx context.Context, id string) error {
		// V2.0: Process deduplication if enabled
		if s.dedupStore == nil {
			return nil
		}
		if err := s.processDedupForSnapshot(ctx, id); err != nil {
			log.G(ctx).WithError(err).Warn("failed to process deduplication")
		}
		return nil
	}

	err := storage.CommitActive(ctx, key, name, snapshots.Usage{}, s.db, onCommit)
	metrics.RecordSnapshotOperation("commit", err == nil)
	if err != nil {
		return err
	}
	metrics.ActiveSnapshots.Dec()
	return nil
}

// Remove deletes the snapshot identified by key, including its on-disk
// directory. Dedup reference cleanup (V2.0) is best-effort and logged on
// failure.
func (s *Snapshotter) Remove(ctx context.Context, key string) error {
	start := time.Now()
	defer func() {
		metrics.SnapshotOperationDuration.WithLabelValues("remove").Observe(time.Since(start).Seconds())
	}()

	s.mu.Lock()
	defer s.mu.Unlock()

	cleanup := func(ctx context.Context, id string) error {
		dir := s.snapshotDir(id)

		// V2.0: Handle dedup reference counting
		if s.dedupStore != nil {
			if err := s.cleanupDedupRefs(ctx, id); err != nil {
				log.G(ctx).WithError(err).Warn("failed to cleanup dedup refs")
			}
		}

		return os.RemoveAll(dir)
	}

	_, info, err := storage.Remove(ctx, key, s.db, cleanup)
	metrics.RecordSnapshotOperation("remove", err == nil)
	if err != nil {
		return err
	}
	// Only active snapshots are counted in the ActiveSnapshots gauge.
	if info.Kind == snapshots.KindActive {
		metrics.ActiveSnapshots.Dec()
	}
	return nil
}

// Walk calls fn for every snapshot record matching the given filters.
func (s *Snapshotter) Walk(ctx context.Context, fn snapshots.WalkFunc, fs ...string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	return storage.WalkInfo(ctx, s.db, fn, fs...)
}

// Close releases all resources owned by the snapshotter. It must be called
// at most once: a second call would panic closing the stopMetrics channel.
func (s *Snapshotter) Close() error {
	// Stop the metrics goroutine first so it cannot touch components that
	// are torn down below.
	close(s.stopMetrics)
	if t := s.metricsTimer; t != nil {
		t.Stop()
	}

	// Optional V2.0 components are nil when disabled.
	if d := s.ublkDriver; d != nil {
		d.Close()
	}
	if d := s.dedupStore; d != nil {
		d.Close()
	}

	s.loader.Close()
	return s.db.Close()
}

// Internal helpers

// createSnapshot allocates a snapshot record of the given kind plus its
// on-disk directory and returns the resulting mounts. It backs both Prepare
// (KindActive) and View (KindView).
//
// NOTE(review): the opts parameter is accepted but never forwarded to
// storage.CreateSnapshot, so snapshot options/labels appear to be dropped —
// confirm against the storage package's API.
func (s *Snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts []snapshots.Opt) ([]mount.Mount, error) {
	s.mu.Lock()
	defer s.mu.Unlock()

	return storage.CreateSnapshot(ctx, kind, key, parent, s.db, func(ctx context.Context, id string, parentID string) error {
		snapshotDir := s.snapshotDir(id)
		if err := os.MkdirAll(snapshotDir, 0o700); err != nil {
			return fmt.Errorf("failed to create snapshot dir: %w", err)
		}

		// If parent exists, setup lazy loading
		if parentID != "" {
			parentDir := s.snapshotDir(parentID)
			if err := s.loader.Setup(ctx, snapshotDir, parentDir); err != nil {
				return fmt.Errorf("failed to setup lazy loading: %w", err)
			}

			// V2.0: Trigger prefetch if enabled.
			// context.Background() is deliberate: the prefetch goroutine
			// must outlive this transaction-scoped ctx.
			if s.config.Prefetch.Enabled && s.config.Prefetch.OnPrepare {
				go s.triggerPrefetch(context.Background(), parentID, snapshotDir)
			}
		}

		return nil
	}, func(ctx context.Context, sn storage.Snapshot) ([]mount.Mount, error) {
		return s.mounts(sn), nil
	})
}

// mounts builds the mount specification for a snapshot. A parentless
// snapshot is exposed as a writable bind mount of its directory; a snapshot
// with parents becomes an overlay mount over the parent chain, writable
// (upperdir/workdir) only for active snapshots.
func (s *Snapshotter) mounts(sn storage.Snapshot) []mount.Mount {
	if len(sn.ParentIDs) == 0 {
		return []mount.Mount{
			{
				Source:  s.snapshotDir(sn.ID),
				Type:    "bind",
				Options: []string{"rw", "rbind"},
			},
		}
	}

	// Overlayfs expects lowerdir entries as a ':'-separated list ordered
	// top-most layer first. storage.Snapshot.ParentIDs has the immediate
	// parent at index 0, so the chain is used in ParentIDs order.
	// (Previously the dirs were reversed AND joined with filepath.Join,
	// which produced a single bogus path instead of a ':' list.)
	lowerDirs := make([]string, len(sn.ParentIDs))
	for i, pid := range sn.ParentIDs {
		// NOTE(review): lowerdir uses the snapshot dir root while active
		// snapshots write under <dir>/fs — confirm committed layers keep
		// their content at the directory root.
		lowerDirs[i] = s.snapshotDir(pid)
	}

	snapshotDir := s.snapshotDir(sn.ID)
	workDir := filepath.Join(snapshotDir, "work")
	upperDir := filepath.Join(snapshotDir, "fs")

	options := []string{
		"lowerdir=" + strings.Join(lowerDirs, ":"),
	}

	if sn.Kind == snapshots.KindActive {
		// Best-effort: if creation fails, the overlay mount itself will
		// surface a clearer error (this helper cannot return one).
		_ = os.MkdirAll(workDir, 0o700)
		_ = os.MkdirAll(upperDir, 0o700)
		options = append(options,
			"upperdir="+upperDir,
			"workdir="+workDir,
		)
	}

	return []mount.Mount{
		{
			Type:    "overlay",
			Source:  "overlay",
			Options: options,
		},
	}
}

// snapshotDir returns the on-disk directory for the snapshot with the given
// internal storage ID: <root>/snapshots/<id>.
func (s *Snapshotter) snapshotDir(id string) string {
	return filepath.Join(s.root, "snapshots", id)
}

// diskUsage returns the total size in bytes of all non-directory entries
// under path. Uses filepath.WalkDir (Go 1.16+) rather than filepath.Walk so
// directories are skipped without an extra per-entry stat.
func (s *Snapshotter) diskUsage(path string) (snapshots.Usage, error) {
	var size int64
	err := filepath.WalkDir(path, func(_ string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			return nil
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		size += info.Size()
		return nil
	})
	return snapshots.Usage{Size: size}, err
}

// V2.0 specific methods

// processDedupForSnapshot walks the committed snapshot's directory and feeds
// every non-directory entry through the dedup store's chunker. Callers must
// ensure s.dedupStore is non-nil.
func (s *Snapshotter) processDedupForSnapshot(ctx context.Context, id string) error {
	root := s.snapshotDir(id)

	return filepath.Walk(root, func(path string, info os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if info.IsDir() {
			return nil
		}

		f, err := os.Open(path)
		if err != nil {
			return err
		}
		// Deferred inside the callback, so each file is closed before the
		// walk moves on.
		defer f.Close()

		refs, err := s.dedupStore.ChunkFile(ctx, f)
		if err != nil {
			return err
		}

		// Store chunk references in metadata
		// (simplified - real implementation would store refs properly)
		_ = refs

		return nil
	})
}

// cleanupDedupRefs releases the dedup chunk references held by the snapshot
// being removed. Currently a no-op stub:
// (simplified - real implementation would track refs properly)
func (s *Snapshotter) cleanupDedupRefs(ctx context.Context, id string) error {
	return nil
}

// triggerPrefetch kicks off background prefetching for a newly prepared
// snapshot's parent layer. Currently a stub that only logs; the real flow
// would look up the parent layer, parse its TOC and run the prefetch engine.
func (s *Snapshotter) triggerPrefetch(ctx context.Context, parentID, snapshotDir string) {
	log.G(ctx).WithField("parent", parentID).Debug("prefetch triggered")
}

// downloadFile is the prefetch engine's download callback. It delegates to
// the lazy loader; the url argument is unused because the loader resolves
// the source itself from the entry name and destination directory.
func (s *Snapshotter) downloadFile(ctx context.Context, url string, entry *estargz.TOCEntry, dest string) error {
	dir := filepath.Dir(dest)
	return s.loader.FetchFile(ctx, dir, entry.Name)
}

// Metrics collection

// startMetricsCollection launches a background goroutine that periodically
// publishes cache/dedup statistics until Close is called or ctx is
// cancelled. A non-positive configured interval disables collection instead
// of panicking (time.NewTicker panics on durations <= 0).
func (s *Snapshotter) startMetricsCollection(ctx context.Context) {
	interval := time.Duration(s.config.Metrics.Interval) * time.Second
	if interval <= 0 {
		log.G(ctx).Warn("metrics interval is not positive, metrics collection disabled")
		return
	}
	s.metricsTimer = time.NewTicker(interval)

	go func() {
		for {
			select {
			case <-s.metricsTimer.C:
				s.collectMetrics(ctx)
			case <-s.stopMetrics:
				return
			case <-ctx.Done():
				// Don't leak the goroutine if the surrounding context
				// ends before Close is called.
				return
			}
		}
	}()
}

// collectMetrics publishes the current loader cache statistics and, when
// deduplication is enabled, the dedup store size and dedup ratio.
func (s *Snapshotter) collectMetrics(ctx context.Context) {
	items, size := s.loader.GetCacheStats()
	metrics.UpdateCacheStats(items, size)

	if s.dedupStore == nil {
		return
	}

	stats := s.dedupStore.Stats()
	metrics.DedupStoreSize.Set(float64(stats.TotalBytes))
	if stats.TotalChunks == 0 {
		return
	}
	// Fraction of chunks eliminated by deduplication.
	ratio := 1.0 - float64(stats.UniqueChunks)/float64(stats.TotalChunks)
	metrics.UpdateDedupRatio(ratio)
}
