package dedup

import (
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sync"

	"github.com/containerd/containerd/log"
	"github.com/zeebo/blake3"
	bolt "go.etcd.io/bbolt"

	"onyx-snapshotter/pkg/config"
	"onyx-snapshotter/pkg/metrics"
)

const (
	// bucketChunks maps chunk digest -> on-disk chunk file path.
	bucketChunks    = "chunks"
	// bucketRefCounts maps chunk digest -> single-byte reference count.
	bucketRefCounts = "refcounts"
)

// Store manages content-addressed chunk storage with deduplication.
// Chunk payloads live as individual files under storeDir (sharded by
// digest prefix); a bolt database tracks digest->path mappings and
// per-chunk reference counts.
type Store struct {
	config   *config.DedupConfig // dedup settings (algorithm, chunk size, store dir)
	storeDir string              // root directory holding chunk files and dedup.db
	db       *bolt.DB            // metadata: chunk paths and refcounts
	chunker  Chunker             // splits input streams into chunks

	mu    sync.RWMutex // serializes store mutations and guards stats
	stats StoreStats   // running counters, seeded by calculateStats at startup
}

// StoreStats holds aggregate counters for the store.
type StoreStats struct {
	TotalChunks  int64 // chunk references processed, counting duplicates
	UniqueChunks int64 // distinct chunks actually stored on disk
	TotalBytes   int64 // bytes of unique chunk data on disk
	DedupedBytes int64 // bytes avoided via deduplication
}

// ChunkRef locates one chunk within a larger file: which stored chunk
// (by digest), its length, and its byte offset in the original stream.
type ChunkRef struct {
	Digest   string // content digest ("blake3:<hex>") identifying the chunk
	Size     int64  // chunk length in bytes
	Offset   int64  // byte offset of this chunk within the original file
	RefCount int32  // NOTE(review): never populated in this file — confirm callers set it
}

// NewStore opens (or creates) a deduplication store rooted at
// cfg.StoreDir: it prepares the directory, opens the bolt metadata
// database, ensures both buckets exist, builds the configured chunker,
// and seeds statistics from whatever is already on disk.
func NewStore(ctx context.Context, cfg *config.DedupConfig) (*Store, error) {
	if !cfg.Enabled {
		return nil, fmt.Errorf("deduplication is disabled")
	}

	if err := os.MkdirAll(cfg.StoreDir, 0o700); err != nil {
		return nil, fmt.Errorf("failed to create store dir: %w", err)
	}

	db, err := bolt.Open(filepath.Join(cfg.StoreDir, "dedup.db"), 0o600, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to open dedup db: %w", err)
	}

	// Make sure both metadata buckets exist before first use.
	if err := db.Update(func(tx *bolt.Tx) error {
		for _, name := range []string{bucketChunks, bucketRefCounts} {
			if _, err := tx.CreateBucketIfNotExists([]byte(name)); err != nil {
				return err
			}
		}
		return nil
	}); err != nil {
		db.Close()
		return nil, fmt.Errorf("failed to initialize db: %w", err)
	}

	// Select the chunking strategy; anything unknown is a hard error.
	var chunker Chunker
	switch cfg.Algorithm {
	case "fixed":
		chunker = NewFixedChunker(cfg.ChunkSize)
	case "cdc":
		chunker = NewCDCChunker(cfg.ChunkSize)
	default:
		db.Close()
		return nil, fmt.Errorf("unsupported chunking algorithm: %s", cfg.Algorithm)
	}

	store := &Store{
		config:   cfg,
		storeDir: cfg.StoreDir,
		db:       db,
		chunker:  chunker,
	}

	// Seed counters from existing on-disk state (best-effort).
	store.calculateStats(ctx)

	log.G(ctx).WithFields(map[string]interface{}{
		"algorithm":  cfg.Algorithm,
		"chunk_size": cfg.ChunkSize,
		"store_dir":  cfg.StoreDir,
	}).Info("deduplication store initialized")

	return store, nil
}

// StoreChunk stores a chunk and returns its content digest in the form
// "blake3:<hex>". If an identical chunk already exists, its reference
// count is incremented instead of writing the data again.
func (s *Store) StoreChunk(ctx context.Context, data []byte) (string, error) {
	// Content address: BLAKE3 over the raw chunk bytes.
	hash := blake3.Sum256(data)
	digest := fmt.Sprintf("blake3:%x", hash)

	s.mu.Lock()
	defer s.mu.Unlock()

	// Check if chunk already exists
	var exists bool
	err := s.db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(bucketChunks))
		exists = bucket.Get([]byte(digest)) != nil
		return nil
	})
	if err != nil {
		return "", err
	}

	if exists {
		// Duplicate hit: bump the refcount and account the bytes we
		// avoided writing (DedupedBytes was previously never updated).
		if err := s.incRefCount(digest); err != nil {
			return "", err
		}
		s.stats.DedupedBytes += int64(len(data))
		metrics.RecordDedupChunk(true, int64(len(data)))
		log.G(ctx).WithField("digest", digest[:16]+"...").Debug("chunk already exists")
		return digest, nil
	}

	// New chunk: write the data file first, then record metadata, so a
	// crash between the two leaves an orphan file rather than a dangling
	// metadata entry.
	chunkPath := s.chunkPath(digest)
	if err := os.MkdirAll(filepath.Dir(chunkPath), 0o700); err != nil {
		return "", fmt.Errorf("failed to create chunk dir: %w", err)
	}

	if err := os.WriteFile(chunkPath, data, 0o600); err != nil {
		return "", fmt.Errorf("failed to write chunk: %w", err)
	}

	// Record path and initial refcount (single byte, starts at 1).
	err = s.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(bucketChunks))
		if err := bucket.Put([]byte(digest), []byte(chunkPath)); err != nil {
			return err
		}

		refBucket := tx.Bucket([]byte(bucketRefCounts))
		return refBucket.Put([]byte(digest), []byte{1})
	})
	if err != nil {
		// Metadata write failed: don't leave an orphaned data file.
		os.Remove(chunkPath)
		return "", err
	}

	s.stats.UniqueChunks++
	s.stats.TotalBytes += int64(len(data))
	metrics.RecordDedupChunk(false, int64(len(data)))
	metrics.DedupStoreSize.Add(float64(len(data)))

	log.G(ctx).WithFields(map[string]interface{}{
		"digest": digest[:16] + "...",
		"size":   len(data),
	}).Debug("stored new chunk")

	return digest, nil
}

// GetChunk returns the raw bytes of the chunk identified by digest,
// resolving the digest to an on-disk path via the metadata database.
func (s *Store) GetChunk(ctx context.Context, digest string) ([]byte, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	// Resolve digest -> file path.
	var path string
	if err := s.db.View(func(tx *bolt.Tx) error {
		entry := tx.Bucket([]byte(bucketChunks)).Get([]byte(digest))
		if entry == nil {
			return fmt.Errorf("chunk not found")
		}
		path = string(entry)
		return nil
	}); err != nil {
		return nil, err
	}

	payload, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("failed to read chunk: %w", err)
	}
	return payload, nil
}

// ChunkFile splits the reader's content into chunks, stores each chunk
// (deduplicating against existing ones), and returns the ordered list
// of references needed to reconstruct the stream.
func (s *Store) ChunkFile(ctx context.Context, reader io.Reader) ([]ChunkRef, error) {
	chunks, err := s.chunker.Chunk(reader)
	if err != nil {
		return nil, fmt.Errorf("failed to chunk data: %w", err)
	}

	refs := make([]ChunkRef, 0, len(chunks))
	var offset int64

	for _, chunk := range chunks {
		digest, err := s.StoreChunk(ctx, chunk)
		if err != nil {
			return nil, fmt.Errorf("failed to store chunk: %w", err)
		}

		refs = append(refs, ChunkRef{
			Digest: digest,
			Size:   int64(len(chunk)),
			Offset: offset,
		})
		offset += int64(len(chunk))
	}

	// Update counters under the lock: StoreChunk releases s.mu before
	// returning, so the previous unlocked per-chunk increment and the
	// unlocked stats reads below were a data race.
	s.mu.Lock()
	s.stats.TotalChunks += int64(len(chunks))
	total, unique := s.stats.TotalChunks, s.stats.UniqueChunks
	s.mu.Unlock()

	// Dedup ratio = fraction of chunk references satisfied by reuse.
	if total > 0 {
		metrics.UpdateDedupRatio(1.0 - float64(unique)/float64(total))
	}

	return refs, nil
}

// ReconstructFile streams the referenced chunks, in order, into writer,
// rebuilding the original file content.
func (s *Store) ReconstructFile(ctx context.Context, refs []ChunkRef, writer io.Writer) error {
	for i := range refs {
		chunk, err := s.GetChunk(ctx, refs[i].Digest)
		if err != nil {
			return fmt.Errorf("failed to get chunk %s: %w", refs[i].Digest, err)
		}

		if _, err := writer.Write(chunk); err != nil {
			return fmt.Errorf("failed to write data: %w", err)
		}
	}

	return nil
}

// IncRef increments reference count for a chunk.
// It takes the store lock and returns an error if the digest is not
// present in the refcount bucket.
func (s *Store) IncRef(digest string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.incRefCount(digest)
}

// DecRef decrements the reference count for digest. When the count
// reaches zero the chunk's data file and metadata entries are removed.
func (s *Store) DecRef(ctx context.Context, digest string) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	var refCount int
	err := s.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(bucketRefCounts))
		data := bucket.Get([]byte(digest))
		if data == nil {
			return fmt.Errorf("chunk not found")
		}

		refCount = int(data[0]) - 1
		if refCount > 0 {
			// Still referenced: persist the decremented count.
			// (Put/Delete errors were previously ignored, which could
			// leave the count permanently wrong.)
			return bucket.Put([]byte(digest), []byte{byte(refCount)})
		}

		// Last reference dropped: remove the data file and metadata.
		// NOTE(review): the file removal is not rolled back if the tx
		// later fails; an orphaned metadata-less file is harmless.
		chunkBucket := tx.Bucket([]byte(bucketChunks))
		if chunkPath := chunkBucket.Get([]byte(digest)); chunkPath != nil {
			os.Remove(string(chunkPath))
			if err := chunkBucket.Delete([]byte(digest)); err != nil {
				return err
			}
		}
		if err := bucket.Delete([]byte(digest)); err != nil {
			return err
		}
		s.stats.UniqueChunks--
		return nil
	})
	if err != nil {
		return err
	}

	log.G(ctx).WithFields(map[string]interface{}{
		"digest":    digest[:16] + "...",
		"ref_count": refCount,
	}).Debug("decremented chunk refcount")

	return nil
}

// GC performs garbage collection, deleting every chunk whose reference
// count has dropped to zero along with its metadata entries.
func (s *Store) GC(ctx context.Context) error {
	log.G(ctx).Info("starting dedup garbage collection")

	// Hold the store lock like every other mutator so GC does not race
	// with StoreChunk/DecRef stats and file operations.
	s.mu.Lock()
	defer s.mu.Unlock()

	var removed int
	err := s.db.Update(func(tx *bolt.Tx) error {
		refBucket := tx.Bucket([]byte(bucketRefCounts))
		chunkBucket := tx.Bucket([]byte(bucketChunks))

		// Iterate with a cursor: bbolt forbids modifying a bucket from
		// within ForEach, but Cursor.Delete is safe during iteration.
		c := refBucket.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			if len(v) == 0 || v[0] != 0 {
				continue
			}
			if chunkPath := chunkBucket.Get(k); chunkPath != nil {
				os.Remove(string(chunkPath))
				if err := chunkBucket.Delete(k); err != nil {
					return err
				}
			}
			if err := c.Delete(); err != nil {
				return err
			}
			removed++
		}
		return nil
	})
	if err != nil {
		return err
	}

	log.G(ctx).WithField("removed", removed).Info("garbage collection completed")
	return nil
}

// Stats returns current store statistics.
// The returned value is a copy taken under the read lock, so callers
// may use it without further synchronization.
func (s *Store) Stats() StoreStats {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.stats
}

// Close closes the store's metadata database. The store must not be
// used after Close returns.
func (s *Store) Close() error {
	return s.db.Close()
}

// Internal helpers

// incRefCount bumps the stored reference count for digest by one.
// The count is persisted as a single byte, so it saturates at 255
// rather than wrapping. Callers must hold s.mu.
func (s *Store) incRefCount(digest string) error {
	key := []byte(digest)
	return s.db.Update(func(tx *bolt.Tx) error {
		refs := tx.Bucket([]byte(bucketRefCounts))
		current := refs.Get(key)
		if current == nil {
			return fmt.Errorf("chunk not found")
		}
		next := int(current[0]) + 1
		if next > 255 {
			next = 255 // Cap at max byte value
		}
		return refs.Put(key, []byte{byte(next)})
	})
}

// chunkPath maps a digest to its on-disk location, sharding chunks into
// two levels of two-hex-char directories to keep directory sizes small.
// Assumes digest carries the "blake3:" prefix (7 characters).
func (s *Store) chunkPath(digest string) string {
	hexDigest := digest[7:] // strip "blake3:" prefix
	return filepath.Join(s.storeDir, "chunks", hexDigest[0:2], hexDigest[2:4], hexDigest)
}

// calculateStats scans the metadata buckets to seed s.stats from
// existing on-disk state: unique chunk count, total bytes (via Stat on
// each chunk file), and total chunk references (sum of refcounts).
// Errors are deliberately ignored — stats are best-effort. Writes
// s.stats without holding s.mu; safe only because it is called once
// from NewStore before the store is shared.
func (s *Store) calculateStats(ctx context.Context) {
	s.db.View(func(tx *bolt.Tx) error {
		chunkBucket := tx.Bucket([]byte(bucketChunks))
		refBucket := tx.Bucket([]byte(bucketRefCounts))

		chunkBucket.ForEach(func(k, v []byte) error {
			s.stats.UniqueChunks++

			// Get chunk size
			if info, err := os.Stat(string(v)); err == nil {
				s.stats.TotalBytes += info.Size()
			}

			// Get refcount
			refData := refBucket.Get(k)
			if refData != nil {
				s.stats.TotalChunks += int64(refData[0])
			}
			return nil
		})
		return nil
	})

	log.G(ctx).WithFields(map[string]interface{}{
		"unique_chunks": s.stats.UniqueChunks,
		"total_chunks":  s.stats.TotalChunks,
		"total_bytes":   s.stats.TotalBytes,
	}).Info("calculated store statistics")
}
