package lazy

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"sync"
	"time"

	"github.com/containerd/containerd/log"
	"onyx-snapshotter/pkg/cache"
	"onyx-snapshotter/pkg/config"
	"onyx-snapshotter/pkg/estargz"
)

// Loader lazily fetches individual files from remote estargz layers on
// demand, caching downloaded chunks locally.
type Loader struct {
	config *config.Config
	cache  *cache.Cache   // local chunk cache, keyed by chunk digest (or URL:path fallback)
	parser *estargz.Parser

	// Active monitoring
	// monitors maps a snapshot directory to its registered layer monitor.
	monitors map[string]*Monitor
	mu       sync.Mutex // guards monitors

	// Download queue
	// downloadQueue feeds tasks to the worker goroutines started in NewLoader.
	downloadQueue chan *DownloadTask
	wg            sync.WaitGroup // tracks worker goroutines; waited on in Close
	stopCh        chan struct{}  // closed by Close to stop all workers
}

// Monitor associates one snapshot directory with the remote estargz layer
// that backs it, holding the parsed table of contents used to locate files.
type Monitor struct {
	snapshotDir string       // local snapshot directory being served
	layerURL    string       // remote URL of the estargz layer blob
	toc         *estargz.TOC // parsed table of contents for the layer
	loader      *Loader      // back-reference to the owning loader
}

// DownloadTask describes a single chunk download queued for a worker.
type DownloadTask struct {
	url   string            // layer blob URL to range-request against
	entry *estargz.TOCEntry // TOC entry giving offset/size of the chunk
	dest  string            // absolute destination path on disk
	// NOTE(review): storing a context in a struct is normally discouraged;
	// here it carries the enqueuing request's context to the worker so the
	// download is cancelled with the originating request.
	ctx context.Context
}

// NewLoader creates a Loader backed by a local chunk cache and starts the
// background download workers.
//
// The number of workers comes from cfg.Download.Concurrency; a value of
// zero or less is clamped to one so that queued downloads are always
// eventually processed (previously a non-positive value started no workers
// and every queued task stalled forever).
func NewLoader(ctx context.Context, cfg *config.Config) (*Loader, error) {
	// Create cache
	c, err := cache.New(cfg.Cache.Dir, cfg.Cache.MaxSize)
	if err != nil {
		return nil, fmt.Errorf("failed to create cache: %w", err)
	}

	l := &Loader{
		config:        cfg,
		cache:         c,
		parser:        estargz.NewParser(),
		monitors:      make(map[string]*Monitor),
		downloadQueue: make(chan *DownloadTask, 100),
		stopCh:        make(chan struct{}),
	}

	// Start download workers; guarantee at least one so the queue drains.
	workers := cfg.Download.Concurrency
	if workers <= 0 {
		workers = 1
	}
	for i := 0; i < workers; i++ {
		l.wg.Add(1)
		go l.downloadWorker(ctx)
	}

	log.G(ctx).Info("lazy loader started")
	return l, nil
}

// Setup prepares lazy loading for a new snapshot directory.
//
// V1.0 performs no real work beyond logging; fanotify-based monitoring is
// planned for V2.0.
func (l *Loader) Setup(ctx context.Context, snapshotDir, parentDir string) error {
	l.mu.Lock()
	defer l.mu.Unlock()

	fields := map[string]interface{}{
		"snapshot": snapshotDir,
		"parent":   parentDir,
	}
	log.G(ctx).WithFields(fields).Debug("setup lazy loading")

	return nil
}

// RegisterLayer probes layerURL for estargz support, parses its table of
// contents, and records a Monitor for snapshotDir so FetchFile can serve
// files from it. Non-estargz layers are silently skipped (returns nil).
//
// The network probing and TOC parsing happen BEFORE the mutex is taken:
// previously l.mu was held across both remote calls, blocking FetchFile
// (and every other lock holder) for the full duration of slow registry
// round-trips. The lock now covers only the map write.
func (l *Loader) RegisterLayer(ctx context.Context, snapshotDir, layerURL string) error {
	// Check if it's an estargz layer
	if !l.parser.IsEstargz(ctx, layerURL) {
		log.G(ctx).WithField("url", layerURL).Debug("not an estargz layer, skipping")
		return nil
	}

	// Parse TOC
	toc, err := l.parser.ParseTOC(ctx, layerURL)
	if err != nil {
		return fmt.Errorf("failed to parse TOC: %w", err)
	}

	monitor := &Monitor{
		snapshotDir: snapshotDir,
		layerURL:    layerURL,
		toc:         toc,
		loader:      l,
	}

	// Only the shared map needs mutual exclusion.
	l.mu.Lock()
	l.monitors[snapshotDir] = monitor
	l.mu.Unlock()

	log.G(ctx).WithField("snapshot", snapshotDir).Info("registered estargz layer")

	return nil
}

// FetchFile materializes a single file from a lazily-loaded layer into
// snapshotDir. Cache hits are written synchronously; misses are queued to
// the background download workers.
//
// Returns nil when snapshotDir has no registered monitor (not lazy-loaded),
// and an error when the file is absent from the layer's TOC or the write
// fails.
func (l *Loader) FetchFile(ctx context.Context, snapshotDir, path string) error {
	l.mu.Lock()
	monitor, ok := l.monitors[snapshotDir]
	l.mu.Unlock()

	if !ok {
		// Not a lazy-loaded snapshot
		return nil
	}

	// Find file in TOC
	entry := l.parser.GetFileEntry(monitor.toc, path)
	if entry == nil {
		return fmt.Errorf("file not found in TOC: %s", path)
	}

	// Prefer the content-addressed digest as the cache key; fall back to a
	// URL+path key for entries without one.
	cacheKey := entry.ChunkDigest
	if cacheKey == "" {
		cacheKey = fmt.Sprintf("%s:%s", monitor.layerURL, path)
	}

	dest := filepath.Join(snapshotDir, path)

	if l.cache.Has(cacheKey) {
		log.G(ctx).WithField("path", path).Debug("cache hit")
		// Copy from cache
		data, err := l.cache.Get(cacheKey)
		if err != nil {
			return err
		}
		// Ensure the parent directory exists before writing. The download
		// path already does this; previously the cache-hit path did not and
		// failed for files in not-yet-created subdirectories.
		if err := os.MkdirAll(filepath.Dir(dest), 0o755); err != nil {
			return fmt.Errorf("failed to create directory: %w", err)
		}
		return os.WriteFile(dest, data, 0o644)
	}

	// Queue download
	task := &DownloadTask{
		url:   monitor.layerURL,
		entry: entry,
		dest:  dest,
		ctx:   ctx,
	}

	select {
	case l.downloadQueue <- task:
		log.G(ctx).WithField("path", path).Debug("queued download")
	case <-ctx.Done():
		return ctx.Err()
	}

	return nil
}

// downloadWorker is a long-lived goroutine that drains the download queue
// until the loader is closed or its context is cancelled.
//
// Previously the worker only watched stopCh and would keep running after
// ctx was cancelled; it now also exits on ctx.Done so workers never outlive
// the context they were started with.
func (l *Loader) downloadWorker(ctx context.Context) {
	defer l.wg.Done()

	// One client per worker; the timeout bounds each whole request.
	client := &http.Client{
		Timeout: time.Duration(l.config.Download.Timeout) * time.Second,
	}

	for {
		select {
		case <-l.stopCh:
			return
		case <-ctx.Done():
			return
		case task := <-l.downloadQueue:
			if err := l.processDownload(task.ctx, client, task); err != nil {
				log.G(ctx).WithError(err).WithField("dest", task.dest).Error("download failed")
			}
		}
	}
}

// processDownload fetches one chunk of task.entry from task.url via an HTTP
// Range request, writes it to task.dest, and stores it in the cache.
//
// It retries up to config.Download.MaxRetries times with linear backoff
// (1s, 2s, ...). Any status other than 206 Partial Content is treated as a
// failure — a 200 would mean the server ignored the Range header and
// returned the entire blob. Previously, if the final attempt returned a
// non-206 status with no transport error, the code fell through, closed the
// response body twice, and tried to read from the already-closed body; the
// per-attempt fetch is now isolated in fetchRange with correct close and
// status handling.
func (l *Loader) processDownload(ctx context.Context, client *http.Client, task *DownloadTask) error {
	log.G(ctx).WithFields(map[string]interface{}{
		"url":    task.url,
		"offset": task.entry.Offset,
		"size":   task.entry.Size,
	}).Debug("downloading chunk")

	// Range is inclusive on both ends (RFC 7233).
	start := task.entry.Offset
	end := start + task.entry.Size - 1
	rangeHeader := fmt.Sprintf("bytes=%d-%d", start, end)

	var data []byte
	var lastErr error

	// Retry logic with linear backoff.
	for attempt := 0; attempt <= l.config.Download.MaxRetries; attempt++ {
		data, lastErr = l.fetchRange(ctx, client, task.url, rangeHeader)
		if lastErr == nil {
			break
		}
		if attempt < l.config.Download.MaxRetries {
			time.Sleep(time.Second * time.Duration(attempt+1))
		}
	}

	if lastErr != nil {
		return fmt.Errorf("failed after %d retries: %w", l.config.Download.MaxRetries, lastErr)
	}

	// Ensure directory exists
	destDir := filepath.Dir(task.dest)
	if err := os.MkdirAll(destDir, 0o755); err != nil {
		return fmt.Errorf("failed to create directory: %w", err)
	}

	// Write to destination
	if err := os.WriteFile(task.dest, data, 0o644); err != nil {
		return fmt.Errorf("failed to write file: %w", err)
	}

	// Store in cache, keyed by chunk digest when available.
	cacheKey := task.entry.ChunkDigest
	if cacheKey == "" {
		cacheKey = fmt.Sprintf("%s:%s", task.url, task.entry.Name)
	}
	l.cache.Put(cacheKey, data)

	log.G(ctx).WithField("dest", task.dest).Info("download completed")
	return nil
}

// fetchRange performs a single Range GET and returns the body bytes. The
// response body is closed on every path, and any status other than
// 206 Partial Content is reported as an error.
func (l *Loader) fetchRange(ctx context.Context, client *http.Client, url, rangeHeader string) ([]byte, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Range", rangeHeader)

	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusPartialContent {
		return nil, fmt.Errorf("unexpected status %d (want 206 Partial Content)", resp.StatusCode)
	}

	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response: %w", err)
	}
	return data, nil
}

// Close stops every download worker and releases the underlying cache.
// It blocks until all workers have exited.
func (l *Loader) Close() error {
	close(l.stopCh) // signal workers to stop
	l.wg.Wait()     // wait for all workers to return
	return l.cache.Close()
}
