// ============================================================================
// File: pkg/puller/puller.go - v2.0 with concurrent downloading
// ============================================================================
package puller

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"context"
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"gitee.com/wangtsingx/onyx-snapshotter/pkg/cache"
	"gitee.com/wangtsingx/onyx-snapshotter/pkg/content"
	"gitee.com/wangtsingx/onyx-snapshotter/pkg/metrics"
	"github.com/sirupsen/logrus"
)

// Config holds the settings used to construct a Puller.
type Config struct {
	RegistryURL   string        // base URL of the registry (the "/v2/..." path is appended per request)
	Username      string        // optional basic-auth user; empty disables authentication
	Password      string        // basic-auth password, sent only when Username is non-empty
	Timeout       time.Duration // overall per-request timeout applied to the HTTP client
	MaxRetries    int           // number of download attempts per layer before giving up
	MaxConcurrent int           // upper bound on simultaneous layer downloads (semaphore size)
}

// Puller downloads image layer blobs from a registry and extracts them into
// per-snapshot directories, caching raw blobs for reuse across pulls.
type Puller struct {
	cfg    *Config         // construction-time configuration (not mutated after NewPuller)
	cs     *content.Store  // content store used to write extracted files (with dedup, per extractFile comment)
	cache  *cache.LRUCache // LRU cache of raw (still compressed) layer blobs, keyed by digest
	client *http.Client    // shared HTTP client carrying cfg.Timeout
	sem    chan struct{}   // counting semaphore bounding concurrent downloads to cfg.MaxConcurrent
}

// NewPuller builds a Puller from cfg, using cs for extracted-file writes and
// cache for raw layer blobs. The HTTP client is shared across all requests
// and carries cfg.Timeout.
//
// A non-positive cfg.MaxConcurrent is clamped to 1: a zero-capacity semaphore
// channel would make every downloadLayer call block forever, since a slot is
// only released after it has been acquired.
func NewPuller(cfg *Config, cs *content.Store, cache *cache.LRUCache) *Puller {
	maxConcurrent := cfg.MaxConcurrent
	if maxConcurrent < 1 {
		maxConcurrent = 1
	}
	return &Puller{
		cfg:    cfg,
		cs:     cs,
		cache:  cache,
		client: &http.Client{Timeout: cfg.Timeout},
		sem:    make(chan struct{}, maxConcurrent),
	}
}

// PullLayer makes layerDigest available under the snapshot identified by
// targetID: it serves the blob from the cache when possible, otherwise
// downloads it (with bounded retries and linear backoff), caches it, and
// extracts it into the snapshot's fs directory.
//
// The observed pull duration and pull/error counters are recorded via the
// metrics package regardless of outcome.
func (p *Puller) PullLayer(ctx context.Context, layerDigest, targetID string) error {
	timer := metrics.NewTimer()
	defer func() {
		metrics.PullDuration.Observe(timer.ObserveDuration().Seconds())
	}()

	logrus.Infof("pulling layer %s", layerDigest)
	metrics.LayerPulls.Inc()

	// Fast path: reuse a previously downloaded blob.
	if data, ok := p.cache.Get(layerDigest); ok {
		logrus.Debugf("layer %s found in cache", layerDigest)
		return p.extractCached(data, targetID)
	}

	// Always make at least one attempt even if MaxRetries is zero or
	// negative — otherwise the loop would be skipped entirely and nil data
	// would be cached and "extracted".
	attempts := p.cfg.MaxRetries
	if attempts < 1 {
		attempts = 1
	}

	var data []byte
	var err error
	for i := 0; i < attempts; i++ {
		data, err = p.downloadLayer(ctx, layerDigest)
		if err == nil {
			break
		}
		logrus.WithError(err).Warnf("download attempt %d failed", i+1)
		if i == attempts-1 {
			break // final attempt failed: no point backing off
		}
		// Linear backoff that honors cancellation. The previous code slept
		// unconditionally, ignoring ctx and delaying shutdown (and even
		// slept after the last attempt).
		select {
		case <-time.After(time.Second * time.Duration(i+1)):
		case <-ctx.Done():
			metrics.PullErrors.Inc()
			return fmt.Errorf("pull of layer %s cancelled: %w", layerDigest, ctx.Err())
		}
	}
	if err != nil {
		metrics.PullErrors.Inc()
		return fmt.Errorf("failed to download layer after %d retries: %w", attempts, err)
	}

	// Cache the raw blob so future pulls of the same layer skip the network.
	p.cache.Put(layerDigest, data)

	return p.extractLayerConcurrent(data, targetID)
}

// downloadLayer fetches one layer blob from the registry and returns its raw
// (still compressed) bytes. Concurrency across all callers is bounded by the
// puller's semaphore; both semaphore acquisition and the request itself
// respect ctx cancellation.
func (p *Puller) downloadLayer(ctx context.Context, layerDigest string) ([]byte, error) {
	// Acquire a semaphore slot, or give up if the context ends first.
	select {
	case p.sem <- struct{}{}:
		defer func() { <-p.sem }()
	case <-ctx.Done():
		return nil, ctx.Err()
	}

	// TODO(review): the repository is hard-coded to "library/alpine"; this
	// presumably should be derived from the image being pulled — confirm
	// against callers before generalizing the signature.
	url := fmt.Sprintf("%s/v2/library/alpine/blobs/%s", p.cfg.RegistryURL, layerDigest)
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}

	if p.cfg.Username != "" {
		req.SetBasicAuth(p.cfg.Username, p.cfg.Password)
	}

	resp, err := p.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		// Drain a bounded amount of the error body so the transport can
		// reuse the keep-alive connection (closing an unread body drops it).
		io.Copy(io.Discard, io.LimitReader(resp.Body, 4<<10))
		return nil, fmt.Errorf("unexpected status: %d", resp.StatusCode)
	}

	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	metrics.DownloadedBytes.Add(float64(len(data)))
	return data, nil
}

// extractCached extracts a cache-hit layer blob into the snapshot for
// targetID. Cached blobs are stored in their raw compressed form, so the
// normal concurrent extraction path applies unchanged.
func (p *Puller) extractCached(blob []byte, targetID string) error {
	return p.extractLayerConcurrent(blob, targetID)
}

// extractLayerConcurrent decompresses a gzipped tar layer held in memory and
// writes its contents under the snapshot directory for targetID. Directories
// are created inline, in tar order, so parents exist before workers write
// into them; regular-file entries are fanned out to a small worker pool.
// Entries of any other type are skipped.
//
// Fixes over the previous version:
//   - Workers no longer exit on the first error. The old code returned from
//     the worker, so after enough failures every worker was gone while the
//     dispatcher kept sending on a bounded channel — a permanent deadlock
//     once the buffer filled. Workers now record the first error and keep
//     draining; the dispatcher stops producing new jobs once an error is set.
//   - Tar entries whose names escape the target directory via ".." are
//     rejected ("zip-slip" path traversal).
func (p *Puller) extractLayerConcurrent(data []byte, targetID string) error {
	targetDir := filepath.Join("/var/lib/onyx/snapshots", targetID, "fs")

	gzr, err := gzip.NewReader(bytes.NewReader(data))
	if err != nil {
		return fmt.Errorf("failed to create gzip reader: %w", err)
	}
	defer gzr.Close()

	tr := tar.NewReader(gzr)

	type fileJob struct {
		header *tar.Header
		data   []byte
	}
	jobs := make(chan fileJob, 100)

	var wg sync.WaitGroup
	var errMu sync.Mutex
	var extractErr error

	// setErr records the first extraction error; later ones are dropped.
	setErr := func(e error) {
		errMu.Lock()
		if extractErr == nil {
			extractErr = e
		}
		errMu.Unlock()
	}
	// failed reports whether an extraction error has been recorded.
	failed := func() bool {
		errMu.Lock()
		defer errMu.Unlock()
		return extractErr != nil
	}

	numWorkers := 5
	for i := 0; i < numWorkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for job := range jobs {
				// After a failure, keep consuming so the dispatcher can
				// never block on a full channel — but do no more work.
				if failed() {
					continue
				}
				if err := p.extractFile(job.header, job.data, targetDir); err != nil {
					setErr(err)
				}
			}
		}()
	}

	// finish closes the job channel and waits for workers to drain it.
	finish := func() {
		close(jobs)
		wg.Wait()
	}

	for {
		header, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			finish()
			return fmt.Errorf("failed to read tar: %w", err)
		}

		if failed() {
			break // a worker already hit an error; stop dispatching
		}

		// Reject entries that resolve outside targetDir (path traversal).
		entryPath := filepath.Join(targetDir, header.Name)
		if rel, relErr := filepath.Rel(targetDir, entryPath); relErr != nil ||
			rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			finish()
			return fmt.Errorf("tar entry %q escapes target directory", header.Name)
		}

		// Directories are handled synchronously so they exist before any
		// worker writes a file beneath them.
		if header.Typeflag == tar.TypeDir {
			if err := os.MkdirAll(entryPath, os.FileMode(header.Mode)); err != nil {
				finish()
				return err
			}
			continue
		}

		// Only regular files are extracted for now (symlinks, devices,
		// hardlinks etc. are skipped).
		if header.Typeflag != tar.TypeReg {
			continue
		}

		// tr is only valid until the next call to tr.Next, so the file
		// contents must be copied out before dispatching.
		fileData, err := io.ReadAll(tr)
		if err != nil {
			finish()
			return fmt.Errorf("failed to read file data: %w", err)
		}

		jobs <- fileJob{header: header, data: fileData}
	}

	finish()

	// wg.Wait establishes happens-before, so reading extractErr without the
	// mutex here is safe.
	if extractErr != nil {
		return extractErr
	}

	logrus.Infof("layer %s extracted successfully", targetID)
	return nil
}

// extractFile writes one regular file from a layer into targetDir, routing
// the bytes through the content store (which, per the original comment,
// performs block deduplication) and then best-effort restoring the tar
// entry's access and modification times.
func (p *Puller) extractFile(header *tar.Header, data []byte, targetDir string) error {
	dest := filepath.Join(targetDir, header.Name)

	if err := p.cs.WriteFile(dest, data, os.FileMode(header.Mode)); err != nil {
		return fmt.Errorf("failed to write file %s: %w", dest, err)
	}

	// Timestamp restoration is non-fatal: log a warning and carry on.
	if err := os.Chtimes(dest, header.AccessTime, header.ModTime); err != nil {
		logrus.WithError(err).Warnf("failed to set times for %s", dest)
	}

	return nil
}

// FetchFile is the planned v2.0 entry point for retrieving a single file
// from a layer via HTTP range requests. It is not implemented yet and always
// returns an error, signalling callers to fall back to a full layer pull.
func (p *Puller) FetchFile(ctx context.Context, layerDigest, filePath string) ([]byte, error) {
	logrus.Debugf("fetching file %s from layer %s", filePath, layerDigest)

	// Range-request support has not been wired up; fail explicitly so the
	// caller can take the full-pull path.
	return nil, fmt.Errorf("range requests not yet implemented")
}

